ZorroGPT #487923
11/19/23 11:26
TipmyPip (OP), Member, Joined: Sep 2017, Posts: 164
Something big is coming... complete automation of strategy development for Zorro inside VS Code, using AI/ML agents that work asynchronously across the platform and interact directly with the compiler.

https://bit.ly/3Gbsm4S

May all traders enjoy.


Code
Initialize parameters and constants
Initialize data buffers for multiple timeframes
Initialize historical performance record

For each new bar:
    For each timeframe in {Fast, Medium, Slow}:
        Update price buffer
        Compute AbstractMetric_A[timeframe]  // e.g., some transformation of price history

    Compute AbstractFeatureSet1 from AbstractMetric_A and other transformations

    RawSignal ← AbstractDecisionFunction1(AbstractFeatureSet1)

    ModelInput1 ← AbstractFeatureSet1
    [ProbLong, ProbShort] ← Model1(ModelInput1)
    AdjustedSignal ← AbstractSignalFilter(RawSignal, ProbLong, ProbShort)

    Update performance history from closed trades
    Compute AbstractFeatureSet2 from AbstractMetric_A and performance statistics

    ModelInput2 ← AbstractFeatureSet2
    [StopMultiplier, TakeProfitMultiplier] ← Model2(ModelInput2)
    Constrain StopMultiplier, TakeProfitMultiplier to allowable ranges

    StopDistance ← AbstractVolatilityMeasure * StopMultiplier
    TakeProfitDistance ← AbstractVolatilityMeasure * TakeProfitMultiplier

    If in training mode:
        Execute trades based on RawSignal
    Else:
        Execute trades based on AdjustedSignal

    Plot monitoring metrics (non-trading)
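
As a rough illustration of how this loop could map onto Zorro, here is a minimal lite-C sketch. It is my own reduction, not the actual system: Model1 is approximated by Zorro's built-in decision-tree advisor, Model2 is replaced by fixed stop/take-profit multipliers, and the two features are arbitrary examples.

Code
// Minimal sketch only; feature choice and multipliers are assumptions.
function run()
{
  BarPeriod = 60;
  LookBack = 100;
  set(RULES);          // enable the advise() machinery
  if(Train) Hedge = 2; // allow hedged trades while training

  vars Price = series(priceClose());

  // stand-in for AbstractFeatureSet1: two signals in a bounded range
  var Signals[2];
  Signals[0] = 100*FisherN(Price,50);        // normalized price transform
  Signals[1] = (Price[0]-SMA(Price,20))/PIP; // distance from a short SMA

  // stand-in for Model2: fixed multipliers on an ATR volatility measure
  Stop = 2*ATR(20);
  TakeProfit = 3*ATR(20);

  // Model1: decision tree trained on the feature set
  if(adviseLong(DTREE,0,Signals,2) > 0)
    enterLong();
  if(adviseShort(DTREE,0,Signals,2) > 0)
    enterShort();
}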

Last edited by TipmyPip; 08/24/25 18:21.
Re: Zorro Trader GPT [Re: TipmyPip] #487926
11/21/23 15:14
thumper14, Guest, Joined: Apr 2023, Posts: 3
Thank you for sharing!

Very interested in this capability. I'm familiar with the concept of ChatGPT and its applications to coding; however, I have never personally used it. Are you able to provide some examples of it being used in a Zorro-specific application?

Re: Zorro Trader GPT [Re: TipmyPip] #487927
11/22/23 05:08
TipmyPip (OP), Member, Joined: Sep 2017, Posts: 164
Here is one idea: I wish Zorro Trader (lite-C) could manage dynamic memory better. Coding the program below with dynamic allocation doesn't work out properly (Error 111 in main()), so it uses a static node pool instead. (The program builds a bitwise trie of 32-bit integers and finds the maximum pairwise XOR by greedily following opposite bits.)

Code
#include <stdio.h>

#define INT_BITS 32
#define MAX_NODES 1024 

// Trie node structure
typedef struct TrieNode {
    struct TrieNode* bit[2];
} TrieNode;

TrieNode trieNodes[MAX_NODES]; 
int trieNodeCount = 0; 


TrieNode* newTrieNode() {
    if (trieNodeCount >= MAX_NODES) {
        printf("Exceeded maximum trie nodes\n");
        return NULL;
    }
    TrieNode* newNode = &trieNodes[trieNodeCount++];
    newNode->bit[0] = NULL;
    newNode->bit[1] = NULL;
    return newNode;
}


void insert(TrieNode* root, int number) {
    TrieNode* current = root;
    int i, bit;
    for (i = INT_BITS - 1; i >= 0; i--) {
        bit = (number >> i) & 1;
        if (!current->bit[bit]) {
            current->bit[bit] = newTrieNode();
            if (!current->bit[bit]) return; 
        }
        current = current->bit[bit];
    }
}


int findMaxXOR(TrieNode* root, int number) {
    TrieNode* current = root;
    int maxXOR = 0;
    int i, bit;
    for (i = INT_BITS - 1; i >= 0; i--) {
        bit = (number >> i) & 1;
        if (current->bit[1 - bit]) {
            maxXOR |= (1 << i);
            current = current->bit[1 - bit];
        } else {
            current = current->bit[bit];
        }
    }
    return maxXOR;
}


int getMaxXOR(int* arr, int size) {
    TrieNode* root = newTrieNode();
    if (!root) return 0; 
    int maxXOR = 0;
    int i, currentXOR;
    for (i = 0; i < size; i++) {
        insert(root, arr[i]);
        currentXOR = findMaxXOR(root, arr[i]);
        if (currentXOR > maxXOR) {
            maxXOR = currentXOR;
        }
    }
    return maxXOR;
}

void main() {
    int arr[10] = {3, 10, 5, 25, 2, 8};
    int size = 6;
    int result = getMaxXOR(arr, size);
    printf("Maximum XOR: %d\n", result);
}


And another version:

Code
#define INT_BITS 32
#define MAX_NODES 1024 
#define MAX_ARRAY_SIZE 10
#define RAND_MAX_VALUE 100  // maximum random number value

// Trie node structure
typedef struct TrieNode {
    struct TrieNode* bit[2];
} TrieNode;

TrieNode trieNodes[MAX_NODES]; 
int trieNodeCount = 0; 

// === Clear trie memory safely ===
void clearTrie()
{
    int i;
    for(i = 0; i < MAX_NODES; i++) {
        trieNodes[i].bit[0] = NULL;
        trieNodes[i].bit[1] = NULL;
    }
    trieNodeCount = 0;
}

// === Allocate new node ===
TrieNode* newTrieNode() {
    if (trieNodeCount >= MAX_NODES) {
        printf("\nExceeded maximum trie nodes\n");
        return NULL;
    }
    TrieNode* newNode = &trieNodes[trieNodeCount++];
    newNode->bit[0] = NULL;
    newNode->bit[1] = NULL;
    return newNode;
}

// === Insert number into trie ===
void insert(TrieNode* root, int number) {
    TrieNode* current = root;
    int i, bit;
    for (i = INT_BITS - 1; i >= 0; i--) {
        bit = (number >> i) & 1;
        if (!current->bit[bit]) {
            current->bit[bit] = newTrieNode();
            if (!current->bit[bit]) return; 
        }
        current = current->bit[bit];
    }
}

// === Find max XOR for given number ===
int findMaxXOR(TrieNode* root, int number) {
    TrieNode* current = root;
    int maxXOR = 0;
    int i, bit;
    for (i = INT_BITS - 1; i >= 0; i--) {
        bit = (number >> i) & 1;
        if (current->bit[1 - bit]) {
            maxXOR |= (1 << i);
            current = current->bit[1 - bit];
        } else {
            current = current->bit[bit];
        }
    }
    return maxXOR;
}

// === Compute max XOR from array & track best pair ===
int getMaxXOR(int* arr, int size, int* num1, int* num2) {
    clearTrie();  // ensure trie is empty before use

    TrieNode* root = newTrieNode();
    if (!root) return 0; 

    int maxXOR = 0;
    int i, currentXOR;

    *num1 = 0;
    *num2 = 0;

    for (i = 0; i < size; i++) {
        insert(root, arr[i]);
        currentXOR = findMaxXOR(root, arr[i]);
        if (currentXOR > maxXOR) {
            maxXOR = currentXOR;
            *num1 = arr[i];
            *num2 = arr[i] ^ maxXOR; // second number from XOR relation
        }
    }
    return maxXOR;
}

// === Zorro entry point ===
function run() {
    int size = MAX_ARRAY_SIZE;
    int arr[MAX_ARRAY_SIZE];
    int i;

    // Generate random integers
    printf("\nGenerated numbers: ");
    for (i = 0; i < size; i++) {
        arr[i] = random(RAND_MAX_VALUE); 
        printf("%d ", arr[i]);
    }
    printf("\n");

    int n1, n2;
    int result = getMaxXOR(arr, size, &n1, &n2);

    printf("Maximum XOR: %d (from %d ^ %d)\n", result, n1, n2);
}


Last edited by TipmyPip; 08/10/25 11:25.
Re: Zorro Trader GPT [Re: TipmyPip] #487928
11/22/23 12:25
AndrewAMD, Serious User, Joined: Feb 2017, Posts: 1,806, Chicago
You could just write a C++ Zorro script and use std::vector instead.

Re: Zorro Trader GPT [Re: TipmyPip] #487929
11/22/23 14:11
TipmyPip (OP), Member, Joined: Sep 2017, Posts: 164
Yes, thank you very much for the idea; it is a very good solution. But it goes beyond the scope of the platform and requires external resources that depend on other languages. That would be easy to do, but our focus currently is only on Zorro Trader itself, not on going beyond the scope of the platform, even though we can always find external resources to solve any problem that can't be solved with Zorro Trader's internal resources.

Re: Zorro Trader GPT [Re: TipmyPip] #487930
11/22/23 15:24
AndrewAMD, Serious User, Joined: Feb 2017, Posts: 1,806, Chicago
Originally Posted by TipmyPip
But it goes beyond the scope of the platform and requires external resources that depend on other languages.
Zorro supports C++ out-of-the-box directly by invoking the VS compiler. It really is superior to Lite-C in every way.

Re: Zorro Trader GPT [Re: AndrewAMD] #487931
11/22/23 16:53
TipmyPip (OP), Member, Joined: Sep 2017, Posts: 164
I do understand. But depending on another language or compiler is something you can do with any language or compiler; the question is whether you can solve the problem with only the limited tools that lite-C offers.
I have already solved the problem in C; I don't need C++ for it. I am asking whether anyone can solve the problem with lite-C alone, without depending on external tools.

Re: Zorro Trader GPT [Re: TipmyPip] #487959
12/02/23 02:15
NewtraderX, Newbie, Joined: Mar 2021, Posts: 35, Ocean county, Florida
Originally Posted by TipmyPip
Hi, Friends, We are happy to announce the First version of ZorroTrader GPT.

https://bit.ly/3Gbsm4S

May all traders enjoy.



Hello TipmyPip, I will try to utilize ZorroTrader GPT. Bard AI by Google is also pretty good; what do you think about it? Since it's free, can we use Bard to produce lite-C code?

Re: Zorro Trader GPT [Re: NewtraderX] #487960
12/02/23 06:10
TipmyPip (OP), Member, Joined: Sep 2017, Posts: 164
I have not tried Bard, and I don't know whether Bard can analyze a long instruction manual.

Thank you for your interest.

Re: Zorro Trader GPT [Re: TipmyPip] #487966
12/04/23 09:16
fairtrader, Newbie, Joined: Dec 2023, Posts: 8
This is super cool, thank you. All my newbie questions were answered in a jiffy, and I now have GPT as a coding assistant, which reduces the learning curve massively. All the best.

Re: Zorro Trader GPT [Re: fairtrader] #487968
12/04/23 11:34
TipmyPip (OP), Member, Joined: Sep 2017, Posts: 164
Thank you too.

Re: Zorro Trader GPT [Re: TipmyPip] #487979
12/10/23 20:09
NewtraderX, Newbie, Joined: Mar 2021, Posts: 35, Ocean county, Florida
Hello TipmyPip,
Do I need to pay 20 dollars per month to be able to use this specific GPT?

Re: Zorro Trader GPT [Re: NewtraderX] #487982
12/13/23 11:01
TipmyPip (OP), Member, Joined: Sep 2017, Posts: 164
Well, 20 dollars per month can produce strategies you have not dreamed of.

Look into the Xor Memory Problem example -> https://opserver.de/ubb7/ubbthreads.php?ubb=showflat&Number=487981#Post487981

Last edited by TipmyPip; 12/13/23 11:29.
Re: Zorro Trader GPT [Re: TipmyPip] #488017
12/28/23 14:22
TipmyPip (OP), Member, Joined: Sep 2017, Posts: 164
A new phase of algorithmic work: optimized data structures that enable better decision-making in optimal time.

Code
Data Structure:
- Array of KeyValue objects

Operations:
- CreateDictionary(size)
- Insert(key, value)
- Lookup(key)

Function customHash(key, size):
    return key % size

Function CreateDictionary(size):
    dictionary = new KeyValue[size]
    for i = 0 to size - 1:
        dictionary[i].key = -1
    return dictionary

Function Insert(dictionary, size, key, value):
    index = customHash(key, size)
    while dictionary[index].key != -1:
        index = (index + 1) % size
    dictionary[index].key = key
    dictionary[index].value = value

Function Lookup(dictionary, size, key):
    index = customHash(key, size)
    while dictionary[index].key != key:
        if dictionary[index].key == -1:
            return -1
        index = (index + 1) % size
    return dictionary[index].value

___________________________________________

Data Structure:
- Array of ComplexElement objects

Operations:
- InitializeComplexDataStructure(size)
- ExecuteComplexOperation(key)

Function InitializeComplexDataStructure(size):
    complexDataStructure = new ComplexElement[size]
    // Initialize complexDataStructure as needed
    return complexDataStructure

Function ExecuteComplexOperation(complexDataStructure, size, key):
    // Perform a complex trading operation on complexDataStructure based on the key
    // This operation has a complexity of O(n^(n*n-1)).

Main:
    n = 10
    dictionarySize = n * n * n * n
    complexDataStructureSize = n * n * n * n  // Adjust the size based on complexity requirements

    dictionary = CreateDictionary(dictionarySize)
    complexDataStructure = InitializeComplexDataStructure(complexDataStructureSize)

    for i = 0 to n - 1:
        for j = 0 to n - 1:
            key = i * n + j
            value = key * key
            Insert(dictionary, dictionarySize, key, value)

    searchKey = 7
    result = Lookup(dictionary, dictionarySize, searchKey)
    if result != -1:
        Print "Value for key", searchKey, "in ComplexDictionary:", result
    else:
        Print "Key", searchKey, "not found in ComplexDictionary."

    // Execute a complex trading operation on ComplexTradingOperation
    complexKey = 5  // Adjust the key as needed
    ExecuteComplexOperation(complexDataStructure, complexDataStructureSize, complexKey)
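
For reference, the dictionary half of this pseudocode translates almost line for line into lite-C, with a static array standing in for new, since lite-C has no such operator (this is my sketch; the names and the fixed size are assumptions):

Code
#define DICT_SIZE 1000

typedef struct KeyValue {
  int key;
  var value;
} KeyValue;

KeyValue Dict[DICT_SIZE]; // static pool instead of dynamic allocation

int customHash(int key, int size) { return key % size; }

// mark all slots empty (-1), as in CreateDictionary
void createDictionary()
{
  int i;
  for(i = 0; i < DICT_SIZE; i++)
    Dict[i].key = -1;
}

// linear probing; assumes the table never fills up, like the pseudocode
void insertKV(int key, var value)
{
  int index = customHash(key, DICT_SIZE);
  while(Dict[index].key != -1)
    index = (index+1) % DICT_SIZE;
  Dict[index].key = key;
  Dict[index].value = value;
}

var lookupKV(int key)
{
  int index = customHash(key, DICT_SIZE);
  while(Dict[index].key != key) {
    if(Dict[index].key == -1) return -1; // key not present
    index = (index+1) % DICT_SIZE;
  }
  return Dict[index].value;
}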

Last edited by TipmyPip; 12/28/23 14:30.
Re: Zorro Trader GPT [Re: TipmyPip] #488135
02/13/24 16:24
scatters, Newbie, Joined: Apr 2021, Posts: 22
Hi @TipmyPip,

Would Zorro Trader GPT help me convert my TradingView Pine Script indicator into Zorro script? If so, what would the difference be between using Zorro Trader GPT and just using ChatGPT?

Re: Zorro Trader GPT [Re: scatters] #488139
02/16/24 06:53
TipmyPip (OP), Member, Joined: Sep 2017, Posts: 164
It will, but you would need to debug; it will convert at least 80%.

To achieve 100%, you will need to debug together with the AI if you are not familiar with the syntax of both languages.
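
As a toy illustration of the kind of mapping involved (my own example, not GPT output), here is a Pine Script EMA crossover next to its lite-C counterpart:

Code
// Pine: fast = ta.ema(close, 12)
//       slow = ta.ema(close, 26)
//       long on ta.crossover(fast, slow), flat on ta.crossunder
function run()
{
  BarPeriod = 60;
  vars Price = series(priceClose());
  vars Fast = series(EMA(Price,12));
  vars Slow = series(EMA(Price,26));
  if(crossOver(Fast,Slow))  // Zorro's equivalent of ta.crossover
    enterLong();
  if(crossUnder(Fast,Slow)) // ta.crossunder
    exitLong();
}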

Last edited by TipmyPip; 02/16/24 07:01.
ZorroTraderGPT Update (2.6) [Re: TipmyPip] #488147
03/06/24 09:27
TipmyPip (OP), Member, Joined: Sep 2017, Posts: 164
Updated ZorroTraderGPT to version 2.6.

If you consider helping out, you can always help find bugs and improve on our ideas. Thank you.

Code
#include <contract.c>

// Define your global parameters such as target profit, days to expiration, and strike price offset
var TargetProfit = 500; // Example target profit
int DaysToExpiration = 30; // Target days until expiration
var StrikeOffset = 50; // Offset from the current price for strike selection

void run() {
  // Setup basic parameters
  BarPeriod = 1440; // Use daily bars
  LookBack = 0; // No need for historical bars in options trading setup
  StartDate = 2020;
  EndDate = 2024; // Set your backtest period
  assetList("AssetsIB");
  asset("SPY"); // Example using SPY ETF as the underlying asset

  // Ensure we're trading in American Options for SPY
  AmericanOption = 1;

  // Update the contract chain for the underlying asset
  if(!contractUpdate(Asset, 0, CALL | PUT)) return;

  // Trading logic executed once per day
  if(is(EXITRUN)) return; // Skip logic at the end of the backtest
  
  // Define your strangle strategy here
  if(NumOpenLong + NumOpenShort == 0) { // Check if there's no open position
    // Calculate target strike prices based on current price and offset
    var CurrentPrice = priceClose(0);
    var StrikeCall = CurrentPrice + StrikeOffset;
    var StrikePut = CurrentPrice - StrikeOffset;

    // Attempt to find and enter a Strangle combo
    if(combo(
      contractFind(CALL, DaysToExpiration, StrikeCall), 1, // Buy 1 Call
      contractFind(PUT, DaysToExpiration, StrikePut), 1,  // Buy 1 Put
      0, 0, 0, 0)) { // combo() takes 4 contract/amount pairs; unused legs are 0
        // Enter the combo trade
        enterLong(comboLeg(1)); // Enter long for the call option leg
        enterLong(comboLeg(2)); // Enter long for the put option leg
    }
  }

  // Monitor and manage open positions
  for(open_trades) { // Loop through all open trades
    if(TradeIsOption && TradeIsOpen && (comboProfit(TradePriceClose, 1) > TargetProfit || daysToExpiration() < 5)) {
      exitTrade(ThisTrade); // Close the trade if target profit is reached or approaching expiration
    }
  }
}

// Placeholder function for days to expiration calculation - implement as needed
int daysToExpiration() {
  // Custom logic to calculate and return days to expiration for the current combo
  return 10; // Placeholder return value
}

// Placeholder function for calculating combo profit - implement based on actual requirements
var comboProfit(var CurrentClosePrice, int Leg) {
  // Custom logic to calculate and return profit for the combo based on current prices
  return 600; // Placeholder return value
}

Last edited by TipmyPip; 03/06/24 09:44.
Re: Zorro Trader GPT [Re: TipmyPip] #488155
04/01/24 11:43
TipmyPip (OP), Member, Joined: Sep 2017, Posts: 164
Asset Ownership: This strategy assumes you initially buy shares of SPY if you don't already own them.

Option Sale: It sells a call option on SPY with a strike price determined by StrikeOffset above the current price and targets DaysToExpiration for expiration.

Position Management: The script also checks for the option position to close it if the premium target is reached or the expiration is near.


Code
#include <contract.c>

// Global parameters for the covered call strategy
var PremiumTarget = 100; // Target premium income from selling the call
int DaysToExpiration = 45; // Target days until expiration for the call option
var StrikeOffset = 100; // Offset from the current price for strike selection

void run() {
  // Setup basic parameters
  BarPeriod = 1440; // Use daily bars
  LookBack = 0; // No need for historical bars in this setup
  StartDate = 2020;
  EndDate = 2024; // Set your backtest period
  assetList("AssetsIB");
  asset("SPY"); // Example using SPY ETF as the underlying asset
  
  // Ensure we're trading in American Options for SPY
  AmericanOption = 1;

  // Update the contract chain for the underlying asset
  if(!contractUpdate(Asset, 0, CALL)) return;

  // Trading logic executed once per day
  if(is(EXITRUN)) return; // Skip logic at the end of the backtest

  // Check if we already own SPY
  if(!NumOpenLong) enterLong(1); // Enter long position if we don't own SPY

  // Sell a call option if we haven't already
  if(NumOpenShort == 0) {
    var CurrentPrice = priceClose(0);
    var StrikeCall = CurrentPrice + StrikeOffset;

    // Finding the call option contract
    CONTRACT* callContract = contractFind(CALL, DaysToExpiration, StrikeCall);
    if(callContract) {
      // Enter a short position by selling the call option
      enterShort(1, callContract); 
    }
  }

  // Managing the open option position
  for(open_trades) {
    CONTRACT* c = ThisTrade->Contract;
    if(TradeIsOption && TradeIsShort && (comboProfit(c->fAsk, 1) > PremiumTarget || daysToExpiration(c) < 5)) {
      exitTrade(ThisTrade); // Close the call option if premium target is reached or approaching expiration
    }
  }
}

// A rough helper for days to expiration; note that CONTRACT->Expiry holds the
// date in YYYYMMDD format, so a production version must convert it properly
int daysToExpiration(CONTRACT* c) {
  if(!c) return 0;
  return (c->Expiry - wdate()) / 86400; // placeholder arithmetic, not exact
}

Last edited by TipmyPip; 04/01/24 21:17.
Re: Zorro Trader GPT [Re: TipmyPip] #488156
04/01/24 11:53
TipmyPip (OP), Member, Joined: Sep 2017, Posts: 164
Selling the Call Option: Use fuzzy logic to evaluate the market condition and the appropriateness of the option's strike price and days to expiration.

Closing the Position: Apply fuzzy logic to decide on closing the option based on current profit and time to expiration.

Code
#include <contract.c>

var FuzzyRange = 0.1; // Adjust based on market volatility or strategy needs
var PremiumTarget = 100; // Example target premium from selling a call
var StrikeOffset = 50; // Offset from the current price for strike selection
int DaysToExpiration = 30; // Target days until expiration

// Initialize fuzzy logic settings - no changes needed here
void initFuzzyLogic() {
  FuzzyRange = 0.1; 
}

// Decision-making on whether to initiate an option combo trade
bool shouldSellCallOption(var currentPrice, var strikeOffset) {
  // Selling a call option if the current price plus strikeOffset is fuzzy-above the current price
  return fuzzy(aboveF(currentPrice + strikeOffset, currentPrice));
}

// Logic for closing position based on profit and days to expiration
bool shouldClosePosition(var tradeProfit, int daysToExpiration) {
  var profitCondition = aboveF(tradeProfit, PremiumTarget);
  var expirationCondition = belowF((var)daysToExpiration, 5.0); // Close if less than 5 days to expiration
  
  return fuzzy(orF(profitCondition, expirationCondition));
}

void run() {
  BarPeriod = 1440; // Use daily bars
  LookBack = 0; // No need for historical bars in this strategy
  StartDate = 2020;
  EndDate = 2024;
  assetList("AssetsIB");
  asset("SPY");
  AmericanOption = 1; // Trading American options
  
  initFuzzyLogic();

  if (!contractUpdate(Asset, 0, CALL)) return;

  if (is(EXITRUN)) return;

  // Decision to sell a call option
  if (NumOpenShort == 0 && shouldSellCallOption(priceClose(0), StrikeOffset)) {
    CONTRACT* CallContract = contractFind(CALL, DaysToExpiration, priceClose(0) + StrikeOffset);
    if (CallContract) {
      combo(CallContract, -1, 0, 0, 0, 0, 0, 0); // Prepare a combo to sell 1 call (4 leg pairs; unused legs 0)
      enterShort(comboLeg(1)); // Enter short position on the combo leg
    }
  }

  // Loop through open trades to manage the position
  for(open_trades) {
    if (TradeIsOption && TradeIsShort && shouldClosePosition(TradeProfit, daysToExpiration())) {
      exitTrade(ThisTrade); // Close the position based on fuzzy logic conditions
    }
  }
}

// You might need to implement or adjust the daysToExpiration function to suit your requirements
int daysToExpiration() {
  // Implement logic to calculate days to expiration for the current option combo
  return 10; // Placeholder return value
}

Last edited by TipmyPip; 04/01/24 21:20.
Re: Zorro Trader GPT [Re: TipmyPip] #488157
04/01/24 11:59
TipmyPip (OP), Member, Joined: Sep 2017, Posts: 164
Selling Call Options Based on Trend: Sell call options when the underlying asset shows a strong upward trend, as indicated by a high value from risingF, implying that the price rise is consistent and may continue.

Closing Positions with Trend Reversals: Consider closing the call option positions when the asset's price starts showing a downward trend, as indicated by fallingF, suggesting a reversal or correction that might impact the strategy negatively.

Code
#include <contract.c>

vars PriceClose;

void run() {
    BarPeriod = 1440; // Daily bars for trend analysis
    LookBack = 30; // Looking back 30 bars for accurate trend detection
    StartDate = 2020;
    EndDate = 2024;
    assetList("AssetsIB");
    asset("SPY");

    PriceClose = series(priceClose());

    AmericanOption = 1; // Trading American Options
    
    if (is(EXITRUN)) return; // Exit if at the end of the backtest

    // Ensure the contract chain is updated for SPY
    if (!contractUpdate(Asset, 0, CALL | PUT)) return;

    // Entry Logic: Detect a rising trend to sell a call option
    if (NumOpenLong + NumOpenShort == 0) { // Check if there's no open position
        var trendStrength = risingF(PriceClose); // Assess the rising trend strength
        if (trendStrength > 0.5) { // Threshold for a strong rising trend
            // Define the strike price a bit above the current price for selling a call
            var StrikeCall = priceClose(0) + 100; // Offset from the current price for the strike
            CONTRACT* c = contractFind(CALL, 45, StrikeCall); // Find a call option ~45 days from expiration
            if (c) {
                if (combo(c, -1, 0, 0, 0, 0, 0, 0) > 0) { // Prepare the combo for selling 1 call
                    enterShort(comboLeg(1)); // Enter short for the call option leg
                }
            }
        }
    }

    // Exit Logic: Close the short call option position on a falling trend
    for(open_trades) {
        if (TradeIsOption && TradeIsShort) {
            var reversalStrength = fallingF(PriceClose); // Assess the strength of the falling trend
            if (reversalStrength > 0.5) { // Threshold indicating a strong downward trend
                exitTrade(ThisTrade); // Close the call option position
            }
        }
    }
}

Last edited by TipmyPip; 04/01/24 21:22.
Re: Zorro Trader GPT [Re: TipmyPip] #488158
04/01/24 12:06
TipmyPip (OP), Member, Joined: Sep 2017, Posts: 164
Market Volatility: Determine the market's volatility to adjust the sensitivity of our fuzzy logic conditions. High volatility might require more stringent conditions to enter or exit a trade to avoid false signals.

Trend Confirmation: Use a combination of risingF and fallingF functions to confirm the strength and direction of the market trend.

Entry Condition: Enter a trade based on a confirmed upward trend or after a significant market bottom (valleyF), adjusted for market volatility.

Exit Condition: Exit a trade based on a confirmed downward trend or after a significant market peak (peakF), also adjusted for market volatility.



Code
#include <contract.c>

vars PriceClose;
var Volatility;

void initFuzzyLogicSettings() {
  FuzzyRange = 0.05; // Adjust based on backtesting
}

var calculateVolatility(vars Data, int period) {
  var sumDelta = 0;
  for(int i = 1; i <= period; i++) {
    sumDelta += abs(Data[i] - Data[i-1]);
  }
  return sumDelta / period;
}

var adjustFuzzyForVolatility(var fuzzyValue) {
  var adjustmentFactor = 1 + Volatility * 10;
  return clamp(fuzzyValue / adjustmentFactor, 0, 1);
}

bool fuzzyEntryCondition(vars Data) {
  var entryFuzzy = max(risingF(Data), valleyF(Data));
  return adjustFuzzyForVolatility(entryFuzzy) > 0.5;
}

bool fuzzyExitCondition(vars Data) {
  var exitFuzzy = max(fallingF(Data), peakF(Data));
  return adjustFuzzyForVolatility(exitFuzzy) > 0.5;
}

void optionComboTrade() {
  CONTRACT* C1; // Define pointer for the first leg of the combo
  CONTRACT* C2; // Define pointer for the second leg of the combo
  
  // Dynamically adjust option strike based on current price and volatility
  var strikeCall = round(priceClose(0) + 10 + Volatility * 5); // Example for call strike
  var strikePut = round(priceClose(0) - 10 - Volatility * 5); // Example for put strike
  
  // Initialize contracts for a strangle combo
  C1 = contractFind(CALL, 30, strikeCall); // Find call option contract
  C2 = contractFind(PUT, 30, strikePut); // Find put option contract

  // Check if contracts are found and if entry condition is met
  if(C1 && C2 && fuzzyEntryCondition(PriceClose)) {
    combo(C1, 1, C2, 1, 0, 0, 0, 0); // Create a strangle combo
    enterLong(comboLeg(1)); // Enter long on both legs of the combo
    enterLong(comboLeg(2));
  }
}

void run() {
  BarPeriod = 60;
  LookBack = 100;
  StartDate = 2020;
  EndDate = 2024;
  assetList("AssetsIB");
  asset("SPY");

  PriceClose = series(priceClose());
  initFuzzyLogicSettings();

  Volatility = calculateVolatility(PriceClose, 20); // Calculate market volatility

  if (is(EXITRUN)) return;

  optionComboTrade(); // Execute the option combo trade based on fuzzy logic conditions
}

Last edited by TipmyPip; 04/01/24 21:14.
Re: Zorro Trader GPT [Re: TipmyPip] #488159
04/01/24 12:23
TipmyPip (OP), Member, Joined: Sep 2017, Posts: 164
The following strategy calculates the EMA of the ATR at the beginning of the run to establish a volatility baseline.
It then iterates through selected assets (SPY, AAPL, MSFT), applying its entry and exit logic to each based on the current market conditions as interpreted through its fuzzy logic framework.
Trades are made based on the composite fuzzy logic conditions, with the strategy entering long positions when the entry conditions are met and exiting positions when the exit conditions are met.


Code
#include <contract.c>

vars PriceClose, VolatilityEMA;

void init() {
  assetList("AssetsIB");
  BarPeriod = 60; // Setting bar period for the price series
  LookBack = 100; // Lookback period for technical indicators

  PriceClose = series(priceClose());
  initFuzzyLogicSettings();
  calculateEMAVolatility(20); // Initialize volatility calculation
}

void initFuzzyLogicSettings() {
  FuzzyRange = 0.05; // Adjust based on backtesting
}

void calculateEMAVolatility(int period) {
  vars ATRValues = series(ATR(period));
  VolatilityEMA = series(EMA(ATRValues, period));
}

void tradeOptions() {
  // Assuming current asset is SPY and we're setting up a strangle
  if (NumOpenLong == 0 && fuzzyEntryCondition()) {
    // Find contracts for the call and put options
    CONTRACT* callContract = contractFind("Call", 30, priceClose(0) + 10); // Example: 30 days to expiration, strike 10 points above
    CONTRACT* putContract = contractFind("Put", 30, priceClose(0) - 10); // Example: 30 days to expiration, strike 10 points below

    if(callContract && putContract) {
      // Setup the combo - buying 1 call and 1 put
      combo(callContract, 1, putContract, 1, 0, 0, 0, 0);
      
      // Enter the combo trade
      enterLong(comboLeg(1)); // Enter long for the call option leg
      enterLong(comboLeg(2)); // Enter long for the put option leg
      
      printf("\nEntered a Strangle on SPY");
    }
  }
}

void run() {
  if(is(INITRUN)) init(); // init() is not called automatically; invoke it once
  StartDate = 2020;
  EndDate = 2024;
  assetList("AssetsIB");
  asset("SPY");

  while(asset(loop("SPY","AAPL","MSFT"))) {
    if (is(EXITRUN)) continue;
    tradeOptions();
  }
}

Last edited by TipmyPip; 04/01/24 21:27.
Re: Zorro Trader GPT [Re: TipmyPip] #488160
04/01/24 21:39
TipmyPip (OP), Member, Joined: Sep 2017, Posts: 164
The following strategy adapts to changing market conditions, using both trend analysis and volatility measurements to inform trade decisions.

Risk Management: Incorporates measures of market volatility into trade decision processes, although explicit risk management tactics (e.g., position sizing, stop-loss orders) are not detailed within the provided summary.

Theoretical Pricing: Utilizes the Black-Scholes model for option valuation, aiding in the identification of potentially mispriced options, though the direct application of this pricing within the trade execution process is implied rather than explicitly defined.

Code
#include <contract.h>

// Initialize global variables for storing price and volatility data
vars PriceClose, VolatilityEMA;

void init() {
  assetList("AssetsIB");
  BarPeriod = 60;
  LookBack = 100;
  StartDate = 2020;
  EndDate = 2024;
  
  PriceClose = series(priceClose());
  initFuzzyLogicSettings();
  calculateEMAVolatility(20); // Initialize volatility calculation
}

void initFuzzyLogicSettings() {
  FuzzyRange = 0.05; // Adjust based on backtesting
}

void calculateEMAVolatility(int period) {
  vars ATRValues = series(ATR(period));
  VolatilityEMA = series(EMA(ATRValues, period));
}

// Cumulative Normal Distribution Function for use in Black-Scholes Model
var CNDF(var x) {
  int neg = (x < 0.0) ? 1 : 0;
  if (neg) x = -x;
  
  var k = (1.0 / ( 1.0 + 0.2316419 * x));
  var y = (((1.330274429 * k - 1.821255978) * k + 1.781477937) *
           k - 0.356563782) * k + 0.319381530;
  y = 1.0 - 0.398942280401 * exp(-0.5 * x * x) * y;
  
  return (1.0 - neg) * y + neg * (1.0 - y);
}

// Black-Scholes Option Pricing Model
var BlackScholes(char *CallPutFlag, var S, var X, var T, var r, var v) {
  var d1, d2;
  d1 = (log(S / X) + (r + 0.5 * v * v) * T) / (v * sqrt(T));
  d2 = d1 - v * sqrt(T);
  
  if (CallPutFlag[0] == 'c' || CallPutFlag[0] == 'C') {
    return S * CNDF(d1) - X * exp(-r * T) * CNDF(d2);
  } else {
    return X * exp(-r * T) * CNDF(-d2) - S * CNDF(-d1);
  }
}

bool fuzzyEntryCondition() {
  return risingF(PriceClose) > 0.5 && VolatilityEMA[0] < 0.2;
}

bool fuzzyExitCondition() {
  return fallingF(PriceClose) > 0.5 || VolatilityEMA[0] > 0.3;
}

void tradeOptions() {
  CONTRACT* CallContract;
  CONTRACT* PutContract;
  var strikePrice = round(priceClose(0),100); // Example for rounding to nearest hundred
  var expirationDays = 30; // Targeting options 30 days to expiration

  if (fuzzyEntryCondition()) {
    // Prepare to trade a call option
    CallContract = contractFind("Call", strikePrice, wdate() + expirationDays * 86400);
    if(CallContract) {
      // Buy the call option
      combo(CallContract, 1, 0, 0, 0, 0, 0, 0);
      enterLong(comboLeg(1));
    }
  } else if (fuzzyExitCondition()) {
    // Prepare to trade a put option
    PutContract = contractFind("Put", strikePrice, wdate() + expirationDays * 86400);
    if(PutContract) {
      // Buy the put option
      combo(0, 0, PutContract, 1, 0, 0, 0, 0);
      enterLong(comboLeg(2));
    }
  }
}

void main() {
  init();
  while(asset(loop("Assets"))) {
    if (is(EXITRUN)) continue;
    tradeOptions(); // Execute the option trading logic based on market conditions
  }
}
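
A quick sanity check for the BlackScholes routine above (my snippet; the reference numbers are the standard textbook case S=100, X=100, T=1, r=5%, v=20%):

Code
void main()
{
  // expected: call ~10.4506, put ~5.5735 (put-call parity: P = C - S + X*exp(-r*T))
  var C = BlackScholes("Call", 100, 100, 1, 0.05, 0.2);
  var P = BlackScholes("Put",  100, 100, 1, 0.05, 0.2);
  printf("\nBS call %.4f  put %.4f", C, P);
}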

Re: Zorro Trader GPT [Re: TipmyPip] #488164
04/05/24 04:51
Smon, Member, Joined: Dec 2014, Posts: 206, Germany
Hi!

GPT-4 no longer seems to be the best choice when it comes to coding. I'm developing code with Claude 3 Sonnet (free version), and it's really great; I need to iterate less often compared to GPT-4. It has a 200,000-token context length, and at least the biggest model (Claude 3 Opus) is said to have almost perfect recall. I found a way to access Claude 3 Opus from a restricted country: https://poe.com/
You can even build your custom GPTs there. I wonder how you fed ChatGPT the knowledge about Zorro. Did you preprocess the manual, i.e. summarize it?

More info about Claude-3: https://www.anthropic.com/news/claude-3-family

Last edited by Smon; 04/05/24 04:57.
Re: Zorro Trader GPT [Re: Smon] #488165
04/05/24 06:35
TipmyPip (OP), Member, Joined: Sep 2017, Posts: 164
Thank you for sharing the information. I suppose you need more input so that GPT-4 will focus on your intentions and get the right results. But running a free version needs a serious graphics card, and being limited to 40 questions in 3 hours is quite hard without computing power. I have a number of ideas on how to feed ChatGPT, and I am quite sure it is improving.

Last edited by TipmyPip; 04/05/24 07:08.
Re: Zorro Trader GPT [Re: TipmyPip] #488166
04/06/24 05:12
Smon, Member, Joined: Dec 2014, Posts: 206, Germany
I believe there was a misunderstanding. The Claude3-Sonnet is hosted yet remains free of charge. However, it is not an open-source model. I was considering the possibility of integrating the entire manual into a custom GPT, but I wanted to consult with you first on how you managed to do it. Actually, I began exploring this concept several months ago and even developed a fine-tuned version of GPT-3.5 by converting the manual into question-answer pairs for the training dataset. It appears that this dataset was likely too small, leading me to abandon the fine-tuning process. It seems that retrieval-augmented generation might be the better approach.
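
For illustration, one record of the question-answer training format described here might look like this (my own toy example, in OpenAI's chat fine-tuning JSONL layout, one JSON object per line):

Code
{"messages": [{"role": "user", "content": "How do I set 1-hour bars in Zorro?"}, {"role": "assistant", "content": "Set BarPeriod = 60; in the run() function."}]}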

Re: Zorro Trader GPT [Re: TipmyPip] #488167
04/06/24 08:11
TipmyPip (OP), Member, Joined: Sep 2017, Posts: 164
Wow, that is great free use of an LLM without paying for the service; I am quite sure Claude 3 is much better. But the custom agents API is already coming out, which will increase the accuracy of the answers.

In addition, I use Mathematica and Wolfram with OpenAI, so Claude 3 will not have any advantage in my case. But thank you for sharing the information.

Re: Zorro Trader GPT [Re: Smon] #488228
04/27/24 13:50
TipmyPip (OP), Member, Joined: Sep 2017, Posts: 164
"I believe there was a misunderstanding. The Claude3-Sonnet is hosted yet remains free of charge. However, it is not an open-source model. I was considering the possibility of integrating the entire manual into a custom GPT, but I wanted to consult with you first on how you managed to do it. Actually, I began exploring this concept several months ago and even developed a fine-tuned version of GPT-3.5 by converting the manual into question-answer pairs for the training dataset. It appears that this dataset was likely too small, leading me to abandon the fine-tuning process. It seems that retrieval-augmented generation might be the better approach.

well, now I believe you, and I have started making some changes.

Re: Zorro Trader GPT [Re: TipmyPip] #488290
07/06/24 15:23
TipmyPip (OP), Member, Joined: Sep 2017, Posts: 164
Machine Learning for Option Trading

Code
#include <contract.h>

// Initialize global variables for storing price and volatility data
vars PriceClose, VolatilityEMA, BollingerBands, StdDevVolatility;

// Parameter optimization (BarPeriod, LookBack, FuzzyRange, TakeProfit, and Lots
// are predefined Zorro variables and must not be redeclared)
var emaPeriod, StopLoss, threshold, momentumThreshold;

void init() {
  assetList("AssetsIB");
  BarPeriod = optimize(60, 30, 120, 10);
  LookBack = optimize(100, 50, 200, 10);
  FuzzyRange = optimize(0.05, 0.01, 0.1, 0.01);
  emaPeriod = optimize(20, 10, 50, 5);
  
  PriceClose = series(priceClose());
  calculateEMAVolatility(emaPeriod); // Initialize volatility calculation
  
  // Initialize additional volatility measures
  BollingerBands = series(BBands(priceClose(), 20, 2));
  StdDevVolatility = series(StdDev(priceClose(), 20));
  
  // Risk management parameters
  StopLoss = optimize(50, 10, 100, 10) * ATR(20);
  TakeProfit = optimize(100, 50, 200, 10) * ATR(20);
  
  // Position sizing
  Lots = optimize(1, 0.1, 2, 0.1) * (1 / VolatilityEMA[0]);
}

void calculateEMAVolatility(int period) {
  vars ATRValues = series(ATR(period));
  VolatilityEMA = series(EMA(ATRValues, period));
}

// Cumulative Normal Distribution Function for use in Black-Scholes Model
var CNDF(var x) {
  int neg = (x < 0.0) ? 1 : 0;
  if (neg) x = -x;
  
  var k = (1.0 / ( 1.0 + 0.2316419 * x));
  var y = (((1.330274429 * k - 1.821255978) * k + 1.781477937) *
           k - 0.356563782) * k + 0.319381530;
  y = 1.0 - 0.398942280401 * exp(-0.5 * x * x) * y;
  
  return (1.0 - neg) * y + neg * (1.0 - y);
}

// Black-Scholes Option Pricing Model
var BlackScholes(char *CallPutFlag, var S, var X, var T, var r, var v) {
  var d1, d2;
  d1 = (log(S / X) + (r + 0.5 * v * v) * T) / (v * sqrt(T));
  d2 = d1 - v * sqrt(T);
  
  if (CallPutFlag[0] == 'c' || CallPutFlag[0] == 'C') {
    return S * CNDF(d1) - X * exp(-r * T) * CNDF(d2);
  } else {
    return X * exp(-r * T) * CNDF(-d2) - S * CNDF(-d1);
  }
}

bool enhancedFuzzyEntryCondition() {
  return risingF(PriceClose) > 0.5 && VolatilityEMA[0] < 0.2 && RSI(PriceClose, 14) < 30 && additionalFilters();
}

bool enhancedFuzzyExitCondition() {
  return fallingF(PriceClose) > 0.5 || VolatilityEMA[0] > 0.3;
}

bool additionalFilters() {
  // MarketVolatility(), momentum, threshold and momentumThreshold are placeholders
  // to be supplied by the user; they are not built-in Zorro functions or variables
  return MarketVolatility() > threshold && momentum > momentumThreshold;
}

void tradeOptions() {
  CONTRACT* CallContract;
  CONTRACT* PutContract;
  var strikePrice = round(priceClose(0), 100); // Example for rounding to nearest hundred
  var expirationDays = 30; // Targeting options 30 days to expiration

  if (enhancedFuzzyEntryCondition()) {
    // Prepare to trade a call option
    CallContract = contractFind("Call", strikePrice, wdate() + expirationDays * 86400);
    if(CallContract) {
      // Buy the call option
      combo(CallContract, 1, 0, 0, 0, 0, 0, 0);
      enterLong(comboLeg(1));
    }
  } else if (enhancedFuzzyExitCondition()) {
    // Prepare to trade a put option
    PutContract = contractFind("Put", strikePrice, wdate() + expirationDays * 86400);
    if(PutContract) {
      // Buy the put option
      combo(0, 0, PutContract, 1, 0, 0, 0, 0);
      enterLong(comboLeg(2));
    }
  }
}

void run() {
  if (is(INITRUN)) {
    init();
    set(TESTNOW | TRAINMODE);
  }
  
  while(asset(loop("Assets"))) {
    if (is(EXITRUN)) continue;
    tradeOptions(); // Execute the option trading logic based on market conditions

    // Debugging and logging
    if (is(LOGFILE)) {
      log("Entering trade: PriceClose = %.2f, VolatilityEMA = %.2f", PriceClose[0], VolatilityEMA[0]);
    }
  }
  
  // Machine learning integration: trainModel() and predictModel() are
  // placeholders, not built-in Zorro functions; supply your own implementations
  if(is(TRAINMODE)) {
    trainModel(PriceClose, LookBack);
  } else {
    var prediction = predictModel(PriceClose, LookBack);
    if(prediction > threshold) {
      enterLong();
    } else if(prediction < -threshold) {
      enterShort();
    }
  }
}

Last edited by TipmyPip; 07/06/24 15:24.
Gaussian Channel Adaptive Strategy [Re: TipmyPip] #488291
07/06/24 15:31
TipmyPip (OP), Member, Joined: Sep 2017, Posts: 164
Gaussian Channel Adaptive Moving Average Strategy, for those who like adaptive strategies :-)


Code
#include <default.c>

var alpha, beta;
var filt, filttr, hband, lband;
vars PriceClose, GaussianFiltered, GaussianFilteredTR;

void init() {
  BarPeriod = 60; // 1-hour bars
  LookBack = 144; // Lookback period for the Gaussian filter

  PriceClose = series(priceClose());
  GaussianFiltered = series(0);
  GaussianFilteredTR = series(0);
  
  alpha = calculateAlpha(144, 4); // Sampling Period and Poles
  beta = (1 - cos(2 * PI / 144)) / (pow(1.414, 2 / 4) - 1);
}

var calculateAlpha(int period, int poles) {
  var beta = (1 - cos(2 * PI / period)) / (pow(1.414, 2 / poles) - 1);
  return -beta + sqrt(beta * beta + 2 * beta);
}

void calculateGaussianFilter(vars src, int poles, int period) {
  vars filtSeries = series(0);
  vars filtSeriesTR = series(0);
  var lag = (period - 1) / (2 * poles);
  vars srcData = series(0);
  vars trData = series(0);
  
  for(int i = 0; i < Bar; i++) {
    if(i > lag) {
      srcData[i] = src[i] + (src[i] - src[i - lag]);
      trData[i] = TR() + (TR() - TR(lag));
    } else {
      srcData[i] = src[i];
      trData[i] = TR();
    }
  }
  
  for(int i = 0; i < Bar; i++) {
    filtSeries[i] = filt9x(alpha, srcData[i], 1, poles);
    filtSeriesTR[i] = filt9x(alpha, trData[i], 1, poles);
  }
  
  GaussianFiltered[0] = filtSeries[0];
  GaussianFilteredTR[0] = filtSeriesTR[0];
  
  hband = GaussianFiltered[0] + GaussianFilteredTR[0] * 1.414;
  lband = GaussianFiltered[0] - GaussianFilteredTR[0] * 1.414;
}

var filt9x(var a, var s, int i, int poles) {
  static var f[10]; // previous filter values (plain var array, not a series)
  var x = 1 - a;
  var filt = pow(a, i) * s + i * x * f[1] - (i >= 2 ? 36 * pow(x, 2) * f[2] : 0) + (i >= 3 ? 84 * pow(x, 3) * f[3] : 0)
            - (i >= 4 ? 126 * pow(x, 4) * f[4] : 0) + (i >= 5 ? 126 * pow(x, 5) * f[5] : 0) - (i >= 6 ? 84 * pow(x, 6) * f[6] : 0)
            + (i >= 7 ? 36 * pow(x, 7) * f[7] : 0) - (i >= 8 ? 9 * pow(x, 8) * f[8] : 0) + (i == 9 ? 1 * pow(x, 9) * f[9] : 0);
  
  for(int j = 9; j > 0; j--) {
    f[j] = f[j - 1];
  }
  f[0] = filt;
  
  return filt;
}

bool crossOver(vars data, var level) {
  return (data[1] < level && data[0] >= level);
}

bool crossUnder(vars data, var level) {
  return (data[1] > level && data[0] <= level);
}

void run() {
  if(is(INITRUN)) {
    init();
  }

  calculateGaussianFilter(PriceClose, 4, 144);
  
  bool longCondition = crossOver(priceClose(), hband);
  bool closeAllCondition = crossUnder(priceClose(), hband);

  if (longCondition) {
    enterLong();
  }

  if (closeAllCondition) {
    exitLong();
  }
}

Multi-Factor Gaussian FX Strategy [Re: TipmyPip] #488292
07/06/24 15:56
TipmyPip (OP), Member, Joined: Sep 2017, Posts: 164
Following more correlation factors and market sentiment, we can also include factor exposure, market capitalization, and industry data from various data sources on the web:


Code
#include <default.c>

var alpha, beta;
var filt, filttr, hband, lband;
vars PriceClose, GaussianFiltered, GaussianFilteredTR;

// Define additional factor variables
vars FactorExposure, MarketCap, IndustryData;
var beta0, betaFactor, betaMktCap, betaIndustry;

// Initialize factors for each time period
void initFactors() {
  FactorExposure = series(0);
  MarketCap = series(0);
  IndustryData = series(0);
  
  // Set coefficients based on your model or optimization
  beta0 = 0.01;
  betaFactor = 0.02;
  betaMktCap = 0.03;
  betaIndustry = 0.04;
}

void init() {
  BarPeriod = 60; // 1-hour bars
  LookBack = 144; // Lookback period for the Gaussian filter

  PriceClose = series(priceClose());
  GaussianFiltered = series(0);
  GaussianFilteredTR = series(0);
  
  alpha = calculateAlpha(144, 4); // Sampling Period and Poles
  beta = (1 - cos(2 * PI / 144)) / (pow(1.414, 2 / 4) - 1);
  
  initFactors();
}

var calculateAlpha(int period, int poles) {
  var beta = (1 - cos(2 * PI / period)) / (pow(1.414, 2 / poles) - 1);
  return -beta + sqrt(beta * beta + 2 * beta);
}

void calculateGaussianFilter(vars src, int poles, int period) {
  vars filtSeries = series(0);
  vars filtSeriesTR = series(0);
  var lag = (period - 1) / (2 * poles);
  vars srcData = series(0);
  vars trData = series(0);
  
  for(int i = 0; i < Bar; i++) {
    if(i > lag) {
      srcData[i] = src[i] + (src[i] - src[i - lag]);
      trData[i] = TR() + (TR() - TR(lag));
    } else {
      srcData[i] = src[i];
      trData[i] = TR();
    }
  }
  
  for(int i = 0; i < Bar; i++) {
    filtSeries[i] = filt9x(alpha, srcData[i], 1, poles);
    filtSeriesTR[i] = filt9x(alpha, trData[i], 1, poles);
  }
  
  GaussianFiltered[0] = filtSeries[0];
  GaussianFilteredTR[0] = filtSeriesTR[0];
  
  hband = GaussianFiltered[0] + GaussianFilteredTR[0] * 1.414;
  lband = GaussianFiltered[0] - GaussianFilteredTR[0] * 1.414;
}

var filt9x(var a, var s, int i, int poles) {
  static var f[10]; // previous filter values (plain var array, not a series)
  var x = 1 - a;
  var filt = pow(a, i) * s + i * x * f[1] - (i >= 2 ? 36 * pow(x, 2) * f[2] : 0) + (i >= 3 ? 84 * pow(x, 3) * f[3] : 0)
            - (i >= 4 ? 126 * pow(x, 4) * f[4] : 0) + (i >= 5 ? 126 * pow(x, 5) * f[5] : 0) - (i >= 6 ? 84 * pow(x, 6) * f[6] : 0)
            + (i >= 7 ? 36 * pow(x, 7) * f[7] : 0) - (i >= 8 ? 9 * pow(x, 8) * f[8] : 0) + (i == 9 ? 1 * pow(x, 9) * f[9] : 0);
  
  for(int j = 9; j > 0; j--) {
    f[j] = f[j - 1];
  }
  f[0] = filt;
  
  return filt;
}

bool crossOver(vars data, var level) {
  return (data[1] < level && data[0] >= level);
}

bool crossUnder(vars data, var level) {
  return (data[1] > level && data[0] <= level);
}

// Predict returns based on factors
var predictReturn() {
  var predictedReturn = beta0 + betaFactor * FactorExposure[0] + betaMktCap * MarketCap[0] + betaIndustry * IndustryData[0];
  return predictedReturn;
}

void run() {
  if(is(INITRUN)) {
    init();
  }

  calculateGaussianFilter(PriceClose, 4, 144);
  
  bool longCondition = crossOver(priceClose(), hband) && predictReturn() > 0;
  bool closeAllCondition = crossUnder(priceClose(), hband) || predictReturn() < 0;

  if (longCondition) {
    enterLong();
  }

  if (closeAllCondition) {
    exitLong();
  }
}

Last edited by TipmyPip; 07/06/24 15:58.
Gaussian Bands Strategy [Re: TipmyPip] #488293
07/06/24 17:16
TipmyPip (OP), Member, Joined: Sep 2017, Posts: 164
Here is another version of the Gaussian Filter Strategy; it can be considered non-linear under multiple correlated factors of overlapping data:


Code
#include <default.c>

// Variables
var alpha, beta;
var filt, filttr, hband, lband;
vars PriceClose, GaussianFiltered, GaussianFilteredTR;
vars UpperBand, LowerBand;

// Initialization
void init() {
  BarPeriod = 60; // 1-hour bars
  LookBack = 144; // Lookback period for the Gaussian filter

  PriceClose = series(priceClose());
  GaussianFiltered = series(0);
  GaussianFilteredTR = series(0);
  UpperBand = series(0);
  LowerBand = series(0);

  alpha = calculateAlpha(144, 4); // Sampling Period and Poles
  beta = (1 - cos(2 * PI / 144)) / (pow(1.414, 2 / 4) - 1);
}

// Calculate Alpha
var calculateAlpha(int period, int poles) {
  var beta = (1 - cos(2 * PI / period)) / (pow(1.414, 2 / poles) - 1);
  return -beta + sqrt(beta * beta + 2 * beta);
}

// Modified Gaussian Filter with Bands
var filt9x_with_bands(var a, var s, int poles, var deviationMultiplier, var* upperBand, var* lowerBand) {
  static var f[10]; // Array to store previous filter values (plain var array, not a series)
  var x = 1 - a;
  var filt = s * a;

  // Precompute powers of x to avoid redundant calculations
  var x2 = x * x;
  var x3 = x2 * x;
  var x4 = x3 * x;
  var x5 = x4 * x;
  var x6 = x5 * x;
  var x7 = x6 * x;
  var x8 = x7 * x;
  var x9 = x8 * x;

  // Calculate the filter value iteratively
  for (int i = 1; i <= poles; i++) {
    switch (i) {
      case 1:
        filt += x * f[i];
        break;
      case 2:
        filt -= 36 * x2 * f[i];
        break;
      case 3:
        filt += 84 * x3 * f[i];
        break;
      case 4:
        filt -= 126 * x4 * f[i];
        break;
      case 5:
        filt += 126 * x5 * f[i];
        break;
      case 6:
        filt -= 84 * x6 * f[i];
        break;
      case 7:
        filt += 36 * x7 * f[i];
        break;
      case 8:
        filt -= 9 * x8 * f[i];
        break;
      case 9:
        filt += x9 * f[i];
        break;
    }
  }

  // Shift the previous values in the array
  for (int j = 9; j > 0; j--) {
    f[j] = f[j - 1];
  }
  f[0] = filt;

  // Calculate standard deviation of filter values
  var sum = 0, mean = 0, count = 10;
  for (int j = 0; j < count; j++) {
    sum += f[j];
  }
  mean = sum / count;

  var varianceSum = 0;
  for (int j = 0; j < count; j++) {
    varianceSum += (f[j] - mean) * (f[j] - mean);
  }
  var stddev = sqrt(varianceSum / count);

  // Calculate upper and lower bands
  *upperBand = filt + deviationMultiplier * stddev;
  *lowerBand = filt - deviationMultiplier * stddev;

  return filt;
}

// Gaussian Filter Calculation
void calculateGaussianFilter(vars src, int poles, int period) {
  vars filtSeries = series(0);
  vars filtSeriesTR = series(0);
  var lag = (period - 1) / (2 * poles);
  vars srcData = series(0);
  vars trData = series(0);
  
  for(int i = 0; i < Bar; i++) {
    if(i > lag) {
      srcData[i] = src[i] + (src[i] - src[i - lag]);
      trData[i] = TR() + (TR() - TR(lag));
    } else {
      srcData[i] = src[i];
      trData[i] = TR();
    }
  }
  
  for(int i = 0; i < Bar; i++) {
    var upper, lower;
    filtSeries[i] = filt9x_with_bands(alpha, srcData[i], poles, 1.414, &upper, &lower);
    filtSeriesTR[i] = filt9x_with_bands(alpha, trData[i], poles, 1.414, &upper, &lower);
    UpperBand[i] = upper;
    LowerBand[i] = lower;
  }
  
  GaussianFiltered[0] = filtSeries[0];
  GaussianFilteredTR[0] = filtSeriesTR[0];
  
  hband = UpperBand[0];
  lband = LowerBand[0];
}

// CrossOver and CrossUnder Functions
bool crossOver(vars data, var level) {
  return (data[1] < level && data[0] >= level);
}

bool crossUnder(vars data, var level) {
  return (data[1] > level && data[0] <= level);
}

// Main Trading Logic
void run() {
  if(is(INITRUN)) {
    init();
  }

  calculateGaussianFilter(PriceClose, 4, 144);
  
  bool longCondition = crossOver(priceClose(), hband);
  bool closeAllCondition = crossUnder(priceClose(), lband);

  if (longCondition) {
    enterLong();
  }

  if (closeAllCondition) {
    exitLong();
  }
}

Gaussian Decision Tree Hedging Strategy [Re: TipmyPip] #488305
07/11/24 05:43
TipmyPip (OP), Member, Joined: Sep 2017, Posts: 164
This trading strategy leverages signal processing and machine learning techniques to make informed trading decisions. We employ a Gaussian filter, which smooths the price series by reducing noise, and derive key indicators from it: the filtered price and its bands, calculated by a non-recursive method. Signals are generated by comparing the filtered price to its bands to identify potential entry and exit points.

To enhance decision-making, we integrate a decision tree algorithm. This machine learning model is trained on historical signals, capturing complex patterns and relationships in the data. The model predicts future price movements, guiding long and short positions. By combining signal processing with machine learning, the strategy aims to exploit market inefficiencies and improve trading performance.

Code
#include <default.c>

// Variables
var alpha, beta;
var filt, filttr, hband, lband;
vars PriceClose, GaussianFiltered, GaussianFilteredTR;
vars UpperBand, LowerBand;

// Initialization
void init() {
    BarPeriod = 60; // 1-hour bars
    LookBack = 150; // Lookback period for the Gaussian filter

    PriceClose = series(priceClose());
    GaussianFiltered = series(0);
    GaussianFilteredTR = series(0);
    UpperBand = series(0);
    LowerBand = series(0);

    alpha = calculateAlpha(144, 4); // Sampling Period and Poles
    beta = (1 - cos(2 * PI / 144)) / (pow(1.414, 2 / 4) - 1);

    adviseLong(DTREE, 0, NULL, 0); // Initialize the Decision Tree model
}

// Calculate Alpha
var calculateAlpha(int period, int poles) {
    var beta = (1 - cos(2 * PI / period)) / (pow(1.414, 2 / poles) - 1);
    return -beta + sqrt(beta * beta + 2 * beta);
}

// Modified Gaussian Filter with Bands
var filt9x_with_bands(var a, var s, int poles, var deviationMultiplier, var* upperBand, var* lowerBand) {
    static var f[10]; // Array to store previous filter values (plain var array, not a series)
    var x = 1 - a;
    var filt = s * a;

    // Precompute powers of x to avoid redundant calculations
    var x2 = x * x;
    var x3 = x2 * x;
    var x4 = x3 * x;
    var x5 = x4 * x;
    var x6 = x5 * x;
    var x7 = x6 * x;
    var x8 = x7 * x;
    var x9 = x8 * x;

    // Calculate the filter value iteratively
    for (int i = 1; i <= poles; i++) {
        switch (i) {
            case 1:
                filt += x * f[i];
                break;
            case 2:
                filt -= 36 * x2 * f[i];
                break;
            case 3:
                filt += 84 * x3 * f[i];
                break;
            case 4:
                filt -= 126 * x4 * f[i];
                break;
            case 5:
                filt += 126 * x5 * f[i];
                break;
            case 6:
                filt -= 84 * x6 * f[i];
                break;
            case 7:
                filt += 36 * x7 * f[i];
                break;
            case 8:
                filt -= 9 * x8 * f[i];
                break;
            case 9:
                filt += x9 * f[i];
                break;
        }
    }

    // Shift the previous values in the array
    for (int j = 9; j > 0; j--) {
        f[j] = f[j - 1];
    }
    f[0] = filt;

    // Calculate standard deviation of filter values
    var sum = 0, mean = 0, count = 10;
    for (int j = 0; j < count; j++) {
        sum += f[j];
    }
    mean = sum / count;

    var varianceSum = 0;
    for (int j = 0; j < count; j++) {
        varianceSum += (f[j] - mean) * (f[j] - mean);
    }
    var stddev = sqrt(varianceSum / count);

    // Calculate upper and lower bands
    *upperBand = filt + deviationMultiplier * stddev;
    *lowerBand = filt - deviationMultiplier * stddev;

    return filt;
}

// Gaussian Filter Calculation
void calculateGaussianFilter(vars src, int poles, int period) {
    vars filtSeries = series(0);
    vars filtSeriesTR = series(0);
    var lag = (period - 1) / (2 * poles);
    vars srcData = series(0);
    vars trData = series(0);
    
    for (int i = 0; i < Bar; i++) {
        if (i > lag) {
            srcData[i] = src[i] + (src[i] - src[i - lag]);
            trData[i] = TR() + (TR() - TR(lag));
        } else {
            srcData[i] = src[i];
            trData[i] = TR();
        }
    }
    
    for (int i = 0; i < Bar; i++) {
        var upper, lower;
        filtSeries[i] = filt9x_with_bands(alpha, srcData[i], poles, 1.414, &upper, &lower);
        filtSeriesTR[i] = filt9x_with_bands(alpha, trData[i], poles, 1.414, &upper, &lower);
        UpperBand[i] = upper;
        LowerBand[i] = lower;
    }
    
    GaussianFiltered[0] = filtSeries[0];
    GaussianFilteredTR[0] = filtSeriesTR[0];
    
    hband = UpperBand[0];
    lband = LowerBand[0];
}

// Main Trading Logic
void run() {
    BarPeriod = 60; // 1-hour bars
    LookBack = 150; // Lookback period
    TradesPerBar = 2;

    if (Train) {
        Hedge = 2; // Hedge during training
    }

    set(RULES | TESTNOW); // Set rules and test mode

    if (is(INITRUN)) {
        init();
    }

    calculateGaussianFilter(PriceClose, 4, 144);

    // Generate some signals from GaussianFiltered in the -100..100 range
    var Signals[2]; 
    Signals[0] = (GaussianFiltered[0] - GaussianFilteredTR[0]) / PIP;
    Signals[1] = 100 * FisherN(priceClose(), 100);

    // Train and trade the signals using the decision tree model
    if (adviseLong(DTREE, 0, Signals, 2) > 0)
        enterLong();
    if (adviseShort(DTREE, 0, Signals, 2) > 0)
        enterShort();
}

Last edited by TipmyPip; 07/11/24 05:45.
Gaussian-Enhanced Hybrid Ensemble Strategy [Re: TipmyPip] #488318
07/20/24 05:00
07/20/24 05:00
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
The following is an optimized version of the Gaussian Filter Strategy, which includes Decision Trees, and Neural Networks to monitor error correction within the signals.

This code combines decision trees and neural networks to enhance previous versions of the implementation. It initializes and trains both models, uses Gaussian filters for signal processing, and employs a meta-model neural network to integrate their predictions for final trading actions, aiming to reduce errors and improve trading performance in a systematic, data-driven manner.

Code
#include <default.c>

// Variables
var alpha, beta;
var filt, filttr, hband, lband;
vars PriceClose, GaussianFiltered, GaussianFilteredTR;
vars UpperBand, LowerBand;
var Signals[2];
var MetaSignals[2]; // Signals for the meta-model

// Initialization
void init() {
    BarPeriod = 60; // 1-hour bars
    LookBack = 150; // Lookback period for the Gaussian filter

    PriceClose = series(priceClose());
    GaussianFiltered = series(0);
    GaussianFilteredTR = series(0);
    UpperBand = series(0);
    LowerBand = series(0);

    alpha = calculateAlpha(144, 4); // Sampling Period and Poles
    beta = (1 - cos(2 * PI / 144)) / (pow(1.414, 2 / 4) - 1);

    adviseLong(DTREE, 0, NULL, 0); // Initialize the Decision Tree model
    adviseLong(NN, 0, NULL, 0); // Initialize the Neural Network model
    adviseLong(NN, 1, NULL, 0); // Initialize the Meta-Model Neural Network
}

// Calculate Alpha
var calculateAlpha(int period, int poles) {
    var beta = (1 - cos(2 * PI / period)) / (pow(1.414, 2 / poles) - 1);
    return -beta + sqrt(beta * beta + 2 * beta);
}

// Optimized Gaussian Filter with Bands
var filt9x_with_bands(var a, var s, int poles, var deviationMultiplier, var* upperBand, var* lowerBand) {
    static var f[10]; // Previous filter values (var, not vars: vars is a series pointer in lite-C)
    var x = 1 - a;
    var filt = s * a;

    // Precompute powers of x to avoid redundant calculations
    var x_powers[10];
    x_powers[1] = x;
    for (int i = 2; i <= 9; i++) {
        x_powers[i] = x_powers[i - 1] * x;
    }

    // Calculate the filter value iteratively
    var coefficients[10] = {0, 1, -36, 84, -126, 126, -84, 36, -9, 1};
    for (int i = 1; i <= poles; i++) {
        filt += coefficients[i] * x_powers[i] * f[i];
    }

    // Shift the previous values in the array
    for (int j = 9; j > 0; j--) {
        f[j] = f[j - 1];
    }
    f[0] = filt;

    // Calculate mean and variance in a single pass
    var sum = 0, varianceSum = 0, count = 10;
    for (int j = 0; j < count; j++) {
        sum += f[j];
    }
    var mean = sum / count;

    for (int j = 0; j < count; j++) {
        varianceSum += (f[j] - mean) * (f[j] - mean);
    }
    var stddev = sqrt(varianceSum / count);

    // Calculate upper and lower bands
    *upperBand = filt + deviationMultiplier * stddev;
    *lowerBand = filt - deviationMultiplier * stddev;

    return filt;
}

// Gaussian Filter Calculation
void calculateGaussianFilter(vars src, int poles, int period) {
    vars filtSeries = series(0);
    vars filtSeriesTR = series(0);
    var lag = (period - 1) / (2 * poles);
    vars srcData = series(0);
    vars trData = series(0);
    
    for (int i = 0; i < Bar; i++) {
        if (i > lag) {
            srcData[i] = src[i] + (src[i] - src[i - lag]);
            trData[i] = TR() + (TR() - TR(lag));
        } else {
            srcData[i] = src[i];
            trData[i] = TR();
        }
    }
    
    for (int i = 0; i < Bar; i++) {
        var upper, lower;
        filtSeries[i] = filt9x_with_bands(alpha, srcData[i], poles, 1.414, &upper, &lower);
        filtSeriesTR[i] = filt9x_with_bands(alpha, trData[i], poles, 1.414, &upper, &lower);
        UpperBand[i] = upper;
        LowerBand[i] = lower;
    }
    
    GaussianFiltered[0] = filtSeries[0];
    GaussianFilteredTR[0] = filtSeriesTR[0];
    
    hband = UpperBand[0];
    lband = LowerBand[0];
}

// Meta-Model for Combining Predictions using Neural Network
var metaModel(var dtreePrediction, var nnPrediction) {
    MetaSignals[0] = dtreePrediction;
    MetaSignals[1] = nnPrediction;
    return advise(NN, 1, MetaSignals, 2); // Use another neural network (NN) model as meta-model
}

// Main Trading Logic
void run() {
    BarPeriod = 60; // 1-hour bars
    LookBack = 150; // Lookback period
    TradesPerBar = 2;

    if (Train) {
        Hedge = 2; // Hedge during training
    }

    set(RULES | TESTNOW); // Set rules and test mode

    if (is(INITRUN)) {
        init();
    }

    calculateGaussianFilter(PriceClose, 4, 144);

    // Generate some signals from GaussianFiltered in the -100..100 range
    Signals[0] = (GaussianFiltered[0] - GaussianFilteredTR[0]) / PIP;
    Signals[1] = 100 * FisherN(priceClose(), 100);

    // Train and trade the signals using decision tree and neural network models
    var dtreePrediction = advise(DTREE, 0, Signals, 2);
    var nnPrediction = advise(NN, 0, Signals, 2);

    var finalDecision = metaModel(dtreePrediction, nnPrediction);
    
    if (finalDecision > 0)
        enterLong();
    else if (finalDecision < 0)
        enterShort();
    else {
        if (isLong()) exitLong();
        if (isShort()) exitShort();
    }
}

Re: Gaussian-Enhanced Hybrid Ensemble Strategy [Re: TipmyPip] #488319
07/20/24 06:46
07/20/24 06:46
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
I must confess, my fascination with automation knows no bounds. While resolving coding problems can be a daunting task, the correct implementation of automation can significantly simplify much of this work.

Imagine the incredible efficiency we could achieve by quickly resolving problems and eliminating the need for error corrections in our code. This would not only save us time but also enrich our lives by allowing us to focus on more creative and fulfilling pursuits.

It's true that addressing errors in automated code generation can be a slow process. However, developing and debugging code manually without automation demands just as much effort and time.

My enthusiasm for automation is matched by my eagerness to share its benefits with others. Please forgive my insistence, but I wholeheartedly encourage you to embrace automation as I have. Together, we can transform our manual trading tasks into streamlined, automated solutions. :-) Hahahah!

I will surely need to fix many errors in my code. But if anyone feels like sharing any solutions or ideas, please do.

Last edited by TipmyPip; 07/22/24 19:04.
Re: Gaussian-Enhanced Hybrid Ensemble Strategy [Re: TipmyPip] #488397
10/01/24 10:42
10/01/24 10:42
Joined: Dec 2016
Posts: 74
F
firecrest Offline
Junior Member
firecrest  Offline
Junior Member
F

Joined: Dec 2016
Posts: 74
Thanks @TipmyPip, Is there any simple code that makes consistent money? Even annual return of 10% is great.

Re: Gaussian-Enhanced Hybrid Ensemble Strategy [Re: firecrest] #488399
10/04/24 19:33
10/04/24 19:33
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
Well, that is what you need to work on. Simple code that reliably makes 10% doesn't exist; you need to use it, test, and retest.

Re: Gaussian-Enhanced Hybrid Ensemble Strategy [Re: TipmyPip] #488428
11/07/24 07:53
11/07/24 07:53
Joined: Dec 2016
Posts: 74
F
firecrest Offline
Junior Member
firecrest  Offline
Junior Member
F

Joined: Dec 2016
Posts: 74
I was wondering if there is any code that can beat buy-and-hold of SPY by a good margin. Thanks for the input and I will test it out.

Re: Gaussian-Enhanced Hybrid Ensemble Strategy [Re: firecrest] #488429
11/09/24 16:42
11/09/24 16:42
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
Let me give you an example of answers from ZorroGPT:

Train the PyTorch Model Separately
First, you’ll use PyTorch to train your DRL model based on historical data. After training, save the model so Zorro can use it for predictions.

Code
import torch
import numpy as np
from torch import nn, optim

# Hyperparameters - adjust these to match your strategy's inputs
NumSignals = 8   # number of input features per sample
Neurons = 256    # hidden layer size
Rate = 0.001     # learning rate
Epochs = 100     # number of training epochs

# Example DRL Model (Adjust according to your architecture)
class TradingModel(nn.Module):
    def __init__(self):
        super(TradingModel, self).__init__()
        self.fc1 = nn.Linear(NumSignals, Neurons)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(Neurons, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        x = self.relu(self.fc1(x))
        x = self.sigmoid(self.fc2(x))
        return x

# Training function
def train_model(data_path, model_save_path):
    data = np.loadtxt(data_path, delimiter=',')  # Load data from CSV
    X, y = data[:, :-1], data[:, -1]
    X = torch.tensor(X, dtype=torch.float32)
    y = torch.tensor(y, dtype=torch.float32).view(-1, 1)

    model = TradingModel()
    criterion = nn.BCELoss()
    optimizer = optim.Adam(model.parameters(), lr=Rate)

    for epoch in range(Epochs):
        optimizer.zero_grad()
        predictions = model(X)
        loss = criterion(predictions, y)
        loss.backward()
        optimizer.step()

    # Save model for use in Zorro
    torch.save(model.state_dict(), model_save_path)


Train the model on your historical data and save it:

Code
train_model("Data/Signals.csv", "model.pth")


2. Load and Use the Model in Zorro. This is possible, but requires different code:
Since Zorro cannot directly run PyTorch, you’ll need to create a separate Python script (e.g., predict.py) that:

Loads the trained model.
Receives data from Zorro for predictions.
Outputs the prediction results back to Zorro.

Code
import torch
import numpy as np
from TradingModel import TradingModel  # Import your model class

# Initialize model
model = TradingModel()
model.load_state_dict(torch.load("model.pth"))  # Load saved model
model.eval()  # Set model to evaluation mode

def predict(input_data):
    input_tensor = torch.tensor(input_data, dtype=torch.float32)
    with torch.no_grad():
        prediction = model(input_tensor)
    return prediction.item()

# Example usage for testing
# Replace [0.5, 1.2, -0.7, ...] with real data passed from Zorro
print(predict([0.5, 1.2, -0.7, ...]))  # Output a prediction for testing


3. Integrate Prediction with Zorro in lite-C Code
In your Zorro script, the run function will send input data to predict.py, retrieve the prediction result, and make trading decisions based on it.
Note that Rx() is Zorro's bridge to R, not Python, so this route only works if R calls Python in turn (for example through the reticulate package); a more direct alternative is Zorro's Python bridge (pynet), shown in a later post.

Code
#define NumSignals 8  // must match the model's input size (assumed here)

function run()
{
    vars InputData = series(priceClose());
    double Data[NumSignals];
    for(int i = 0; i < NumSignals; i++)
        Data[i] = InputData[i];  // Gather recent data for model input

    // Save input data to CSV format expected by predict.py
    file_write("Data\\input_data.csv", Data, 0);

    // Call Python predict script and retrieve prediction
    Rx("Y <- predict('Data/input_data.csv')");
    var prediction = Rd("Y[1]"); // Assuming Y[1] holds the prediction result

    // Based on prediction, send buy or sell order
    if(prediction > 0.5)
        enterLong();  // Buy if prediction is above 0.5 threshold
    else
        enterShort(); // Sell if prediction is 0.5 or below
}


Now, there might be some errors, which you will need to ask ZorroGPT how to resolve. Let me assure you that it took me at least 40-80 exchanges of error handling to create the Gaussian Band indicator, and I am still not sure its values work properly in all time frames.

Last edited by TipmyPip; 11/09/24 16:47.
PyTorch DRL with ZorroGPT [Re: TipmyPip] #488430
11/09/24 16:52
11/09/24 16:52
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
Zorro can indeed use PyTorch directly if you configure it to run Python scripts through the pynet.cpp interface. This method allows you to train and use a PyTorch model directly from Zorro without needing to write a separate data passing script. Here’s how to do it using Zorro’s pynet.cpp and a Python script with PyTorch:

Install Required Python Libraries:

Make sure you have Python 64-bit installed.
Install the necessary libraries by running:

Code
pip3 install torch numpy

(math is part of the Python standard library and doesn't need to be installed.)


Open Zorro.ini and set the PythonPath64 variable to the path of your Python installation. For example:

Code
PythonPath64 = "C:\Users\YourName\AppData\Local\Programs\Python\Python312"


Write a PyTorch Model Script:

In the same directory as your Zorro script, create a Python file with the same name as your Zorro strategy file, but with a .py extension (e.g., if your Zorro script is MyStrategy.c, name the Python file MyStrategy.py).
This Python script will contain the PyTorch model, functions for prediction, and optionally for training.

Code
import torch
import torch.nn as nn
import numpy as np

class TradingModel(nn.Module):
    def __init__(self, input_size, hidden_size):
        super(TradingModel, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        x = self.relu(self.fc1(x))
        x = self.sigmoid(self.fc2(x))
        return x

# Initialize the model with input size and hidden layer size
model = TradingModel(input_size=8, hidden_size=256)  # Adjust based on your input features

# Load trained model weights if available
try:
    model.load_state_dict(torch.load("model.pth"))
    model.eval()
except FileNotFoundError:
    print("Model weights file not found. Train the model first.")

def predict(input_data):
    input_tensor = torch.tensor(input_data, dtype=torch.float32)
    with torch.no_grad():
        prediction = model(input_tensor)
    return prediction.item()


Using pynet.cpp to Call Python from Zorro:
pynet.cpp allows Zorro to call Python functions, making it possible to run PyTorch directly for predictions.
To use the model in Zorro, write the lite-C code that sends data to predict in your Python script, fetches the result, and executes trades based on the output.

Code
function run()
{
    vars InputData = series(priceClose());
    double Data[8];  // Adjust based on your input features
    for(int i = 0; i < 8; i++)
        Data[i] = InputData[i];  // Gather recent data for model input

    // Call the predict function in MyStrategy.py
    python("Y = predict", Data, 8); // Sends data to the Python function `predict`
    var prediction = python("Y");   // Retrieve the prediction result from Python

    // Trade decision based on prediction
    if(prediction > 0.5) enterLong();
    else enterShort();
}


Compile and Run with Zorro:
Use the 64-bit version of Zorro to run this script, as pynet.cpp requires a 64-bit Python setup.
This setup calls the predict function in the MyStrategy.py script, passing Data as input, and receives Y, which is used to decide whether to enter a long or short position.
Explanation of the Code
Zorro’s python() Command: python() is used to send and retrieve data between Zorro and Python.
Trade Logic: Based on the prediction returned by python("Y"), the script enters a long or short position.
Model Reusability: You can save and load the trained model in the Python script, so Zorro will use the latest saved model for predictions.
This setup provides a clean integration that allows Zorro to use a PyTorch model directly for predictions without needing extra data transfer scripts.


Please note: these scripts cover about 80% of the logic and need to be refined before they will work well. Deeper experimentation is always needed to perfect a trading strategy.

Last edited by TipmyPip; 11/09/24 16:54.
DRL direct Feed from Zorro [Re: TipmyPip] #488431
11/09/24 17:07
11/09/24 17:07
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
It’s possible to pass the live data feed from Zorro Trader directly to a Deep Reinforcement Learning (DRL) model without having to call a function from Zorro each time. This can be achieved by running the DRL model in a separate Python script or process that continuously ingests the live data from Zorro in real-time, allowing it to learn and adapt independently. Here’s how to set this up:

Steps to Integrate Zorro’s Live Feed Directly into a DRL Model
Set Up a Data Stream from Zorro to Python:

Configure Zorro to save live market data into a continuously updated file or to use an inter-process communication (IPC) method, such as sockets or shared memory, to feed data into the Python DRL model.
Run the DRL Model as an Independent Python Process:

Implement the DRL model in a standalone Python script that continuously reads data from the Zorro feed and performs training in real-time.
The DRL model can process the data asynchronously and make trading decisions based on updated observations.
Implement Communication Back to Zorro for Trade Execution:

When the DRL model decides on a trade action, it can communicate this action back to Zorro, which will execute the trade.

Implementation Options
Option 1: Data Streaming Using File I/O
Zorro Script for Writing Data Feed:

Modify the Zorro run function to write the latest market data to a CSV file (e.g., live_data.csv) at each bar interval.

Code
function run()
{
    vars InputData = series(priceClose());  // Example data series
    
    // Save data to CSV file for the DRL model
    file_write("Data/live_data.csv", InputData, 0);

    // Execute trades based on external signals
    // (file_content reads the file into a string; atof converts it to a number)
    var signal = atof(file_content("Data/drl_signal.txt"));  // Read DRL action signal
    if (signal > 0.5)
        enterLong();
    else if (signal <= 0.5)
        enterShort();
}


Python DRL Script (Independent Process):

In a separate Python script, the DRL model continuously monitors live_data.csv for new data and updates its learning and action policy accordingly.

Code
import torch
import numpy as np
import time
from model import TradingModel  # Define your model in model.py

model = TradingModel(input_size=8, hidden_size=256)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
criterion = torch.nn.BCELoss()

def train_model(input_data, target):
    model.train()
    input_tensor = torch.tensor(input_data, dtype=torch.float32)
    target_tensor = torch.tensor([target], dtype=torch.float32)

    optimizer.zero_grad()
    prediction = model(input_tensor)
    loss = criterion(prediction, target_tensor)
    loss.backward()
    optimizer.step()
    return loss.item()

while True:
    # Read the latest data from live_data.csv
    try:
        data = np.loadtxt("Data/live_data.csv", delimiter=',')
        input_data = data[-1]  # Get the most recent row of data
        prediction = model(torch.tensor(input_data, dtype=torch.float32)).item()
        
        # Save the action decision back to a file that Zorro reads
        with open("Data/drl_signal.txt", "w") as f:
            f.write("1" if prediction > 0.5 else "0")  # Save the signal for Zorro
        
        # Optionally train model with feedback (e.g., profit from the last trade)
        # target = get_trade_feedback()  # You may define a function to get trade results
        # train_model(input_data, target)

    except Exception as e:
        print(f"Error reading data or training model: {e}")

    time.sleep(1)  # Polling interval, adjust based on data frequency
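One practical caveat with this file-based variant: Zorro may read drl_signal.txt while Python is still writing it. A simple safeguard is to write the signal to a temporary file first and then rename it over drl_signal.txt (os.replace() in Python is atomic on the same volume), so Zorro never sees a half-written file.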


Option 2: Using Sockets for Real-Time Data Streaming
Set Up a Socket Server in the Python DRL Script:

Run a socket server in Python to continuously receive data from Zorro and send back trade actions.

Code
import socket
import torch
import numpy as np
from model import TradingModel

model = TradingModel(input_size=8, hidden_size=256)

# Initialize socket server
HOST = 'localhost'  
PORT = 65432        
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((HOST, PORT))
server.listen()

print("Waiting for Zorro connection...")

conn, addr = server.accept()
print(f"Connected by {addr}")

while True:
    data = conn.recv(1024)  # Adjust buffer size as needed
    if not data:
        break
    
    # Convert raw bytes to a numpy array; Zorro sends C doubles,
    # so parse as float64, then cast to float32 for the model
    input_data = np.frombuffer(data, dtype=np.float64).astype(np.float32)
    
    # Make a prediction
    prediction = model(torch.tensor(input_data)).item()

    # Send decision to Zorro
    decision = b'1' if prediction > 0.5 else b'0'
    conn.sendall(decision)


Modify the Zorro Script to Send Data via Socket:

In the Zorro script, connect to the Python socket server to send live data and receive the model’s action signal.

Code
#define NumSignals 8  // must match the model's input size (assumed here)

function run()
{
    vars InputData = series(priceClose());
    double Data[NumSignals];
    for(int i = 0; i < NumSignals; i++)
        Data[i] = InputData[i];

    // Send data to Python via socket
    int socket = socket_connect("localhost", 65432);
    if (socket != 0) {
        socket_send(socket, Data, sizeof(Data));  // Send data
        int action = socket_receive(socket);      // Receive action decision
        socket_close(socket);

        // Act on received decision
        if (action == '1')
            enterLong();
        else if (action == '0')
            enterShort();
    }
}


By running the DRL model as an independent process, it can continuously learn and adapt from real-time data while only sending trade decisions back to Zorro for execution. This setup provides the flexibility to train the DRL model in live environments without interrupting Zorro’s main trading operations.

In the Zorro platform, socket-based networking functions like socket_connect, socket_send, and socket_receive are not built-in, as Zorro’s lite-C scripting language does not natively support socket programming. However, you can achieve socket communication in Zorro using DLLs (Dynamic Link Libraries) to bridge between Zorro and Python for real-time data feeds.

Steps to Set Up Socket Communication Between Zorro and Python Using a DLL
Create a C/C++ Socket DLL:

Write a C/C++ library that implements socket functions (connect, send, receive, close).
Compile the library into a DLL that Zorro can load and call directly from the lite-C script.
Use the DLL in Zorro:

Load the DLL in your Zorro script.
Use the DLL functions to send data to and receive data from a Python socket server.
Example of a Simple C/C++ Socket DLL
Here’s a minimal example to create a C++ DLL for socket communication. This example provides functions for connecting, sending, receiving, and closing a socket.

C++ Socket DLL Code
Save this file as SocketDLL.cpp, and then compile it into a DLL.

Code
#include <winsock2.h>
#include <ws2tcpip.h>

#pragma comment(lib, "ws2_32.lib")

extern "C" {
    SOCKET sock = INVALID_SOCKET;

    __declspec(dllexport) int socket_connect(const char* ip, int port) {
        WSADATA wsaData;
        WSAStartup(MAKEWORD(2,2), &wsaData);
        
        sock = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
        if (sock == INVALID_SOCKET) return -1;

        sockaddr_in clientService;
        clientService.sin_family = AF_INET;
        clientService.sin_addr.s_addr = inet_addr(ip);
        clientService.sin_port = htons(port);

        if (connect(sock, (SOCKADDR*)&clientService, sizeof(clientService)) == SOCKET_ERROR) {
            closesocket(sock);
            WSACleanup();
            return -1;
        }
        return 0;
    }

    __declspec(dllexport) int socket_send(const char* data, int length) {
        if (sock == INVALID_SOCKET) return -1;
        return send(sock, data, length, 0);
    }

    __declspec(dllexport) int socket_receive(char* buffer, int length) {
        if (sock == INVALID_SOCKET) return -1;
        return recv(sock, buffer, length, 0);
    }

    __declspec(dllexport) void socket_close() {
        if (sock != INVALID_SOCKET) {
            closesocket(sock);
            WSACleanup();
        }
    }
}


Explanation:
socket_connect: Connects to the given IP and port.
socket_send: Sends data through the connected socket.
socket_receive: Receives data from the socket into a buffer.
socket_close: Closes the socket and cleans up resources.
Compile the DLL
Use a C++ compiler, like Microsoft Visual Studio, to compile this code into a DLL (e.g., SocketDLL.dll). Ensure it’s a 64-bit DLL if you’re using the 64-bit version of Zorro.
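For example, from a 64-bit Visual Studio developer command prompt, cl /LD SocketDLL.cpp builds SocketDLL.dll (the #pragma comment(lib, ...) line already pulls in ws2_32.lib). The exact command depends on your toolchain and project setup.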

Zorro Script to Use the DLL
Place the compiled DLL (SocketDLL.dll) in the same directory as your Zorro script.
Use the DLL functions in your Zorro script to connect, send, and receive data.
Zorro lite-C Script Example

Code
function run()
{
    // Load the socket DLL
    int connect_result = call("SocketDLL.dll", "socket_connect", "localhost", 65432);

    if (connect_result == 0) {
        printf("\nConnected to socket.");

        // Example data to send
        double data[8] = { 1.2, 2.3, 3.4, 4.5, 5.6, 6.7, 7.8, 8.9 };
        call("SocketDLL.dll", "socket_send", data, sizeof(data));

        // Buffer for receiving data
        char buffer[8];
        int bytes_received = call("SocketDLL.dll", "socket_receive", buffer, sizeof(buffer));
        
        if (bytes_received > 0) {
            printf("\nReceived data: %s", buffer);  // Example printout
        }
        
        // Close the socket connection
        call("SocketDLL.dll", "socket_close");
    } else {
        printf("\nFailed to connect to socket.");
    }
}


Explanation of the Zorro Script
call() Function: This function is used to call functions from the DLL in Zorro.
call("SocketDLL.dll", "socket_connect", "localhost", 65432) connects to the Python server running on localhost at port 65432.
call("SocketDLL.dll", "socket_send", data, sizeof(data)) sends the data array to the Python server.
call("SocketDLL.dll", "socket_receive", buffer, sizeof(buffer)) receives data from the server into the buffer.
call("SocketDLL.dll", "socket_close") closes the socket connection.
Python Socket Server to Receive Data from Zorro

Code
import socket

HOST = 'localhost'
PORT = 65432

server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((HOST, PORT))
server.listen()
print("Server listening...")

conn, addr = server.accept()
print(f"Connected by {addr}")

while True:
    data = conn.recv(1024)
    if not data:
        break
    print("Received data:", data)

    # Send response
    response = b"1"  # For example, send a '1' signal for buy
    conn.sendall(response)

conn.close()


One important note for all traders and programmers: as noted before, we all want final solutions without bugs, illogical scripts, or hard-to-understand code. But the whole point of these examples is to learn to use ZorroGPT and find faster routes from your imagination to working code.

Last edited by TipmyPip; 11/10/24 03:17.
Multi Agent DRL with Simple Strategy [Re: TipmyPip] #488432
11/10/24 10:36
11/10/24 10:36
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
To leverage Zorro Trader’s efficient, event-driven structure for managing a multi-agent Deep Reinforcement Learning (DRL) model setup, we can design the system to optimize computational speed and real-time trade execution while leaving the resource-intensive training to Python. Here’s an approach that maximizes Zorro's fast computation for managing multi-agent DRL models.

Key Principles for Zorro’s Role in Multi-Agent Management
Separation of Prediction and Training:

Zorro handles trade execution and prediction requests, which are computationally light and can run at high frequency.
The heavier tasks—training and tuning the DRL models—are handled by Python independently, reducing Zorro’s load and keeping trade execution highly responsive.
Socket Communication for Real-Time Data Transfer:

Zorro communicates with Python via a socket connection, sending real-time data for each agent and receiving predictions.
This eliminates I/O delays from file reads and writes, allowing Zorro to request predictions instantly and act on them immediately.
Dynamic and Concurrent Agent Management:

Zorro uses its fast computational ability to request predictions concurrently across multiple agents and assets.
It can trigger trade decisions for several agents in parallel, enabling highly responsive multi-asset or multi-strategy trading.
Implementation Strategy
Here’s how to set up Zorro to manage multiple agents effectively:

Zorro: Efficient, Concurrent Data Handling and Trade Execution
Python: Dedicated, Asynchronous Training with Real-Time Prediction Server
Step-by-Step Code Structure
1. Python Socket Server Code (Handling Multi-Agent Predictions and Training)
In Python, we set up a socket server that:

Listens for data requests from Zorro.
Uses pre-trained models to make quick predictions.
Runs training processes in the background asynchronously, ensuring it can respond to prediction requests without delay.

Code
import socket
import torch
import numpy as np
from model import TradingModel  # Assume TradingModel is defined appropriately

# Initialize models and load them if available
agents = {
    "EURUSD": TradingModel(input_size=5, hidden_layers=2, neurons_per_layer=64),
    "GBPUSD": TradingModel(input_size=5, hidden_layers=2, neurons_per_layer=64),
    "EURGBP": TradingModel(input_size=5, hidden_layers=2, neurons_per_layer=64)
}

for name, model in agents.items():
    try:
        model.load_state_dict(torch.load(f"{name}_model.pth"))
        model.eval()
    except FileNotFoundError:
        print(f"No saved model found for {name}, starting fresh.")

# Start a socket server to handle prediction requests
HOST = 'localhost'
PORT = 65432
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((HOST, PORT))
server.listen()
print("Waiting for Zorro connection...")

conn, addr = server.accept()
print(f"Connected by {addr}")

while True:
    data = conn.recv(1024)
    if not data:
        break
    
    try:
        # Parse data to identify agent and extract feature inputs
        data_str = data.decode().split(',')
        agent_name = data_str[0]
        input_data = np.array(data_str[1:], dtype=np.float32)
        
        # Make a prediction if the agent exists
        if agent_name in agents:
            model = agents[agent_name]
            input_tensor = torch.tensor(input_data, dtype=torch.float32)
            with torch.no_grad():
                prediction = model(input_tensor).item()
            decision = '1' if prediction > 0.5 else '0'
        else:
            decision = '0'  # Default action if agent is unrecognized

        # Send the prediction decision back to Zorro
        conn.sendall(decision.encode())

    except Exception as e:
        print(f"Error processing request: {e}")
        break

conn.close()



2. Zorro Code with Socket Communication and Concurrent Agent Management
In Zorro, we:

Open a single socket connection and reuse it for all agents' requests.
Request a prediction for each agent in turn and execute its trade immediately after the prediction arrives.

Code
function run()
{
    // Gather data for each pair; asset() selects the instrument that
    // subsequent price and trade calls refer to
    double EURUSD_Data[5];
    double GBPUSD_Data[5];
    double EURGBP_Data[5];
    asset("EURUSD");
    EURUSD_Data[0] = priceHigh(); EURUSD_Data[1] = priceLow();
    EURUSD_Data[2] = priceClose(); EURUSD_Data[3] = priceOpen();
    EURUSD_Data[4] = Vol();
    asset("GBPUSD");
    GBPUSD_Data[0] = priceHigh(); GBPUSD_Data[1] = priceLow();
    GBPUSD_Data[2] = priceClose(); GBPUSD_Data[3] = priceOpen();
    GBPUSD_Data[4] = Vol();
    asset("EURGBP");
    EURGBP_Data[0] = priceHigh(); EURGBP_Data[1] = priceLow();
    EURGBP_Data[2] = priceClose(); EURGBP_Data[3] = priceOpen();
    EURGBP_Data[4] = Vol();

    // Connect to Python server
    int socket = socket_connect("localhost", 65432);
    if (socket == 0) {
        printf("\nFailed to connect to Python socket.");
        return;
    }

    // Agent 1: EURUSD - Send data and get prediction
    string eurusd_data_str = strf("EURUSD,%f,%f,%f,%f,%f", EURUSD_Data[0], EURUSD_Data[1], EURUSD_Data[2], EURUSD_Data[3], EURUSD_Data[4]);
    socket_send(socket, eurusd_data_str, strlen(eurusd_data_str));
    char buffer[2];
    int bytes_received = socket_receive(socket, buffer, sizeof(buffer));
    asset("EURUSD");
    if (bytes_received > 0 && buffer[0] == '1')
        enterLong();
    else
        enterShort();

    // Agent 2: GBPUSD - Send data and get prediction
    string gbpusd_data_str = strf("GBPUSD,%f,%f,%f,%f,%f", GBPUSD_Data[0], GBPUSD_Data[1], GBPUSD_Data[2], GBPUSD_Data[3], GBPUSD_Data[4]);
    socket_send(socket, gbpusd_data_str, strlen(gbpusd_data_str));
    bytes_received = socket_receive(socket, buffer, sizeof(buffer));
    asset("GBPUSD");
    if (bytes_received > 0 && buffer[0] == '1')
        enterLong();
    else
        enterShort();

    // Agent 3: EURGBP - Send data and get prediction
    string eurgbp_data_str = strf("EURGBP,%f,%f,%f,%f,%f", EURGBP_Data[0], EURGBP_Data[1], EURGBP_Data[2], EURGBP_Data[3], EURGBP_Data[4]);
    socket_send(socket, eurgbp_data_str, strlen(eurgbp_data_str));
    bytes_received = socket_receive(socket, buffer, sizeof(buffer));
    asset("EURGBP");
    if (bytes_received > 0 && buffer[0] == '1')
        enterLong();
    else
        enterShort();

    // Close socket connection
    socket_close(socket);
}


Explanation of the Zorro Code
Per-Agent Data Handling:

Zorro gathers data for each agent and sends it over the same socket, one agent at a time.
Each agent's prediction is acted on as soon as it arrives, so a trade for one pair is not delayed until all predictions are in.
Direct Trade Execution:

Based on the prediction received from Python, Zorro immediately triggers a Long or Short position for each currency pair.
This approach leverages Zorro’s fast computation for rapid trade decisions, as the time-consuming training process is handled asynchronously by Python.
Benefits of This Setup
Per-Agent Prediction Requests:

Each agent's data is processed individually, so each prediction can be requested and acted on without waiting for the others.
Zorro executes trades immediately, maximizing the benefit of fast computation.
Efficient Use of Resources:

Training is handled asynchronously by Python, while Zorro focuses purely on trade execution and prediction requests.
The socket communication enables real-time data exchange, ensuring Zorro’s high-frequency capabilities are fully utilized.
Scalable to More Agents:

Adding new agents is straightforward: simply expand the data gathering and prediction request handling sections in both Zorro and Python, as each agent operates independently in this setup.
By separating Zorro’s fast execution from Python’s computational load, this setup achieves efficient multi-agent management, ensuring real-time responsiveness and scalability.

Delta Dynamics [Re: TipmyPip] #488493
12/24/24 18:36
12/24/24 18:36
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
Sigma Matrix:

The Correlation Web
A web of nodes (N₁, N₂, ..., N₆) is interconnected, where each edge (Eᵢⱼ) between nodes Nᵢ and Nⱼ has a weight representing their correlation coefficient. The weights are given as follows:

|      | N₁  | N₂  | N₃  | N₄  | N₅  | N₆  |
|------|-----|-----|-----|-----|-----|-----|
| N₁   | 1.0 | 0.8 | 0.3 | 0.5 | 0.7 | 0.6 |
| N₂   | 0.8 | 1.0 | 0.4 | 0.6 | 0.9 | 0.2 |
| N₃   | 0.3 | 0.4 | 1.0 | 0.7 | 0.5 | 0.3 |
| N₄   | 0.5 | 0.6 | 0.7 | 1.0 | 0.8 | 0.4 |
| N₅   | 0.7 | 0.9 | 0.5 | 0.8 | 1.0 | 0.6 |
| N₆   | 0.6 | 0.2 | 0.3 | 0.4 | 0.6 | 1.0 |

Your task:

Find the subset of nodes (clusters) such that the average weight of edges within each cluster is maximized.
Ensure that no node is part of more than one cluster.
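To make the objective concrete: the cluster {N₁, N₂, N₅} contains the edges E₁₂ = 0.8, E₁₅ = 0.7 and E₂₅ = 0.9, so its average intra-cluster weight is (0.8 + 0.7 + 0.9) / 3 = 0.8. The task is to choose disjoint clusters maximizing this average; {N₁, N₂, N₅} is one candidate, not necessarily the optimum.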

Code
#define PAIRS 28 // Number of currency pairs

string CurrencyPairs[PAIRS] = {
    "EURUSD", "GBPUSD", "USDJPY", "GBPJPY", "USDCAD", "EURAUD", "EURJPY", 
    "AUDCAD", "AUDJPY", "AUDNZD", "AUDUSD", "CADJPY", "EURCAD", "EURCHF", 
    "EURGBP", "EURNZD", "GBPCAD", "GBPCHF", "NZDCAD", "NZDJPY", "NZDUSD", 
    "USDCHF", "CHFJPY", "AUDCHF", "GBPNZD", "NZDCHF", "CADCHF", "GBPAUD"
};

var CorrelationMatrix[PAIRS][PAIRS]; // Correlation adjacency matrix (var, not vars: vars is a series pointer)
var ArbitrageMatrix[PAIRS][PAIRS];   // Arbitrage adjacency matrix
int StateMatrix[PAIRS][3][3];        // Transition matrix for each pair
int LastState[PAIRS];                // Last observed state per pair
int LookBack = 200;                  // Lookback period for calculations

// Function to calculate correlation between two series
function calculateCorrelation(vars series1, vars series2, int len) {
    var mean1 = SMA(series1, len);
    var mean2 = SMA(series2, len);
    var numerator = 0, denom1 = 0, denom2 = 0;
    for (int i = 0; i < len; i++) {
        numerator += (series1[i] - mean1) * (series2[i] - mean2);
        denom1 += pow(series1[i] - mean1, 2);
        denom2 += pow(series2[i] - mean2, 2);
    }
    return numerator / sqrt(denom1 * denom2);
}

// Initialize the correlation network
function initializeCorrelationNetwork() {
    for (int i = 0; i < PAIRS; i++) {
        for (int j = 0; j < PAIRS; j++) {
            if (i != j) {
                vars series1 = series(price(CurrencyPairs[i]));
                vars series2 = series(price(CurrencyPairs[j]));
                CorrelationMatrix[i][j] = calculateCorrelation(series1, series2, LookBack);
            } else {
                CorrelationMatrix[i][j] = 1; // Self-correlation
            }
        }
    }
}

// Calculate arbitrage opportunities between pairs
function calculateArbitrage() {
    for (int i = 0; i < PAIRS; i++) {
        for (int j = 0; j < PAIRS; j++) {
            if (i != j) {
                ArbitrageMatrix[i][j] = log(price(CurrencyPairs[i]) / price(CurrencyPairs[j]));
            } else {
                ArbitrageMatrix[i][j] = 0; // No arbitrage within the same pair
            }
        }
    }
}

// Detect arbitrage cycles using a Bellman-Ford-like algorithm
function detectArbitrageCycles() {
    for (int k = 0; k < PAIRS; k++) {
        for (int i = 0; i < PAIRS; i++) {
            for (int j = 0; j < PAIRS; j++) {
                if (ArbitrageMatrix[i][k] + ArbitrageMatrix[k][j] < ArbitrageMatrix[i][j]) {
                    ArbitrageMatrix[i][j] = ArbitrageMatrix[i][k] + ArbitrageMatrix[k][j];
                }
            }
        }
    }
}

// Get the current market state for a currency pair
function getState(string pair) {
    var fastMA = SMA(series(price(pair)), 50);
    var slowMA = SMA(series(price(pair)), 200);
    if (fastMA > slowMA) return 1;  // Bull
    else if (fastMA < slowMA) return -1; // Bear
    return 0;  // Sideways
}

// Update the state transition matrix
function updateStateTransition(int pairIndex) {
    int currentState = getState(CurrencyPairs[pairIndex]);
    int lastState = LastState[pairIndex]; // last observed state (-1..1)
    StateMatrix[pairIndex][lastState + 1][currentState + 1]++; // shift to 0..2 indices
    LastState[pairIndex] = currentState; // remember for the next bar
}

// Main trading function
function run() {
    set(PLOTNOW);

    // Initialize networks
    initializeCorrelationNetwork();
    calculateArbitrage();

    // Update state transitions for each pair
    for (int i = 0; i < PAIRS; i++) {
        updateStateTransition(i);
    }

    // Example: Trade when arbitrage is detected
    for (int i = 0; i < PAIRS; i++) {
        for (int j = 0; j < PAIRS; j++) {
            if (ArbitrageMatrix[i][j] > 0.01) { // Arbitrage threshold
                enterLong(CurrencyPairs[i]);
                enterShort(CurrencyPairs[j]);
            }
        }
    }
}

Delta Cycle Dynamics [Re: TipmyPip] #488494
12/24/24 18:40
12/24/24 18:40
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
Arbitrage Cycle Mystery
Imagine a trading system with 5 currency pairs (P₁, P₂, P₃, P₄, P₅). Each pair has a profit factor (PFᵢⱼ) when moving from pair Pᵢ to pair Pⱼ. The profit factors are given in a directed graph:

P₁ → P₂: 1.2 P₂ → P₁: 0.8
P₁ → P₃: 1.1 P₃ → P₁: 0.9
P₂ → P₃: 1.3 P₃ → P₂: 0.7
P₄ → P₁: 1.5 P₁ → P₄: 0.6
P₄ → P₃: 1.4 P₃ → P₄: 0.8
P₅ → P₄: 1.2 P₄ → P₅: 0.9
P₅ → P₃: 1.1 P₃ → P₅: 0.8

Your task:

Find the cycle of pairs that maximizes the product of profit factors (PF₁ × PF₂ × PF₃ × ...).
Ensure the cycle returns to the starting pair.
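To make the objective concrete: the cycle P₁ → P₂ → P₃ → P₁ has profit factor 1.2 × 1.3 × 0.9 = 1.404, already an arbitrage (> 1). Longer cycles can do better; for example P₅ → P₄ → P₁ → P₂ → P₃ → P₅ gives 1.2 × 1.5 × 1.2 × 1.3 × 0.8 ≈ 2.246. These are candidates, not necessarily the optimum.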


Code
#define PAIRS 28 // Number of currency pairs

string CurrencyPairs[PAIRS] = {
    "EURUSD", "GBPUSD", "USDJPY", "GBPJPY", "USDCAD", "EURAUD", "EURJPY", 
    "AUDCAD", "AUDJPY", "AUDNZD", "AUDUSD", "CADJPY", "EURCAD", "EURCHF", 
    "EURGBP", "EURNZD", "GBPCAD", "GBPCHF", "NZDCAD", "NZDJPY", "NZDUSD", 
    "USDCHF", "CHFJPY", "AUDCHF", "GBPNZD", "NZDCHF", "CADCHF", "GBPAUD"
};

var CorrelationMatrix[PAIRS][PAIRS]; // Correlation adjacency matrix (var, not vars: vars is a series pointer)
var ArbitrageMatrix[PAIRS][PAIRS];   // Arbitrage adjacency matrix
var RiskMatrix[PAIRS];               // Risk matrix for each pair (e.g., volatility)
int StateMatrix[PAIRS][3][3];        // Transition matrix for each pair
int LastState[PAIRS];                // Last observed state per pair
var PortfolioWeights[PAIRS];         // Optimized portfolio weights
int LookBack = 200;                  // Lookback period for calculations

// Function to calculate correlation between two series
function calculateCorrelation(vars series1, vars series2, int len) {
    var mean1 = SMA(series1, len);
    var mean2 = SMA(series2, len);
    var numerator = 0, denom1 = 0, denom2 = 0;
    for (int i = 0; i < len; i++) {
        numerator += (series1[i] - mean1) * (series2[i] - mean2);
        denom1 += pow(series1[i] - mean1, 2);
        denom2 += pow(series2[i] - mean2, 2);
    }
    return numerator / sqrt(denom1 * denom2);
}

// Initialize the correlation network
function initializeCorrelationNetwork() {
    for (int i = 0; i < PAIRS; i++) {
        for (int j = 0; j < PAIRS; j++) {
            if (i != j) {
                vars series1 = series(price(CurrencyPairs[i]));
                vars series2 = series(price(CurrencyPairs[j]));
                CorrelationMatrix[i][j] = calculateCorrelation(series1, series2, LookBack);
            } else {
                CorrelationMatrix[i][j] = 1; // Self-correlation
            }
        }
    }
}

// Calculate arbitrage opportunities between pairs
function calculateArbitrage() {
    for (int i = 0; i < PAIRS; i++) {
        for (int j = 0; j < PAIRS; j++) {
            if (i != j) {
                ArbitrageMatrix[i][j] = log(price(CurrencyPairs[i]) / price(CurrencyPairs[j]));
            } else {
                ArbitrageMatrix[i][j] = 0; // No arbitrage within the same pair
            }
        }
    }
}

// Calculate risk for each pair (volatility-based)
function calculateRiskMatrix() {
    for (int i = 0; i < PAIRS; i++) {
        RiskMatrix[i] = StdDev(series(price(CurrencyPairs[i])), LookBack);
    }
}

// Optimize portfolio weights based on risk and correlation
function optimizePortfolio() {
    var TotalWeight = 0;
    for (int i = 0; i < PAIRS; i++) {
        PortfolioWeights[i] = 1 / RiskMatrix[i]; // Risk-based weighting
        TotalWeight += PortfolioWeights[i];
    }
    for (int i = 0; i < PAIRS; i++) {
        PortfolioWeights[i] /= TotalWeight; // Normalize weights
    }
}

// Detect arbitrage cycles using a Bellman-Ford-like algorithm
function detectArbitrageCycles() {
    for (int k = 0; k < PAIRS; k++) {
        for (int i = 0; i < PAIRS; i++) {
            for (int j = 0; j < PAIRS; j++) {
                if (ArbitrageMatrix[i][k] + ArbitrageMatrix[k][j] < ArbitrageMatrix[i][j]) {
                    ArbitrageMatrix[i][j] = ArbitrageMatrix[i][k] + ArbitrageMatrix[k][j];
                }
            }
        }
    }
}

// Get the current market state for a currency pair
function getState(string pair) {
    var fastMA = SMA(series(price(pair)), 50);
    var slowMA = SMA(series(price(pair)), 200);
    if (fastMA > slowMA) return 1;  // Bull
    else if (fastMA < slowMA) return -1; // Bear
    return 0;  // Sideways
}

// Update the state transition matrix
function updateStateTransition(int pairIndex) {
    int currentState = getState(CurrencyPairs[pairIndex]);
    int lastState = LastState[pairIndex]; // last observed state (-1..1)
    StateMatrix[pairIndex][lastState + 1][currentState + 1]++; // shift to 0..2 indices
    LastState[pairIndex] = currentState; // remember for the next bar
}

// Execute trades based on portfolio weights and arbitrage opportunities
function executeTrades() {
    for (int i = 0; i < PAIRS; i++) {
        for (int j = 0; j < PAIRS; j++) {
            if (ArbitrageMatrix[i][j] > 0.01) { // Arbitrage threshold
                var WeightI = PortfolioWeights[i];
                var WeightJ = PortfolioWeights[j];
                enterLong(CurrencyPairs[i], WeightI);  // Weighted trade
                enterShort(CurrencyPairs[j], WeightJ); // Weighted trade
            }
        }
    }
}

// Main trading function
function run() {
    set(PLOTNOW);

    // Initialize all networks and matrices
    initializeCorrelationNetwork();
    calculateArbitrage();
    calculateRiskMatrix();
    optimizePortfolio();

    // Update state transitions for each pair
    for (int i = 0; i < PAIRS; i++) {
        updateStateTransition(i);
    }

    // Execute trades based on analysis
    executeTrades();
}

Last edited by TipmyPip; 12/24/24 18:43.
Σ Φ - Δ Ξ (t), Σ Φ Δ Ξ (∫f(τ)dτ) [Re: TipmyPip] #488495
12/24/24 19:47
12/24/24 19:47
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
The Weighted Dynamic Arbitrage Graph

You are given a directed weighted graph G = (V, E) where:

V represents currency pairs (e.g., EURUSD, GBPUSD, etc.).
E represents the relationships between the currency pairs (e.g., price ratios, arbitrage opportunities).
Each edge e(i,j) between nodes v_i and v_j has a weight w(i,j) representing the logarithm of the price ratio between v_i and v_j.

Problem:
The graph G is time-dependent. At each time step t, the edge weights w(i,j,t) change according to:

w(i,j,t) = log(P_i(t) / P_j(t)) − Δ(i,j,t)

where P_i(t) and P_j(t) are the prices of v_i and v_j at time t, and Δ(i,j,t) is a dynamic threshold influenced by volatility.

A negative-weight cycle C in G represents an arbitrage opportunity, where the product of the exponentiated weights along the cycle exceeds 1:

∏_{(i,j)∈C} e^{w(i,j,t)} > 1.

Your goal is to:

Identify all negative-weight cycles in G for t₁ ≤ t ≤ t₂.

For each negative cycle C, calculate the maximum profit factor:

Profit(C) = ∏_{(i,j)∈C} e^{w(i,j,t)}.

Find the most profitable cycle across all time steps t.

Additional Constraints:

The graph G has |V| = 28 nodes (one for each currency pair) and |E| = |V| × (|V| − 1) = 756 edges (fully connected directed graph).

Edge weights w(i,j,t) vary dynamically with t according to Δ(i,j,t) = σ_i(t) · σ_j(t), where σ_i(t) is the standard deviation of prices for v_i over a rolling window of 20 time steps.

You must use an efficient algorithm (e.g., Bellman-Ford for detecting negative cycles) to handle the graph's dynamic nature.
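For the detection step, here is a minimal Bellman-Ford sketch in lite-C. The names NODES, W, Dist and hasNegativeCycle are illustrative, and W is assumed to be filled elsewhere with the current weights w(i,j,t):

Code
#define NODES 28

var W[NODES][NODES]; // current edge weights w(i,j,t), filled elsewhere
var Dist[NODES];     // shortest known distance from the source

int hasNegativeCycle(int src)
{
    int i, j, k;
    for (i = 0; i < NODES; i++) Dist[i] = 1e10;
    Dist[src] = 0;

    // Relax all edges |V|-1 times
    for (k = 0; k < NODES - 1; k++)
        for (i = 0; i < NODES; i++)
            for (j = 0; j < NODES; j++)
                if (Dist[i] + W[i][j] < Dist[j])
                    Dist[j] = Dist[i] + W[i][j];

    // One extra pass: any further improvement implies a negative cycle
    for (i = 0; i < NODES; i++)
        for (j = 0; j < NODES; j++)
            if (Dist[i] + W[i][j] < Dist[j])
                return 1;
    return 0;
}

Running this once per time step against the refreshed weight matrix gives the detection part; recovering the actual cycle members additionally requires tracking predecessor nodes during relaxation.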


Code
#define PAIRS 28 // Number of currency pairs

string CurrencyPairs[PAIRS] = {
    "EURUSD", "GBPUSD", "USDJPY", "GBPJPY", "USDCAD", "EURAUD", "EURJPY", 
    "AUDCAD", "AUDJPY", "AUDNZD", "AUDUSD", "CADJPY", "EURCAD", "EURCHF", 
    "EURGBP", "EURNZD", "GBPCAD", "GBPCHF", "NZDCAD", "NZDJPY", "NZDUSD", 
    "USDCHF", "CHFJPY", "AUDCHF", "GBPNZD", "NZDCHF", "CADCHF", "GBPAUD"
};

var CorrelationMatrix[PAIRS][PAIRS];       // Correlation adjacency matrix (var, not vars: vars is a series pointer)
var ArbitrageMatrix[PAIRS][PAIRS];         // Arbitrage adjacency matrix
var VolatilityMatrix[PAIRS];               // Volatility levels for each pair
var DynamicThresholdMatrix[PAIRS][PAIRS];  // Dynamic arbitrage thresholds
var RiskAdjustedWeights[PAIRS];            // Risk-adjusted portfolio weights
int StateMatrix[PAIRS][3][3];              // Transition matrix for each pair
var CurrentDrawdown;                       // Current drawdown level
int LookBack = 200;                        // Lookback period for calculations

// Function to calculate correlation between two series
function calculateCorrelation(vars series1, vars series2, int len) {
    var mean1 = SMA(series1, len);
    var mean2 = SMA(series2, len);
    var numerator = 0, denom1 = 0, denom2 = 0;
    for (int i = 0; i < len; i++) {
        numerator += (series1[i] - mean1) * (series2[i] - mean2);
        denom1 += pow(series1[i] - mean1, 2);
        denom2 += pow(series2[i] - mean2, 2);
    }
    return numerator / sqrt(denom1 * denom2);
}

// Initialize the correlation network
function initializeCorrelationNetwork() {
    for (int i = 0; i < PAIRS; i++) {
        for (int j = 0; j < PAIRS; j++) {
            if (i != j) {
                vars series1 = series(price(CurrencyPairs[i]));
                vars series2 = series(price(CurrencyPairs[j]));
                CorrelationMatrix[i][j] = calculateCorrelation(series1, series2, LookBack);
            } else {
                CorrelationMatrix[i][j] = 1; // Self-correlation
            }
        }
    }
}

// Calculate arbitrage opportunities between pairs with dynamic thresholds
function calculateDynamicArbitrage() {
    for (int i = 0; i < PAIRS; i++) {
        for (int j = 0; j < PAIRS; j++) {
            if (i != j) {
                var priceRatio = log(price(CurrencyPairs[i]) / price(CurrencyPairs[j]));
                var threshold = max(0.01, VolatilityMatrix[i] * 0.5); // Dynamic threshold
                DynamicThresholdMatrix[i][j] = threshold;
                ArbitrageMatrix[i][j] = (abs(priceRatio) > threshold) ? priceRatio : 0;
            } else {
                ArbitrageMatrix[i][j] = 0; // No arbitrage within the same pair
            }
        }
    }
}

// Calculate volatility levels for each pair
function calculateVolatilityMatrix() {
    for (int i = 0; i < PAIRS; i++) {
        VolatilityMatrix[i] = StdDev(series(price(CurrencyPairs[i])), LookBack);
    }
}

// Risk adjustment based on drawdown and portfolio composition
function adjustRiskWeights() {
    var TotalWeight = 0;
    for (int i = 0; i < PAIRS; i++) {
        var riskFactor = max(0.1, 1 - CurrentDrawdown / 0.2); // Reduce risk if drawdown exceeds 20%
        RiskAdjustedWeights[i] = (1 / VolatilityMatrix[i]) * riskFactor;
        TotalWeight += RiskAdjustedWeights[i];
    }
    for (int i = 0; i < PAIRS; i++) {
        RiskAdjustedWeights[i] /= TotalWeight; // Normalize weights
    }
}

// Execute trades based on dynamic arbitrage and risk-adjusted weights
function executeDynamicTrades() {
    for (int i = 0; i < PAIRS; i++) {
        for (int j = 0; j < PAIRS; j++) {
            if (ArbitrageMatrix[i][j] != 0) {
                var WeightI = RiskAdjustedWeights[i];
                var WeightJ = RiskAdjustedWeights[j];
                if (ArbitrageMatrix[i][j] > 0) { // Long-Short arbitrage
                    enterLong(CurrencyPairs[i], WeightI);
                    enterShort(CurrencyPairs[j], WeightJ);
                } else if (ArbitrageMatrix[i][j] < 0) { // Short-Long arbitrage
                    enterShort(CurrencyPairs[i], WeightI);
                    enterLong(CurrencyPairs[j], WeightJ);
                }
            }
        }
    }
}

// Track drawdown levels
function monitorDrawdown() {
    CurrentDrawdown = max(0, 1 - (Equity / MaxEquity));
    if (CurrentDrawdown > 0.2) { // Emergency shutdown at 20% drawdown
        exitLong();
        exitShort();
        printf("Emergency risk controls triggered. All positions closed.");
    }
}

// Main trading function
function run() {
    set(PLOTNOW);

    // Update and calculate all matrices
    initializeCorrelationNetwork();
    calculateVolatilityMatrix();
    calculateDynamicArbitrage();
    adjustRiskWeights();
    monitorDrawdown();

    // Execute trades based on advanced analysis
    executeDynamicTrades();
}

Last edited by TipmyPip; 12/24/24 20:39.
Profit Path Conundrum [Re: TipmyPip] #488496
12/25/24 14:23
12/25/24 14:23
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
Algorithm Complexity
You are tasked with assessing the efficiency of the trading algorithm.

Scenario
The algorithm evaluates all possible trade paths across PAIRS currency pairs to identify the most profitable path. Each path must satisfy a profitability threshold that is adjusted dynamically based on real-time performance. Trades are executed only when the risk-adjusted profit surpasses the threshold. The system operates continuously, recalculating profitability every second.

Challenge Questions

1. Graph Analysis:
Given PAIRS = 28, the graph has |V| = 28 vertices. How many edges |E| exist in this graph if all trades are considered? Assume the graph is fully connected but excludes self-loops.

2. Path Complexity:
How many possible paths can exist from a source currency pair C_s to a target pair C_e if loops and revisits are allowed?

3. Bellman-Ford Complexity:
The Bellman-Ford algorithm iterates over all edges |E| for |V| − 1 rounds. Calculate the total computational complexity in terms of |V| and |E|.

4. Dynamic Threshold Adjustment:
If the threshold Δ is recalculated using volatility data for each currency pair and there are 50 price data points per pair, what is the time complexity of the threshold update process?

5. Scalability:
If PAIRS increases to 56, describe how the computational complexity changes for: the profitability matrix calculation, the Bellman-Ford algorithm, and the dynamic threshold adjustment.

6. Real-Time Execution:
Assume profitability recalculations are triggered every second. What is the expected latency for processing PAIRS = 28 if the system can handle 100,000 operations per second?
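A quick sanity check on the first few questions (assuming one "operation" per edge relaxation or per data point): with |V| = 28 and no self-loops, |E| = 28 × 27 = 756. With revisits allowed, the number of paths from C_s to C_e is unbounded. Bellman-Ford performs (|V| − 1) × |E| = 27 × 756 ≈ 20,400 relaxations, i.e. O(|V| · |E|). The threshold update touches 50 data points for each of the 756 ordered pairs, about 37,800 operations. One full recalculation therefore costs roughly 756 + 20,400 + 37,800 ≈ 59,000 operations, or about 0.6 s at 100,000 operations per second. Doubling PAIRS to 56 gives |E| = 56 × 55 = 3,080 and scales all three stages several-fold (Bellman-Ford alone to 55 × 3,080 ≈ 169,000 relaxations).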


Code
#define PAIRS 28 // Number of currency pairs

string CurrencyPairs[PAIRS] = {
    "EURUSD", "GBPUSD", "USDJPY", "GBPJPY", "USDCAD", "EURAUD", "EURJPY", 
    "AUDCAD", "AUDJPY", "AUDNZD", "AUDUSD", "CADJPY", "EURCAD", "EURCHF", 
    "EURGBP", "EURNZD", "GBPCAD", "GBPCHF", "NZDCAD", "NZDJPY", "NZDUSD", 
    "USDCHF", "CHFJPY", "AUDCHF", "GBPNZD", "NZDCHF", "CADCHF", "GBPAUD"
};

var ProfitMatrix[PAIRS][PAIRS]; // Edge weights (profitability); var, not vars (series pointer)
int Previous[PAIRS];            // Stores the previous node in the path
var PathProfit[PAIRS];          // Stores cumulative profit along the path
var DynamicDeltaThreshold;      // Dynamic threshold to execute trades

// Function to calculate profitability matrix
function calculateProfitMatrix() {
    for (int i = 0; i < PAIRS; i++) {
        for (int j = 0; j < PAIRS; j++) {
            if (i != j) {
                var priceRatio = price(CurrencyPairs[i]) / price(CurrencyPairs[j]);
                ProfitMatrix[i][j] = log(priceRatio); // Logarithmic profitability
            } else {
                ProfitMatrix[i][j] = 0; // No self-loop
            }
        }
    }
}

// Calculate the Dynamic Delta Threshold with risk-adjustment and feedback
function calculateDynamicDeltaThreshold() {
    var totalRiskAdjustedProfit = 0;
    int count = 0;

    for (int i = 0; i < PAIRS; i++) {
        for (int j = 0; j < PAIRS; j++) {
            if (i != j && ProfitMatrix[i][j] > 0) {
                var volatility = StdDev(series(price(CurrencyPairs[i])), 50); // 50-period volatility
                var riskAdjustedProfit = ProfitMatrix[i][j] / volatility; // Risk-adjusted profit
                totalRiskAdjustedProfit += riskAdjustedProfit;
                count++;
            }
        }
    }

    if (count > 0) {
        var baselineThreshold = totalRiskAdjustedProfit / count; // Average risk-adjusted profit
        var performanceFactor = Equity / MaxEquity; // Performance feedback
        DynamicDeltaThreshold = baselineThreshold * performanceFactor; // Adjust threshold dynamically
    } else {
        DynamicDeltaThreshold = 0.001; // Default fallback
    }

    // Log the threshold for backtesting and analysis
    printf("DynamicDeltaThreshold: %.6f\n", DynamicDeltaThreshold);
}

// Bellman-Ford algorithm to find paths with the highest cumulative profit
function bellmanFord(int start) {
    for (int i = 0; i < PAIRS; i++) {
        PathProfit[i] = -1e10; // Negative infinity for unvisited nodes
        Previous[i] = -1;     // No predecessor
    }
    PathProfit[start] = 0; // Profit starts at 0 from the source

    for (int k = 0; k < PAIRS - 1; k++) {
        for (int i = 0; i < PAIRS; i++) {
            for (int j = 0; j < PAIRS; j++) {
                if (ProfitMatrix[i][j] != 0 && PathProfit[i] + ProfitMatrix[i][j] > PathProfit[j]) {
                    PathProfit[j] = PathProfit[i] + ProfitMatrix[i][j]; // Update cumulative profit
                    Previous[j] = i; // Track the path
                }
            }
        }
    }
}

// Execute trades along the highest cumulative profit path
function executePath(int start, int end) {
    int current = end;

    while (current != -1) {
        int previous = Previous[current];
        if (previous == -1) break;

        if (ProfitMatrix[previous][current] > 0) {
            enterLong(CurrencyPairs[previous]);
            enterShort(CurrencyPairs[current]);
        } else {
            enterShort(CurrencyPairs[previous]);
            enterLong(CurrencyPairs[current]);
        }

        current = previous;
    }
}

// Continuously execute trades while conditions exist
function executeContinuousTrades(int start) {
    while (1) {
        calculateProfitMatrix();          // Update the graph with live data
        calculateDynamicDeltaThreshold(); // Adjust the threshold dynamically
        bellmanFord(start);               // Recalculate paths with the highest cumulative profit

        for (int i = 0; i < PAIRS; i++) {
            for (int j = 0; j < PAIRS; j++) {
                if (i != j && ProfitMatrix[i][j] > DynamicDeltaThreshold) {
                    enterLong(CurrencyPairs[i]);
                    enterShort(CurrencyPairs[j]);
                    printf("Executing Trade: Long %s, Short %s\n", CurrencyPairs[i], CurrencyPairs[j]);
                }
            }
        }

        // Exit the loop when the session ends. isBacktest()/getBacktestPeriod()
        // do not exist in lite-C, and == cannot compare strings anyway;
        // Zorro's EXITRUN status flag serves this purpose.
        if (is(EXITRUN))
            break;
    }
}

// Main trading function
function run() {
    set(PLOTNOW);
    int startPair = 0; // Start from the first currency pair

    executeContinuousTrades(startPair);
}

Stochastic Correlation in Currency Pairs [Re: TipmyPip] #488497
12/25/24 15:34
12/25/24 15:34
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
Optimal Arbitrage with Stochastic Correlation in Currency Pairs
You are tasked with developing an arbitrage strategy between three independent currency pairs:
𝐴/𝐵, 𝐵/𝐶, and 𝐴/𝐶, where:

The price of each currency pair follows a geometric Brownian motion:

dS_i = μ_i * S_i * dt + σ_i * S_i * dW_i, for i ∈ {1, 2, 3}

μ_i: Drift rate of the 𝑖-th pair.
σ_i: Volatility of the 𝑖-th pair.
W_i: Independent Wiener processes for each pair.
A stochastic correlation ρ(t) governs the dependency between the pairs' volatilities:

dρ(t) = κ * (θ - ρ(t)) * dt + η * √(1 - ρ(t)^2) * dZ_t
κ: Mean-reversion speed of ρ(t).
θ: Long-term mean of ρ(t).
η: Volatility of ρ(t).
Z_t: Another Wiener process, independent of W_i.

Objective
Derive a dynamic arbitrage strategy that:

Exploits triangular arbitrage opportunities between the three currency pairs.
Incorporates stochastic correlation ρ(t) to optimize trading decisions.
The profit at each step is calculated as:

Profit = log(S_AB * S_BC / S_AC)

When Profit > Δ, execute arbitrage trades.
Δ: Dynamic threshold dependent on ρ(t).

Formulate the optimal stopping problem:

Define the stopping time τ to execute trades based on maximizing expected profit:

V(S, ρ) = sup_τ E [ ∫_0^τ Profit(S, ρ) * e^(-r * t) dt ]

r: Risk-free discount rate.
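
Before wiring this into a trading script, it helps to sanity-check the dynamics numerically. Below is a minimal Euler-Maruyama sketch of the three GBM legs and the mean-reverting correlation ρ(t); all parameter values are placeholder assumptions, and gauss() approximates a standard normal from uniform draws because lite-C has no built-in normal generator:

Code
// crude N(0,1) via the central limit of 12 uniform draws;
// random() in Zorro lite-C returns a uniform number in -1..+1
var gauss()
{
	var s = 0;
	int i;
	for(i = 0; i < 12; i++)
		s += 0.5*(random()+1.);
	return s - 6.;
}

// Euler-Maruyama simulation of the pair dynamics and stochastic correlation
function simulatePaths()
{
	var S_AB = 1.10, S_BC = 0.90, S_AC = 1.00; // assumed start prices
	var rho = 0.2;                             // initial correlation
	var kappa = 2.0, theta = 0.3, eta = 0.4;   // assumed correlation parameters
	var mu = 0.01, sigma = 0.10;               // assumed drift and volatility
	var dt = 1./252;                           // one trading day
	var profit;
	int t;
	for(t = 0; t < 252; t++) {
		S_AB += mu*S_AB*dt + sigma*S_AB*sqrt(dt)*gauss();
		S_BC += mu*S_BC*dt + sigma*S_BC*sqrt(dt)*gauss();
		S_AC += mu*S_AC*dt + sigma*S_AC*sqrt(dt)*gauss();
		rho += kappa*(theta-rho)*dt + eta*sqrt(max(0.,1.-rho*rho))*sqrt(dt)*gauss();
		profit = log(S_AB*S_BC/S_AC); // triangular mispricing measure
		if(t % 50 == 0)
			printf("\nt=%d  Profit=%.5f  rho=%.3f", t, profit, rho);
	}
}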


Code
#define PAIRS 28 // Number of currency pairs

string CurrencyPairs[PAIRS] = {
    "EURUSD", "GBPUSD", "USDJPY", "GBPJPY", "USDCAD", "EURAUD", "EURJPY", 
    "AUDCAD", "AUDJPY", "AUDNZD", "AUDUSD", "CADJPY", "EURCAD", "EURCHF", 
    "EURGBP", "EURNZD", "GBPCAD", "GBPCHF", "NZDCAD", "NZDJPY", "NZDUSD", 
    "USDCHF", "CHFJPY", "AUDCHF", "GBPNZD", "NZDCHF", "CADCHF", "GBPAUD"
};

vars ProfitMatrix[PAIRS][PAIRS];  // Profitability matrix
int Previous[PAIRS];             // Tracks the previous node in the path
vars PathProfit[PAIRS];          // Cumulative profit along the path
var DynamicDeltaThreshold;       // Dynamic threshold for trade execution

var thresholdZ = 2;              // Z-score threshold
var volatilityThreshold = 1;     // Relative volatility threshold
int lookback = 50;               // Lookback period for mean and volatility

// Function to calculate profitability matrix
function calculateProfitMatrix() {
    for (int i = 0; i < PAIRS; i++) {
        for (int j = 0; j < PAIRS; j++) {
            if (i != j) {
                // Calculate price ratio and mean reversion metrics
                var priceRatio = price(CurrencyPairs[i]) / price(CurrencyPairs[j]);
                var mean = SMA(series(price(CurrencyPairs[i])), lookback);
                var zScore = (price(CurrencyPairs[i]) - mean) / StdDev(series(price(CurrencyPairs[i])), lookback);
                var volatility = StdDev(series(price(CurrencyPairs[i])), lookback);
                var relativeVolatility = volatility / SMA(series(volatility), lookback);

                // Filter trades based on Z-score and volatility
                if (abs(zScore) > thresholdZ && relativeVolatility > volatilityThreshold) {
                    ProfitMatrix[i][j] = log(priceRatio) - (DynamicDeltaThreshold / volatility);
                } else {
                    ProfitMatrix[i][j] = -INF; // Disqualify trade
                }
            } else {
                ProfitMatrix[i][j] = 0; // No self-loops
            }
        }
    }
}

// Calculate the dynamic delta threshold with risk-adjustment
function calculateDynamicDeltaThreshold() {
    var totalRiskAdjustedProfit = 0;
    int count = 0;

    for (int i = 0; i < PAIRS; i++) {
        for (int j = 0; j < PAIRS; j++) {
            if (i != j && ProfitMatrix[i][j] > -INF) {
                var volatility = StdDev(series(price(CurrencyPairs[i])), lookback);
                var riskAdjustedProfit = ProfitMatrix[i][j] / volatility;
                totalRiskAdjustedProfit += riskAdjustedProfit;
                count++;
            }
        }
    }

    if (count > 0) {
        var baselineThreshold = totalRiskAdjustedProfit / count; // Average risk-adjusted profit
        var performanceFactor = Equity / MaxEquity; // Performance feedback
        DynamicDeltaThreshold = baselineThreshold * performanceFactor; // Adjust threshold dynamically
    } else {
        DynamicDeltaThreshold = 0.001; // Default fallback
    }
}

// Bellman-Ford algorithm to find paths with the highest cumulative profit
function bellmanFord(int start) {
    for (int i = 0; i < PAIRS; i++) {
        PathProfit[i] = -INF; // Negative infinity for unvisited nodes
        Previous[i] = -1;     // No predecessor
    }
    PathProfit[start] = 0; // Profit starts at 0 from the source

    for (int k = 0; k < PAIRS - 1; k++) {
        for (int i = 0; i < PAIRS; i++) {
            for (int j = 0; j < PAIRS; j++) {
                if (ProfitMatrix[i][j] != -INF && PathProfit[i] + ProfitMatrix[i][j] > PathProfit[j]) {
                    PathProfit[j] = PathProfit[i] + ProfitMatrix[i][j]; // Update cumulative profit
                    Previous[j] = i; // Track the path
                }
            }
        }
    }
}

// Execute trades along the highest cumulative profit path
function executePath(int start, int end) {
    int current = end;

    while (current != -1) {
        int prev = Previous[current];
        if (prev == -1) break;

        if (ProfitMatrix[prev][current] > 0) {
            enterLong(CurrencyPairs[prev]);
            enterShort(CurrencyPairs[current]);
        } else {
            enterShort(CurrencyPairs[prev]);
            enterLong(CurrencyPairs[current]);
        }

        current = prev;
    }
}

// Continuously execute trades while conditions exist
function executeContinuousTrades(int start) {
    while (1) {
        calculateProfitMatrix();          // Update the graph with live data
        calculateDynamicDeltaThreshold(); // Adjust the threshold dynamically
        bellmanFord(start);               // Recalculate paths with the highest cumulative profit

        for (int i = 0; i < PAIRS; i++) {
            for (int j = 0; j < PAIRS; j++) {
                if (i != j && ProfitMatrix[i][j] > DynamicDeltaThreshold) {
                    enterLong(CurrencyPairs[i]);
                    enterShort(CurrencyPairs[j]);
                    printf("Executing Trade: Long %s, Short %s\n", CurrencyPairs[i], CurrencyPairs[j]);
                }
            }
        }

        // Exit the loop when the session ends. isBacktest()/getBacktestPeriod()
        // do not exist in lite-C, and == cannot compare strings anyway;
        // Zorro's EXITRUN status flag serves this purpose.
        if (is(EXITRUN))
            break;
    }
}

// Main trading function
function run() {
    set(PLOTNOW);
    int startPair = 0; // Start from the first currency pair

    executeContinuousTrades(startPair);
}

Last edited by TipmyPip; 12/25/24 15:58.
PCA and Stochastic Volatility [Re: TipmyPip] #488498
12/25/24 17:43
12/25/24 17:43
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
Dynamic Trading Strategy with PCA and Stochastic Volatility

You are tasked with designing an optimal dynamic trading strategy that exploits volatility patterns across multiple currency pairs. This strategy incorporates Principal Component Analysis (PCA) for dimensionality reduction and Stochastic Calculus to model the evolving market dynamics.

Setup

Currency Pairs and Volatility Dynamics:

Let X_t = [X_1(t), X_2(t), ..., X_n(t)] represent the instantaneous volatility spreads of n currency pairs at time t.
Each volatility spread X_i(t) evolves according to a stochastic differential equation (SDE):

dX_i(t) = mu_i * dt + sigma_i * dW_i(t),

where:
mu_i is the drift of the volatility spread (mean reversion to long-term volatility).
sigma_i is the volatility of the volatility spread (vol of vol).
W_i(t) is an independent Wiener process for each pair.

Principal Component Analysis (PCA):

At each time step, perform PCA on the volatility matrix to identify the top k principal components (k <= n).
The components are represented as:

Y_t = V * X_t,

where:
V is the matrix of eigenvectors (PCA transformation matrix).
Y_t contains the transformed coordinates in the reduced-dimensional space.

Profitability Metric:

Define the profitability of a trading path between two currency pairs based on the PCA-reduced volatility data:

Profit(i, j) = integral from t0 to t1 [ log(Y_i(t) / Y_j(t)) * dt ],
where Y_i(t) and Y_j(t) are the PCA components of the respective pairs.

The Puzzle

Stochastic Optimization:

Given n = 28 currency pairs, model the system of volatility spreads X_t using PCA and stochastic differential equations.
Derive a strategy to maximize the expected cumulative profitability:

E [ integral from t0 to t1 [ Profit(i, j) * dt ] ],
where Profit(i, j) depends on the PCA-reduced volatility spreads.

Constraints:

The number of components k must be optimized dynamically to ensure at least 90% of the variance is captured:

sum from i=1 to k [ lambda_i ] / sum from i=1 to n [ lambda_i ] >= 0.90,

where lambda_i are the eigenvalues of the covariance matrix.
The strategy must account for transaction costs proportional to the change in position:

Cost = c * abs(Delta_Position),

where c is the transaction cost coefficient.

Optimal Stopping:

Identify the optimal stopping time tau to rebalance the portfolio by solving the following stochastic control problem:

V(X_t) = sup_tau E [ integral from t to tau [ Profit(i, j) * exp(-r * (s - t)) * ds ] - Cost(tau) ], where r is the risk-free discount rate.

Questions to Solve

Variance Threshold:

At each time step, determine the optimal number of components k such that:

sum from i=1 to k [ lambda_i ] / sum from i=1 to n [ lambda_i ] >= 0.90, where lambda_i are the eigenvalues of the covariance matrix.

Stochastic Path Simulation:

Simulate the paths of X_t and Y_t for 28 currency pairs over a period [t0, t1].
How do the principal components evolve over time?

Profitability Surface:

Compute the profitability for all pairwise combinations of (i, j) and identify the most profitable trading path over [t0, t1].

Optimal Rebalancing:

Solve the optimal stopping problem to determine when to rebalance the portfolio to maximize net profitability.


Code
#define PAIRS 28 // Number of currency pairs

string CurrencyPairs[PAIRS] = {
    "EURUSD", "GBPUSD", "USDJPY", "GBPJPY", "USDCAD", "EURAUD", "EURJPY",
    "AUDCAD", "AUDJPY", "AUDNZD", "AUDUSD", "CADJPY", "EURCAD", "EURCHF",
    "EURGBP", "EURNZD", "GBPCAD", "GBPCHF", "NZDCAD", "NZDJPY", "NZDUSD",
    "USDCHF", "CHFJPY", "AUDCHF", "GBPNZD", "NZDCHF", "CADCHF", "GBPAUD"
};

vars VolatilityMatrix[PAIRS][PAIRS];  // Volatility relationship matrix
vars CovMatrix[PAIRS][PAIRS];        // Covariance matrix
vars Eigenvalues[PAIRS];             // Eigenvalues from PCA
vars Eigenvectors[PAIRS][PAIRS];     // Eigenvectors from PCA
vars volatilities[PAIRS];            // Volatility for each pair
vars ReducedMatrix[PAIRS][PAIRS];    // Reduced matrix for all components
int lookback = 50;                   // Lookback period for volatility calculation

// Function to calculate volatilities for all pairs
function calculateVolatilities() {
    for (int i = 0; i < PAIRS; i++) {
        volatilities[i] = StdDev(series(price(CurrencyPairs[i])), lookback);
    }
}

// Function to calculate the volatility matrix (volatility spreads)
function calculateVolatilityMatrix() {
    for (int i = 0; i < PAIRS; i++) {
        for (int j = 0; j < PAIRS; j++) {
            if (i != j) {
                VolatilityMatrix[i][j] = volatilities[i] - volatilities[j];
            } else {
                VolatilityMatrix[i][j] = 0; // Self-loops have no effect
            }
        }
    }
}

// Calculate the covariance between two series of data
function covariance(vars series1, vars series2, int length) {
    var mean1 = 0, mean2 = 0, cov = 0;

    // Step 1: Compute the means of both series
    for (int i = 0; i < length; i++) {
        mean1 += series1[i];
        mean2 += series2[i];
    }
    mean1 /= length;
    mean2 /= length;

    // Step 2: Compute the covariance
    for (int i = 0; i < length; i++) {
        cov += (series1[i] - mean1) * (series2[i] - mean2);
    }

    return cov / length;
}

// Function to calculate the covariance matrix
function calculateCovarianceMatrix() {
    int length = PAIRS; // Length of the series (number of pairs)
    for (int i = 0; i < PAIRS; i++) {
        for (int j = 0; j < PAIRS; j++) {
            CovMatrix[i][j] = covariance(VolatilityMatrix[i], VolatilityMatrix[j], length);
        }
    }
}

// Perform PCA: Decompose the covariance matrix into eigenvalues and eigenvectors
function performPCA() {
    for (int i = 0; i < PAIRS; i++) {
        for (int j = 0; j < PAIRS; j++) {
            Eigenvectors[i][j] = CovMatrix[i][j]; // Initialize with CovMatrix for decomposition
        }
    }

    // NOTE: this is only a crude stand-in for a true eigen-decomposition;
    // it uses each row's Euclidean norm as an "eigenvalue" proxy. A proper
    // Jacobi decomposition is sketched after this listing.
    for (int i = 0; i < PAIRS; i++) {
        Eigenvalues[i] = 0;
        for (int j = 0; j < PAIRS; j++) {
            Eigenvalues[i] += Eigenvectors[i][j] * Eigenvectors[i][j];
        }
        Eigenvalues[i] = sqrt(Eigenvalues[i]); // Compute the eigenvalue magnitude
    }

    // Sort eigenvalues and eigenvectors by descending eigenvalue order
    for (int i = 0; i < PAIRS - 1; i++) {
        for (int j = i + 1; j < PAIRS; j++) {
            if (Eigenvalues[i] < Eigenvalues[j]) {
                // Swap eigenvalues
                var tempValue = Eigenvalues[i];
                Eigenvalues[i] = Eigenvalues[j];
                Eigenvalues[j] = tempValue;

                // Swap eigenvectors
                for (int k = 0; k < PAIRS; k++) {
                    var tempVector = Eigenvectors[k][i];
                    Eigenvectors[k][i] = Eigenvectors[k][j];
                    Eigenvectors[k][j] = tempVector;
                }
            }
        }
    }
}

// Determine the optimal number of components based on cumulative variance explained
function optimizeComponents(vars Eigenvalues, int totalComponents, var targetVariance) {
    var totalVariance = 0;
    for (int i = 0; i < totalComponents; i++) {
        totalVariance += Eigenvalues[i];
    }

    var cumulativeVariance = 0;
    for (int i = 0; i < totalComponents; i++) {
        cumulativeVariance += Eigenvalues[i];
        if (cumulativeVariance / totalVariance >= targetVariance) { // Target variance explained
            return i + 1; // Return the optimal number of components
        }
    }
    return totalComponents; // Default to all components
}

// Project the volatility matrix onto the top principal components
function reduceMatrix(int topComponents) {
    for (int i = 0; i < PAIRS; i++) {
        for (int j = 0; j < topComponents; j++) {
            ReducedMatrix[i][j] = dotProduct(VolatilityMatrix[i], Eigenvectors[j]);
        }
    }
}

// Trade logic based on PCA-reduced components
function tradeWithPCA(int topComponents, var threshold) {
    for (int i = 0; i < PAIRS; i++) {
        for (int j = 0; j < topComponents; j++) {
            if (abs(ReducedMatrix[i][j]) > threshold) {
                if (ReducedMatrix[i][j] > 0) {
                    enterLong(CurrencyPairs[i]);
                } else {
                    enterShort(CurrencyPairs[i]);
                }
            }
        }
    }
}

// Main trading function
function run() {
    set(PLOTNOW);

    calculateVolatilities();          // Step 1: Calculate volatilities
    calculateVolatilityMatrix();      // Step 2: Compute volatility matrix
    calculateCovarianceMatrix();      // Step 3: Compute covariance matrix
    performPCA();                     // Step 4: Perform PCA

    // Optimize the number of components based on 90% variance explained
    int optimalComponents = optimizeComponents(Eigenvalues, PAIRS, 0.90);

    // Reduce the matrix using the optimal number of components
    reduceMatrix(optimalComponents);

    // Trade using PCA-reduced features
    var threshold = 0.05; // Set a trading threshold
    tradeWithPCA(optimalComponents, threshold);
}
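
The listing above calls dotProduct(), and the following posts call eigenDecomposition(), yet lite-C provides neither out of the box. Below is a minimal sketch of both; the Jacobi rotation is a standard textbook method and only one assumption about how the gap might be filled, not the implementation the strategies were tuned with:

Code
#define NP 28  // matrix dimension (assumed; adjust to PAIRS or COMBINATIONS)

// dot product of two length-NP vectors
var dotProduct(var* a, var* b)
{
	var s = 0;
	int i;
	for(i = 0; i < NP; i++)
		s += a[i]*b[i];
	return s;
}

// Jacobi eigen-decomposition of a symmetric NP x NP matrix M (modified in
// place); fills Evals with eigenvalues and Evecs with column eigenvectors
function eigenDecomposition(var M[NP][NP], var* Evals, var Evecs[NP][NP])
{
	int i, j, k, sweep;
	var theta, c, s, d, t1, t2;
	for(i = 0; i < NP; i++)
		for(j = 0; j < NP; j++)
			Evecs[i][j] = ifelse(i == j, 1., 0.); // start from identity
	for(sweep = 0; sweep < 50; sweep++) {         // fixed sweep count for brevity
		for(i = 0; i < NP-1; i++)
		for(j = i+1; j < NP; j++) {
			if(abs(M[i][j]) > 1e-12) {
				d = M[j][j] - M[i][i];
				if(abs(d) < 1e-30) theta = 0.25*PI*sign(M[i][j]);
				else theta = 0.5*atan(2.*M[i][j]/d);
				c = cos(theta); s = sin(theta);
				for(k = 0; k < NP; k++) {  // column rotation: M = M*J, V = V*J
					t1 = M[k][i]; t2 = M[k][j];
					M[k][i] = c*t1 - s*t2;
					M[k][j] = s*t1 + c*t2;
					t1 = Evecs[k][i]; t2 = Evecs[k][j];
					Evecs[k][i] = c*t1 - s*t2;
					Evecs[k][j] = s*t1 + c*t2;
				}
				for(k = 0; k < NP; k++) {  // row rotation: M = J'*M
					t1 = M[i][k]; t2 = M[j][k];
					M[i][k] = c*t1 - s*t2;
					M[j][k] = s*t1 + c*t2;
				}
			}
		}
	}
	for(i = 0; i < NP; i++)
		Evals[i] = M[i][i]; // diagonal now holds the eigenvalues
}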

Re: PCA and Stochastic Volatility [Re: TipmyPip] #488508
01/02/25 06:37
01/02/25 06:37
Joined: Apr 2023
Posts: 60
V
vicknick Offline
Junior Member
vicknick  Offline
Junior Member
V

Joined: Apr 2023
Posts: 60
Hi Tipmy. Is all the code above produced by ZorroGPT? That code is quite complex from what I see, with the code separated into different functions and very organized. But when I use ZorroGPT to come up with code, it isn't as "sophisticated" as yours. I am wondering what prompt you use to have it produce such complex code. Thanks!

Last edited by vicknick; 01/02/25 06:39.
Re: PCA and Stochastic Volatility [Re: vicknick] #488509
01/02/25 06:51
01/02/25 06:51
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
First, thank you for looking into the code and for your interest in ZorroGPT. The code GPT produces for Zorro is still fairly simple, procedural code; I suppose "complex" is a relative term. Thank you for appreciating the structure and organization.

If you are using ZorroGPT, you need to define and describe your strategy in simple English, with no double meanings or imprecision. In addition, work in short sessions of no more than 10 prompts, each focused on a single problem or task; after you finish a task, start a new session by copying in the code from the previous one. Every strategy has a different description, and so will the prompt or series of prompts.

If you would like to share an example of your strategy, I will help you formulate the prompts.

Last edited by TipmyPip; 01/02/25 06:52.
Entangled Kernel Arbitrage [Re: TipmyPip] #488510
01/02/25 09:02
01/02/25 09:02
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
Consider the following puzzle:

You are tasked with designing an algorithmic trading strategy for volatility arbitrage using a nonlinear volatility model. Your system must operate across
𝑁=28 currency pairs, where the relationships between volatilities are nonlinear and highly entangled. The problem involves deciphering the following computational puzzle:

The Problem
Volatility Web: Each currency pair 𝐶𝑖 has an evolving volatility 𝑉𝑖(𝑡) over time 𝑡, defined by the entangled volatility equation:

Vᵢ(t) = αᵢ ⋅ sin((π / 2) ⋅ Vⱼ(t-1)) + βᵢ ⋅ cos((π / 3) ⋅ Vₖ(t-2)) + γᵢ

Where:

𝑗≠𝑘≠𝑖 and 𝑗,𝑘∈[1,𝑁]

𝛼𝑖,𝛽𝑖,𝛾𝑖 are coefficients dependent on historical price movements.
𝑡 represents discrete time steps (ticks).
Kernel Transform: To uncover hidden arbitrage opportunities, a Gaussian kernel transformation is applied to the volatility spreads between pairs:

Kᵢⱼ(t) = exp(-((Vᵢ(t) - Vⱼ(t))²) / (2 ⋅ σ²))

Where 𝐾𝑖𝑗(𝑡)​ represents the similarity between volatilities 𝑉𝑖 and 𝑉𝑗.

Dynamic Market Dynamics: At any tick 𝑡, the kernel matrix 𝐾(𝑡) evolves based on incoming price data. Your goal is to extract principal components from 𝐾(𝑡) to identify arbitrage paths.

Principal Component Noise: The extracted components 𝜆1,𝜆2,…,𝜆𝑛 are influenced by noise-induced eigenvalue drift:

λₖ'(t) = λₖ(t) ⋅ (1 + ηₖ(t))

Where:

𝜂𝑘(𝑡) is Gaussian noise with variance proportional to the eigenvalue magnitude:
𝜂𝑘(𝑡)∼𝒩(0,𝜆𝑘(𝑡)⋅𝜈)​.
𝜈 is a tunable noise coefficient.

Profit Extraction: The trading signal for each pair 𝐶𝑖 is generated from the reduced kernel matrix 𝑅𝑖(𝑡), calculated by projecting 𝐾(𝑡) onto the top 𝑚 principal components:

Rᵢ(t) = ∑ₖ₌₁ᵐ (⟨Kᵢ(t), vₖ⟩ ⋅ vₖ)

Where
𝑣𝑘 is the 𝑘-th eigenvector.

Dynamic Threshold for Execution: Trades are executed when:

Signal(Cᵢ) = Rᵢ(t) / √(∑ⱼ₌₁ⁿ Rⱼ(t)²) > Θ(t)

The threshold Θ(𝑡) evolves dynamically:

Θ(t) = 𝔼[R(t)] + δ ⋅ StdDev(R(t))

Where:

𝔼[𝑅(𝑡)] is the mean of 𝑅𝑖(𝑡) across all pairs.
𝛿 is a risk-adjustment coefficient.

Your Tasks
Model the Volatility Web: Simulate 𝑉𝑖(𝑡) for all 𝑖∈[1,𝑁] over 1000 ticks, ensuring the coefficients 𝛼𝑖,𝛽𝑖,𝛾𝑖 are randomly initialized but correlated to historical price changes.

Construct the Kernel Matrix: Compute 𝐾𝑖𝑗(𝑡)​ at each tick using the Gaussian kernel formula.

Perform Kernel PCA: Decompose 𝐾(𝑡) into eigenvalues 𝜆𝑘(𝑡) and eigenvectors 𝑣𝑘(𝑡)​. Extract the top 𝑚=3 components for trading signals.

Account for Noise: Simulate 𝜂𝑘(𝑡) as Gaussian noise and apply it to the eigenvalues to observe how noise affects the trading signals.

Optimize the Threshold: Experiment with different values of 𝛿 to maximize profitability while minimizing drawdowns.

Trading Logic: Implement a trading strategy that enters long or short positions based on the dynamic threshold Θ(𝑡). Evaluate performance using a back testing framework.

Additional Complexity

To further enhance the challenge, consider:

Dynamic Sigma for Kernel: Allow 𝜎 in the Gaussian kernel to adapt based on volatility clustering:

σ(t) = σ₀ ⋅ (1 + κ ⋅ StdDev(V(t)))

Multi-Asset Dependencies: Introduce correlation across non-currency asset classes (e.g., equities, bonds) to impact 𝑉𝑖(𝑡).

Optimization of Principal Components: Automatically optimize the number of components 𝑚 to balance signal strength and noise robustness.

Puzzle Objective
Your task is to:

Identify nonlinear arbitrage paths hidden in the noisy principal components.
Design an efficient algorithm that adapts to the dynamic nature of the kernel matrix and volatility web.
Maximize cumulative profitability while maintaining a Sharpe ratio above 2.0 over the simulated 1000-tick dataset.
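
One remark before the listing: the code below approximates the execution threshold with the RMS of the reduced signals, while the puzzle defines Θ(t) = 𝔼[R(t)] + δ·StdDev(R(t)). A literal mean-plus-delta-sigma version would look like this sketch (δ is an assumed tuning parameter):

Code
// threshold per the puzzle definition: mean + delta * standard deviation
var thresholdMeanStd(var* R, int n, var delta)
{
	var mean = 0, sq = 0;
	int i;
	for(i = 0; i < n; i++) mean += R[i];
	mean /= n;
	for(i = 0; i < n; i++) sq += (R[i]-mean)*(R[i]-mean);
	return mean + delta*sqrt(sq/n);
}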


Code
#define PAIRS 28 // Number of currency pairs

string CurrencyPairs[PAIRS] = {
    "EURUSD", "GBPUSD", "USDJPY", "GBPJPY", "USDCAD", "EURAUD", "EURJPY",
    "AUDCAD", "AUDJPY", "AUDNZD", "AUDUSD", "CADJPY", "EURCAD", "EURCHF",
    "EURGBP", "EURNZD", "GBPCAD", "GBPCHF", "NZDCAD", "NZDJPY", "NZDUSD",
    "USDCHF", "CHFJPY", "AUDCHF", "GBPNZD", "NZDCHF", "CADCHF", "GBPAUD"
};

vars VolatilityMatrix[PAIRS][PAIRS];  // Volatility relationship matrix
vars kernelMatrix[PAIRS][PAIRS];     // Kernel matrix for Kernel PCA
vars eigenvalues[PAIRS];             // Eigenvalues from Kernel PCA
vars eigenvectors[PAIRS][PAIRS];     // Eigenvectors from Kernel PCA
vars volatilities[PAIRS];            // Volatility for each pair
vars ReducedMatrix[PAIRS][PAIRS];    // Reduced matrix for all components
vars smoothedSignals[PAIRS];         // Smoothed signals for risk control
int lookback = 50;                   // Lookback period for volatility calculation
var sigma = 0.5;                     // Kernel width parameter
var dynamicThreshold;                // Dynamic trading threshold

// Function to calculate volatilities for all pairs
function calculateVolatilities() {
    for (int i = 0; i < PAIRS; i++) {
        volatilities[i] = StdDev(series(price(CurrencyPairs[i])), lookback); // lookback belongs to StdDev, not series
    }
}

// Function to calculate the volatility matrix (volatility spreads)
function calculateVolatilityMatrix() {
    for (int i = 0; i < PAIRS; i++) {
        for (int j = 0; j < PAIRS; j++) {
            if (i != j) {
                VolatilityMatrix[i][j] = volatilities[i] - volatilities[j];
            } else {
                VolatilityMatrix[i][j] = 0; // Self-loops have no effect
            }
        }
    }
}

// Function to calculate the kernel matrix using a Gaussian kernel
function calculateKernelMatrix(vars VolatilityMatrix[PAIRS][PAIRS], vars kernelMatrix[PAIRS][PAIRS], var sigma) {
    for (int i = 0; i < PAIRS; i++) {
        for (int j = 0; j < PAIRS; j++) {
            kernelMatrix[i][j] = exp(-pow(VolatilityMatrix[i][j], 2) / (2 * pow(sigma, 2))); // Gaussian kernel
        }
    }
}

// Perform Kernel PCA: Decompose the kernel matrix into eigenvalues and eigenvectors
function performKernelPCA(vars kernelMatrix[PAIRS][PAIRS], vars eigenvalues, vars eigenvectors) {
    eigenDecomposition(kernelMatrix, eigenvalues, eigenvectors); // Decompose the kernel matrix
}

// Reduce data using the top principal components
function reduceKernelData(vars kernelMatrix[PAIRS][PAIRS], vars eigenvectors[PAIRS][PAIRS], vars ReducedMatrix[PAIRS][PAIRS], int numComponents) {
    for (int i = 0; i < PAIRS; i++) {
        for (int j = 0; j < numComponents; j++) {
            ReducedMatrix[i][j] = dotProduct(kernelMatrix[i], eigenvectors[j]);
        }
    }
}

// Calculate a dynamic threshold based on standard deviation of reduced signals
function calculateDynamicThreshold(int topComponents) {
    var sumSquares = 0;
    int count = 0;

    for (int i = 0; i < PAIRS; i++) {
        for (int j = 0; j < topComponents; j++) {
            sumSquares += ReducedMatrix[i][j] * ReducedMatrix[i][j];
            count++;
        }
    }
    return sqrt(sumSquares / count); // RMS of the reduced signals as threshold (an approximation of the stated mean + delta*std rule)
}

// Smooth signals to reduce noise
function smoothSignals(int topComponents) {
    for (int i = 0; i < PAIRS; i++) {
        smoothedSignals[i] = SMA(series(ReducedMatrix[i][0]), lookback); // Smooth first component
    }
}

// Trade logic based on Kernel PCA-reduced components
function tradeWithKernelPCA(int topComponents) {
    for (int i = 0; i < PAIRS; i++) {
        if (abs(smoothedSignals[i]) > dynamicThreshold) {
            if (smoothedSignals[i] > 0) {
                enterLong(CurrencyPairs[i]);
            } else {
                enterShort(CurrencyPairs[i]);
            }
        }
    }
}

// Main trading function
function run() {
    set(PLOTNOW);

    calculateVolatilities();          // Step 1: Calculate volatilities
    calculateVolatilityMatrix();      // Step 2: Compute volatility matrix

    // Step 3: Compute kernel matrix
    calculateKernelMatrix(VolatilityMatrix, kernelMatrix, sigma);

    // Step 4: Perform Kernel PCA
    performKernelPCA(kernelMatrix, eigenvalues, eigenvectors);

    // Step 5: Optimize the number of components based on variance explained
    int optimalComponents = optimizeComponents(eigenvalues, PAIRS, 0.90);

    // Step 6: Reduce data using the top principal components
    reduceKernelData(kernelMatrix, eigenvectors, ReducedMatrix, optimalComponents);

    // Step 7: Calculate dynamic threshold
    dynamicThreshold = calculateDynamicThreshold(optimalComponents);

    // Step 8: Smooth signals for stability
    smoothSignals(optimalComponents);

    // Step 9: Execute trades based on Kernel PCA-reduced features
    tradeWithKernelPCA(optimalComponents);
}

Last edited by TipmyPip; 01/07/25 12:51.
Kernel Volatility Arbitrage Enigma [Re: TipmyPip] #488511
01/02/25 09:36
01/02/25 09:36
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
You are tasked with solving a nonlinear, multi-dimensional volatility arbitrage problem. Your goal is to design a strategy that detects complex relationships between currency pair volatilities using Kernel PCA and neural networks. The relationships between volatilities are highly entangled and nonlinear, requiring advanced mathematical techniques.

The Problem
Volatility Evolution: Each currency pair 𝐶𝑖 has a volatility 𝑉𝑖(𝑡), defined as:

V_i(t) = alpha_i * sin((pi / 2) * V_j(t-1)) + beta_i * cos((pi / 3) * V_k(t-2)) + gamma_i + epsilon_i(t)

Where:

𝑗!=𝑘!=𝑖, and 𝑗,𝑘∈[1,𝑁], with 𝑁=28 currency pairs.
𝛼𝑖,𝛽𝑖,𝛾𝑖 are coefficients based on historical data.
epsilon_i(t) ~ N(0, sigma^2) is Gaussian noise.

Kernel Transformation: The volatility spreads are mapped into a high-dimensional kernel space:

K_ij(t) = exp(-((V_i(t) - V_j(t))^2) / (2 * sigma^2))

Where:

𝐾𝑖𝑗(𝑡) is the similarity measure between volatilities 𝑉𝑖 and 𝑉𝑗.
𝜎 is the kernel width.
Principal Component Analysis (PCA): Decompose the kernel matrix
𝐾(𝑡) into eigenvalues and eigenvectors:

K(t) = sum_{k=1}^N (lambda_k * (v_k @ v_k'))

Extract the top 𝑚-principal components that explain at least
90% of the variance.

Noise-Induced Drift: Eigenvalues experience stochastic drift:

lambda_k'(t) = lambda_k(t) * (1 + eta_k(t))

Where:

eta_k(t) ~ N(0, lambda_k(t) * nu).
nu is the noise scaling factor.
Reduced Data: Project the kernel matrix onto the top
𝑚 principal components:

R_i(t) = sum_{k=1}^m (dot(K_i(t), v_k) * v_k)

Dynamic Threshold: Neural networks predict thresholds for trade execution:

Theta_i(t) = NeuralNet(K_i(t), R_i(t))

Trade Signal: Normalize the reduced data and execute trades based on the signal:

Signal(C_i) = R_i(t) / sqrt(sum_{j=1}^N R_j(t)^2)

Trades are executed when:

abs(Signal(C_i)) > Theta_i(t)

Additional Complexity

Dynamic Kernel Width: Allow 𝜎 to adapt dynamically based on volatility clustering (a concrete sketch of this rule follows after this list):

sigma(t) = sigma_0 * (1 + kappa * StdDev(V(t)))

Component Selection: Use a second neural network to optimize the number of components 𝑚:

m_opt = NeuralNet_Components(K(t), lambda(t), eigenvectors)

Cross-Asset Correlations: Introduce equities, bonds, or commodities to influence 𝑉𝑖(𝑡).
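
As promised above, here is a concrete reading of the adaptive kernel width rule (a sketch; sigma_0 and kappa are assumed tuning constants, and the dispersion is taken across the current volatility vector):

Code
// adaptive kernel width: widens when cross-pair volatility dispersion rises
var adaptiveSigma(var sigma0, var kappa, var* V, int n)
{
	var mean = 0, sq = 0;
	int i;
	for(i = 0; i < n; i++) mean += V[i];
	mean /= n;
	for(i = 0; i < n; i++) sq += (V[i]-mean)*(V[i]-mean);
	return sigma0 * (1. + kappa*sqrt(sq/n));
}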

Mathematical Summary
Volatility Dynamics:

V_i(t) = alpha_i * sin((pi / 2) * V_j(t-1)) + beta_i * cos((pi / 3) * V_k(t-2)) + gamma_i + epsilon_i(t)

Kernel Matrix:

K_ij(t) = exp(-((V_i(t) - V_j(t))^2) / (2 * sigma^2))

Kernel PCA Decomposition:

K(t) = sum_{k=1}^N (lambda_k * (v_k @ v_k'))

Projected Data:

R_i(t) = sum_{k=1}^m (dot(K_i(t), v_k) * v_k)

Dynamic Threshold:

Theta_i(t) = NeuralNet(K_i(t), R_i(t))

Trade Signal:

Signal(C_i) = R_i(t) / sqrt(sum_{j=1}^N R_j(t)^2)

Objective
Simulate the market with 𝑁=28 currency pairs and 1000 ticks.
Train neural networks to predict thresholds (Theta(t)) and optimize the number of components (m).
Execute the strategy and maximize the Sharpe ratio while maintaining a maximum drawdown below 5%.
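
One step of the statement that the listing below leaves implicit is the signal normalization Signal(C_i) = R_i(t) / sqrt(sum_j R_j(t)^2). A minimal sketch of it, assuming the caller passes one reduced-signal value per pair:

Code
// L2-normalize the per-pair signals so their squared magnitudes sum to 1
function normalizeSignals(var* R, var* Signal, int n)
{
	var norm = 0;
	int i;
	for(i = 0; i < n; i++) norm += R[i]*R[i];
	norm = sqrt(max(norm, 1e-12)); // guard against division by zero
	for(i = 0; i < n; i++) Signal[i] = R[i]/norm;
}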


Code
#define PAIRS 28 // Number of currency pairs

string CurrencyPairs[PAIRS] = {
    "EURUSD", "GBPUSD", "USDJPY", "GBPJPY", "USDCAD", "EURAUD", "EURJPY",
    "AUDCAD", "AUDJPY", "AUDNZD", "AUDUSD", "CADJPY", "EURCAD", "EURCHF",
    "EURGBP", "EURNZD", "GBPCAD", "GBPCHF", "NZDCAD", "NZDJPY", "NZDUSD",
    "USDCHF", "CHFJPY", "AUDCHF", "GBPNZD", "NZDCHF", "CADCHF", "GBPAUD"
};

vars kernelMatrix[PAIRS][PAIRS];     // Kernel matrix for Kernel PCA
vars ReducedMatrix[PAIRS][PAIRS];    // Reduced matrix for principal components
vars Thresholds[PAIRS];              // Neural network predicted thresholds
vars smoothedSignals[PAIRS];         // Smoothed signals for risk control
vars Predictions[PAIRS];             // Neural network predictions
vars volatilities[PAIRS];            // Volatility for each pair
vars eigenvalues[PAIRS];             // Eigenvalues from PCA
vars eigenvectors[PAIRS][PAIRS];     // Eigenvectors from PCA
int lookback = 50;                   // Lookback period for volatility calculation
int NeuralModelThreshold;            // Handle for neural network predicting thresholds
int NeuralModelSigma;                // Handle for neural network predicting kernel width

var sigma = 0.5;                     // Default kernel width parameter
var dynamicThreshold;                // Dynamic trading threshold

// Function to calculate volatilities for all pairs
function calculateVolatilities() {
    for (int i = 0; i < PAIRS; i++) {
        volatilities[i] = StdDev(series(price(CurrencyPairs[i])), lookback); // lookback belongs to StdDev, not series
    }
}

// Function to calculate the kernel matrix using a Gaussian kernel
function calculateKernelMatrix(vars volatilities[PAIRS], vars kernelMatrix[PAIRS][PAIRS], var sigma) {
    for (int i = 0; i < PAIRS; i++) {
        for (int j = 0; j < PAIRS; j++) {
            kernelMatrix[i][j] = exp(-pow(volatilities[i] - volatilities[j], 2) / (2 * pow(sigma, 2))); // Gaussian kernel
        }
    }
}

// Perform Kernel PCA: Decompose the kernel matrix into eigenvalues and eigenvectors
function performKernelPCA(vars kernelMatrix[PAIRS][PAIRS], vars eigenvalues, vars eigenvectors) {
    eigenDecomposition(kernelMatrix, eigenvalues, eigenvectors); // Decompose the kernel matrix
}

// Reduce data using the top principal components
function reduceKernelData(vars kernelMatrix[PAIRS][PAIRS], vars eigenvectors[PAIRS][PAIRS], vars ReducedMatrix[PAIRS][PAIRS], int numComponents) {
    for (int i = 0; i < PAIRS; i++) {
        for (int j = 0; j < numComponents; j++) {
            ReducedMatrix[i][j] = dotProduct(kernelMatrix[i], eigenvectors[j]);
        }
    }
}

// Neural network training function for dynamic threshold prediction
function trainNeuralNetworks() {
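    // NOTE: Features, Targets, and NumSamples are assumed to be prepared
    // elsewhere from historical data; they are not defined in this listing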
    // Train for thresholds
    LayerPerceptron(10); // 10 nodes in hidden layer
    LayerNormalize();
    LayerPerceptron(1);  // Output layer for threshold prediction
    NeuralModelThreshold = Train(
        "threshold_model", // Model name
        Features,          // Input features (kernel matrix and volatilities)
        Targets,           // Target outputs (thresholds from historical data)
        NumSamples         // Number of samples
    );

    // Train for sigma adjustment
    LayerPerceptron(10); // 10 nodes in hidden layer
    LayerNormalize();
    LayerPerceptron(1);  // Output layer for kernel width (sigma) prediction
    NeuralModelSigma = Train(
        "sigma_model",    // Model name
        Features,         // Input features (historical volatilities and returns)
        Targets,          // Target outputs (optimal sigma values)
        NumSamples        // Number of samples
    );
}

// Predict dynamic thresholds using the neural network
function predictThresholds(vars kernelMatrix[PAIRS][PAIRS], vars Thresholds[PAIRS]) {
    for (int i = 0; i < PAIRS; i++) {
        Thresholds[i] = Predict(NeuralModelThreshold, kernelMatrix[i]); // Predict for each row in kernel matrix
    }
}

// Predict dynamic sigma using the neural network
function predictSigma(vars volatilities[PAIRS]) {
    return Predict(NeuralModelSigma, volatilities); // Predict sigma dynamically
}

// Calculate a dynamic threshold based on reduced signals
function calculateDynamicThreshold(int topComponents) {
    var sumSquares = 0;
    int count = 0;

    for (int i = 0; i < PAIRS; i++) {
        for (int j = 0; j < topComponents; j++) {
            sumSquares += ReducedMatrix[i][j] * ReducedMatrix[i][j];
            count++;
        }
    }
    return sqrt(sumSquares / count); // Standard deviation as threshold
}

// Smooth signals to reduce noise
function smoothSignals(vars Predictions[PAIRS], vars smoothedSignals[PAIRS]) {
    for (int i = 0; i < PAIRS; i++) {
        smoothedSignals[i] = SMA(series(Predictions[i]), lookback);
    }
}

// Trade logic with neural network-predicted thresholds
function tradeWithNeuralNetwork(vars smoothedSignals[PAIRS], vars Thresholds[PAIRS]) {
    for (int i = 0; i < PAIRS; i++) {
        if (smoothedSignals[i] > Thresholds[i]) {
            enterLong(CurrencyPairs[i]);
        } else if (smoothedSignals[i] < -Thresholds[i]) {
            enterShort(CurrencyPairs[i]);
        }
    }
}

// Main trading function
function run() {
    set(PLOTNOW);

    // Step 1: Calculate volatilities
    calculateVolatilities();

    // Step 2: Predict sigma using the neural network
    sigma = predictSigma(volatilities);

    // Step 3: Compute kernel matrix
    calculateKernelMatrix(volatilities, kernelMatrix, sigma);

    // Step 4: Perform Kernel PCA
    performKernelPCA(kernelMatrix, eigenvalues, eigenvectors);

    // Step 5: Optimize the number of components based on variance explained
    int optimalComponents = optimizeComponents(eigenvalues, PAIRS, 0.90);

    // Step 6: Reduce data using the top principal components
    reduceKernelData(kernelMatrix, eigenvectors, ReducedMatrix, optimalComponents);

    // Step 7: Predict thresholds using the neural network
    predictThresholds(kernelMatrix, Thresholds);

    // Step 8: Generate predictions (e.g., signal strength)
    for (int i = 0; i < PAIRS; i++) {
        Predictions[i] = dotProduct(kernelMatrix[i], Thresholds);
    }

    // Step 9: Smooth predictions for stable signals
    smoothSignals(Predictions, smoothedSignals);

    // Step 10: Execute trades based on predictions and thresholds
    tradeWithNeuralNetwork(smoothedSignals, Thresholds);
}

Last edited by TipmyPip; 01/02/25 09:37.
The Volatility Feedback Arbitrage [Re: TipmyPip] #488512
01/02/25 10:10
01/02/25 10:10
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
Problem Context

You are managing a portfolio of 𝑁=28 currency pairs, denoted as 𝐶1,𝐶2,…,𝐶𝑁. Your goal is to identify volatility arbitrage opportunities by analyzing volatility spreads and return correlations, projecting them into a nonlinear feature space, and recursively refining the results to adapt to market dynamics.

Market Dynamics
1. Volatility Spread Calculation

The volatility spread between any two currency pairs 𝐶𝑖 and 𝐶𝑗 is given by:

V_ij(t) = sigma_i(t) - sigma_j(t),

where:

sigma_i(t): Rolling standard deviation of currency pair C_i over the past L time steps.
V_ij(t): The relative volatility between C_i and C_j.
2. Return Correlation

The return correlation between 𝐶𝑖 and 𝐶𝑗 evolves according to:

rho_ij(t+1) = rho_ij(t) + eta_ij(t),

where:

rho_ij(t): Pearson correlation coefficient between the returns of C_i and C_j.
eta_ij(t) ~ N(0, nu^2): Gaussian noise representing market shocks.

Kernel Transformation
To capture nonlinear relationships, both the volatility spread matrix (𝑉) and the correlation matrix (𝑟ℎ𝑜) are projected into a high-dimensional feature space using Gaussian kernels.

Volatility Kernel:

K^(V)_ij = exp(-((V_ij - V_kl)^2) / (2 * sigma_V^2)),

where sigma_V is the kernel width for volatility features.

Correlation Kernel:

K^(rho)_ij = exp(-((rho_ij - rho_kl)^2) / (2 * sigma_rho^2)),

where sigma_rho is the kernel width for correlation features.

Recursive Feedback Mechanism
The reduced features from each kernel matrix influence the other in a feedback loop:

Feedback for Volatility Kernel:

K^(V)_ij <- K^(V)_ij + lambda * R^(rho)_ij,

where:

R^(rho)_ij: Reduced features from the correlation kernel matrix K^(rho).

Feedback for Correlation Kernel:
K^(rho)_ij <- K^(rho)_ij + lambda * R^(V)_ij,

where:

R^(V)_ij: Reduced features from the volatility kernel matrix K^(V).

Kernel PCA
Each kernel matrix (K^(V), K^(rho)) is decomposed using Kernel PCA:

K(t) = sum_{k=1}^m lambda_k * (v_k @ v_k'),

where:

lambda_k: Eigenvalues of the kernel matrix.
v_k: Eigenvectors of the kernel matrix.
m: Number of principal components that explain >= 90% of the variance.
The reduced feature matrices are:

R^(V) = sum_{k=1}^m (lambda_k * v_k),
R^(rho) = sum_{k=1}^m (lambda_k * v_k).

Neural Network Predictions
Dynamic Thresholds: Two neural networks predict dynamic thresholds for volatility (Theta^(V)) and correlation (Theta^(rho)):

Theta^(V)_ij = NN_1(R^(V)_ij, context),
Theta^(rho)_ij = NN_2(R^(rho)_ij, context).

Meta-Neural Network: A third neural network combines these predictions to generate final trade signals:

S_ij = MetaNN(Theta^(V)_ij, Theta^(rho)_ij, R^(V)_ij, R^(rho)_ij).

Trade Execution

Signal Normalization: Normalize the signals:

S^_ij = S_ij / sqrt(sum_{k,l} S_kl^2),

ensuring that trade magnitudes are proportional to signal strength.

Trade Conditions: Execute a trade when:

abs(S^_ij) > Theta,

where Theta is a global threshold, dynamically adjusted based on portfolio volatility.

Optimization Goals

Your task is to design a strategy that:

Maximizes the Sharpe Ratio:

Sharpe Ratio = (Mean(Returns) - Risk-Free Rate) / StdDev(Returns).

Minimizes Maximum Drawdown:

Max Drawdown = max(Peak - Trough),

ensuring drawdown stays below 5%.

Explains Variance in PCA: Ensure 𝑚 components explain at least 90% of the variance:

sum_{k=1}^m lambda_k / sum_{k=1}^N lambda_k >= 0.9.

Complete Problem Workflow

Step 1: Calculate Volatility Spread:

V_ij(t) = sigma_i(t) - sigma_j(t)

Step 2: Update Correlations:

rho_ij(t+1) = rho_ij(t) + eta_ij(t)

Step 3: Compute Kernel Matrices:

Volatility Kernel:

K^(V)_ij = exp(-((V_ij - V_kl)^2) / (2 * sigma_V^2))

Correlation Kernel:

K^(rho)_ij = exp(-((rho_ij - rho_kl)^2) / (2 * sigma_rho^2))

Step 4: Perform Feedback Updates:

Volatility Feedback:

K^(V)_ij <- K^(V)_ij + lambda * R^(rho)_ij

Correlation Feedback:

K^(rho)_ij <- K^(rho)_ij + lambda * R^(V)_ij

Step 5: Decompose Using Kernel PCA:

K = sum_{k=1}^m lambda_k * (v_k @ v_k')

Step 6: Predict Thresholds and Signals:

Dynamic Thresholds:

Theta^(V)_ij = NN_1(R^(V)_ij, context)
Theta^(rho)_ij = NN_2(R^(rho)_ij, context)

Meta-Signal:

S_ij = MetaNN(Theta^(V)_ij, Theta^(rho)_ij, R^(V)_ij, R^(rho)_ij)

Step 7: Execute Trades:

Normalize Signals:

S^_ij = S_ij / sqrt(sum_{k,l} S_kl^2)

Trade Decision:

if abs(S^_ij) > Theta: Execute Trade
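
A note before the listing: the code below proxies rho_ij with a per-bar close-minus-open value. A literal rolling Pearson correlation of two return series, as defined in Step 2, would look like this sketch (the caller is assumed to supply aligned return arrays):

Code
// rolling Pearson correlation of two length-n return arrays
var pearson(var* a, var* b, int n)
{
	var ma = 0, mb = 0, cov = 0, va = 0, vb = 0;
	int i;
	for(i = 0; i < n; i++) { ma += a[i]; mb += b[i]; }
	ma /= n; mb /= n;
	for(i = 0; i < n; i++) {
		cov += (a[i]-ma)*(b[i]-mb);
		va  += (a[i]-ma)*(a[i]-ma);
		vb  += (b[i]-mb)*(b[i]-mb);
	}
	return cov / sqrt(max(va*vb, 1e-12)); // guard against zero variance
}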



Code
#define PAIRS 28 // Number of currency pairs

string CurrencyPairs[PAIRS] = {
    "EURUSD", "GBPUSD", "USDJPY", "GBPJPY", "USDCAD", "EURAUD", "EURJPY",
    "AUDCAD", "AUDJPY", "AUDNZD", "AUDUSD", "CADJPY", "EURCAD", "EURCHF",
    "EURGBP", "EURNZD", "GBPCAD", "GBPCHF", "NZDCAD", "NZDJPY", "NZDUSD",
    "USDCHF", "CHFJPY", "AUDCHF", "GBPNZD", "NZDCHF", "CADCHF", "GBPAUD"
};

vars volatilities[PAIRS];             // Volatility for each pair
vars correlations[PAIRS];             // Correlations for each pair
vars kernelMatrix1[PAIRS][PAIRS];     // Kernel matrix for PCA-1
vars kernelMatrix2[PAIRS][PAIRS];     // Kernel matrix for PCA-2
vars ReducedMatrix1[PAIRS][PAIRS];    // Reduced matrix for PCA-1
vars ReducedMatrix2[PAIRS][PAIRS];    // Reduced matrix for PCA-2
vars FeedbackMatrix1[PAIRS];          // Feedback-influenced matrix for PCA-1
vars FeedbackMatrix2[PAIRS];          // Feedback-influenced matrix for PCA-2
vars Threshold1[PAIRS];               // Thresholds from NN-1
vars Threshold2[PAIRS];               // Thresholds from NN-2
vars MetaFeatures[PAIRS];             // Meta-features for the meta neural network
vars FinalSignals[PAIRS];             // Final signals from the meta neural network
int lookback = 50;                    // Lookback period
int NN1, NN2, MetaNN;                 // Neural network handles
var lambda = 0.5;                     // Cross-influence weight
var sigma1 = 0.5;                     // Initial kernel width for PCA-1
var sigma2 = 0.5;                     // Initial kernel width for PCA-2

// Step 1: Calculate volatilities and correlations
function calculateFeatures() {
    for (int i = 0; i < PAIRS; i++) {
        volatilities[i] = StdDev(series(price(CurrencyPairs[i])), lookback);
        correlations[i] = priceClose(CurrencyPairs[i]) - priceOpen(CurrencyPairs[i]); // crude proxy: open-to-close move, not a true Pearson correlation (see the sketch above)
    }
}

// Step 2: Calculate kernel matrices with initial parameters
function calculateKernelMatrices() {
    for (int i = 0; i < PAIRS; i++) {
        for (int j = 0; j < PAIRS; j++) {
            kernelMatrix1[i][j] = exp(-(pow(volatilities[i] - volatilities[j], 2)) / (2 * pow(sigma1, 2)));
            kernelMatrix2[i][j] = exp(-(pow(correlations[i] - correlations[j], 2)) / (2 * pow(sigma2, 2)));
        }
    }
}

// Step 3: Perform Kernel PCA
function performKernelPCA(vars kernelMatrix[PAIRS][PAIRS], vars eigenvalues, vars eigenvectors) {
    eigenDecomposition(kernelMatrix, eigenvalues, eigenvectors);
}

// Step 4: Reduce kernel data
function reduceKernelData(vars kernelMatrix[PAIRS][PAIRS], vars eigenvectors[PAIRS][PAIRS], vars ReducedMatrix[PAIRS][PAIRS], int components) {
    for (int i = 0; i < PAIRS; i++) {
        for (int j = 0; j < components; j++) {
            ReducedMatrix[i][j] = dotProduct(kernelMatrix[i], eigenvectors[j]);
        }
    }
}

// Step 5: Apply feedback adjustments
function applyFeedbackAdjustments() {
    for (int i = 0; i < PAIRS; i++) {
        FeedbackMatrix1[i] = dotProduct(ReducedMatrix2[i], ReducedMatrix1[i]); // PCA-1 influenced by PCA-2
        FeedbackMatrix2[i] = dotProduct(ReducedMatrix1[i], ReducedMatrix2[i]); // PCA-2 influenced by PCA-1
    }
}

// Step 6: Train and predict neural networks
function trainNeuralNetworks() {
    // Train NN-1
    LayerPerceptron(10);
    LayerNormalize();
    LayerPerceptron(1);
    NN1 = Train("NN1", ReducedMatrix1, Threshold1, PAIRS);

    // Train NN-2
    LayerPerceptron(10);
    LayerNormalize();
    LayerPerceptron(1);
    NN2 = Train("NN2", ReducedMatrix2, Threshold2, PAIRS);

    // Train Meta Neural Network
    LayerPerceptron(5);
    LayerNormalize();
    LayerPerceptron(1);
    MetaNN = Train("MetaNN", [Threshold1, Threshold2, FeedbackMatrix1, FeedbackMatrix2], FinalSignals, PAIRS);
}

function predictSignalsAndThresholds() {
    for (int i = 0; i < PAIRS; i++) {
        Threshold1[i] = Predict(NN1, FeedbackMatrix1[i]);
        Threshold2[i] = Predict(NN2, FeedbackMatrix2[i]);

        // Meta Neural Network combines predictions
        // lite-C has no array literals; pack the meta-features explicitly
        var metaIn[4];
        metaIn[0] = Threshold1[i];
        metaIn[1] = Threshold2[i];
        metaIn[2] = FeedbackMatrix1[i];
        metaIn[3] = FeedbackMatrix2[i];
        FinalSignals[i] = Predict(MetaNN, metaIn);
    }
}

// Step 7: Execute trades
function executeTrades() {
    for (int i = 0; i < PAIRS; i++) {
        if (FinalSignals[i] > 0.5) {
            enterLong(CurrencyPairs[i]);
        } else if (FinalSignals[i] < -0.5) {
            enterShort(CurrencyPairs[i]);
        }
    }
}

// Main function
function run() {
    set(PLOTNOW);

    // Step 1: Calculate features
    calculateFeatures();

    // Step 2: Compute initial kernel matrices
    calculateKernelMatrices();

    // Step 3: Perform Kernel PCA
    // allocate storage for the decompositions; uninitialized 'vars' pointers would crash
    var eigenvalues1[PAIRS], eigenvectors1[PAIRS][PAIRS];
    var eigenvalues2[PAIRS], eigenvectors2[PAIRS][PAIRS];
    performKernelPCA(kernelMatrix1, eigenvalues1, eigenvectors1);
    performKernelPCA(kernelMatrix2, eigenvalues2, eigenvectors2);

    // Step 4: Reduce data
    reduceKernelData(kernelMatrix1, eigenvectors1, ReducedMatrix1, 3); // Top 3 components
    reduceKernelData(kernelMatrix2, eigenvectors2, ReducedMatrix2, 3);

    for (int iter = 0; iter < 5; iter++) { // Recursive iterations
        // Step 5: Apply feedback adjustments
        applyFeedbackAdjustments();

        // Step 6: Recompute kernel matrices with feedback
        calculateKernelMatrices();

        // Step 7: Perform Kernel PCA again
        performKernelPCA(kernelMatrix1, eigenvalues1, eigenvectors1);
        performKernelPCA(kernelMatrix2, eigenvalues2, eigenvectors2);

        // Step 8: Predict thresholds and signals
        predictSignalsAndThresholds();

        // Step 9: Execute trades
        executeTrades();
    }
}

Kernelized Profitability Arbitrage [Re: TipmyPip] #488513
01/02/25 10:39
01/02/25 10:39
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
Overview
You are tasked with designing a multi-layer volatility arbitrage strategy for a portfolio of 28 currency pairs. The strategy leverages Kernel PCA to extract latent structures in pairwise volatility relationships and their interplay with profitability signals. The challenge is to optimize the combination of kernel components to maximize risk-adjusted profitability while managing noise and feedback loops between dependent volatility and profitability spaces.

Problem Statement
Volatility Pair Dynamics: Each pair 𝐶𝑖,𝐶𝑗 exhibits a volatility relationship:

V_{i,j}(t) = |σ_i(t) - σ_j(t)|,

where σ_i(t) is the volatility of currency pair C_i, computed as:

σ_i(t) = StdDev(P_i(t), lookback),

and P_i(t) is the price series of C_i.

Profitability Metric: For each pair combination
(𝑖,𝑗), define a profitability measure Π𝑖,𝑗(𝑡):

Π_{i,j}(t) = |R_i(t) - R_j(t)|, where 𝑅𝑖(𝑡) is the return:

R_i(t) = P_i(t) / P_i(t-1) - 1.

Kernel Transformation: Use a Gaussian kernel to map volatility and profitability measures into high-dimensional spaces:

K_{V,ij}(t) = exp(- (V_{i,j}(t) - V_{k,l}(t))^2 / (2 * σ_V^2)),
K_{Π,ij}(t) = exp(- (Π_{i,j}(t) - Π_{k,l}(t))^2 / (2 * σ_Π^2)).

Principal Component Extraction: Decompose the kernel matrices
𝐾𝑉 and 𝐾Π into their eigenvalues 𝜆𝑘​ and eigenvectors 𝑣𝑘​ using:

K = Σ_{k=1}^N λ_k * (v_k ⊗ v_k), where 𝑁=378 is the number of pair combinations.

Feedback Loop: The reduced components 𝑅𝑉 and 𝑅Π​ are recursively coupled:

R_{V,i}(t) = Σ_{k=1}^m (dot(K_{V,i}, v_k) * R_{Π,k}(t)),
R_{Π,i}(t) = Σ_{k=1}^m (dot(K_{Π,i}, v_k) * R_{V,k}(t)).

Neural Network Predictions: Train two neural networks to predict dynamic thresholds for trade signals:

Θ𝑉(𝑡) based on 𝑅𝑉 (volatility components).
ΘΠ(𝑡) based on 𝑅Π (profitability components).

Use a meta neural network to combine these predictions:

Θ_{final}(t) = MetaNN(Θ_V(t), Θ_Π(t)).

Trade Execution: Trades are executed based on normalized signals:

S_{i,j}(t) = R_{V,i}(t) / sqrt(Σ_{k=1}^N R_{V,k}(t)^2), with the condition:

|S_{i,j}(t)| > Θ_{final}(t).

Puzzle Constraints

Variance Explained: Select 𝑚, the number of top components, such that:

Σ_{k=1}^m λ_k / Σ_{k=1}^N λ_k ≥ 0.90.

Dynamic Kernel Widths: Adapt kernel widths 𝜎𝑉 and 𝜎Π dynamically:

σ_V(t) = σ_V(0) * (1 + κ * StdDev(V(t))),
σ_Π(t) = σ_Π(0) * (1 + κ * StdDev(Π(t))).

Risk-Adjusted Profitability: Optimize the Sharpe ratio:

Sharpe = E[Π_{i,j}(t)] / StdDev(Π_{i,j}(t)).

Feedback Convergence: Ensure that feedback loops between 𝑅𝑉 and 𝑅Π converge within 10 iterations:

max(|R_V^{(n)} - R_V^{(n-1)}|, |R_Π^{(n)} - R_Π^{(n-1)}|) < ε.

Your Tasks

Kernel PCA and Neural Networks:

Implement the kernel matrices 𝐾𝑉 and 𝐾Π. Extract 𝑚-principal components for both matrices.
Train neural networks 𝑁𝑁1, 𝑁𝑁2, and MetaNN.

Recursive Feedback: Establish recursive coupling between 𝑅𝑉 and 𝑅Π.

Ensure convergence within the specified tolerance.
Profitability Optimization:

Optimize the selection of pair combinations to maximize the cumulative profitability.

Backtesting:

Simulate the strategy over 10,000 ticks and report the Sharpe ratio, the maximum drawdown, and the trade frequency.
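
The feedback-convergence constraint translates directly into a bounded fixed-point loop. A minimal sketch follows; epsilon, the iteration cap, and the 0.9/0.1 coupling weights are placeholder assumptions, with the real coupling given by the feedback equations above:

Code
// iterate the R_V / R_Pi feedback until the largest change drops below epsilon
#define MAXITER 10
function iterateFeedback(var* RV, var* RPi, int n, var epsilon)
{
	var prevV, prevP, delta;
	int it, i;
	for(it = 0; it < MAXITER; it++) {
		delta = 0;
		for(i = 0; i < n; i++) {
			prevV = RV[i]; prevP = RPi[i];
			// placeholder coupling: each space nudged by the other
			RV[i]  = 0.9*RV[i]  + 0.1*RPi[i];
			RPi[i] = 0.9*RPi[i] + 0.1*prevV;
			delta = max(delta, max(abs(RV[i]-prevV), abs(RPi[i]-prevP)));
		}
		if(delta < epsilon) break; // converged within tolerance
	}
}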

Code
#define PAIRS 28 // Number of currency pairs
#define COMBINATIONS 378 // Total number of unique pair combinations

string CurrencyPairs[PAIRS] = {
    "EURUSD", "GBPUSD", "USDJPY", "GBPJPY", "USDCAD", "EURAUD", "EURJPY",
    "AUDCAD", "AUDJPY", "AUDNZD", "AUDUSD", "CADJPY", "EURCAD", "EURCHF",
    "EURGBP", "EURNZD", "GBPCAD", "GBPCHF", "NZDCAD", "NZDJPY", "NZDUSD",
    "USDCHF", "CHFJPY", "AUDCHF", "GBPNZD", "NZDCHF", "CADCHF", "GBPAUD"
};

vars volatilities[PAIRS];             // Volatility for each pair
vars volatilityRatios[COMBINATIONS]; // Volatility ratios for pair combinations
vars kernelMatrix1[COMBINATIONS][COMBINATIONS]; // Kernel matrix for PCA-1
vars kernelMatrix2[COMBINATIONS][COMBINATIONS]; // Kernel matrix for PCA-2
vars ReducedMatrix1[COMBINATIONS][COMBINATIONS]; // Reduced matrix for PCA-1
vars ReducedMatrix2[COMBINATIONS][COMBINATIONS]; // Reduced matrix for PCA-2
vars profitabilityMatrix[COMBINATIONS]; // Profitability matrix for pair combinations
vars Threshold1[COMBINATIONS];        // Thresholds from NN-1
vars Threshold2[COMBINATIONS];        // Thresholds from NN-2
vars MetaFeatures[COMBINATIONS];      // Meta-features for MetaNN
vars FinalSignals[COMBINATIONS];      // Final trade signals
int combinationIndex[COMBINATIONS][2]; // Index mapping for combinations
int NN1, NN2, MetaNN;                 // Neural network handles
int lookback = 50;                    // Lookback period

// Step 1: Calculate volatilities for all currency pairs
function calculateVolatilities() {
    for (int i = 0; i < PAIRS; i++) {
        volatilities[i] = StdDev(series(price(CurrencyPairs[i])), lookback);
    }
}

// Step 2: Generate all pair combinations
function generateCombinations() {
    int index = 0;
    for (int i = 0; i < PAIRS - 1; i++) {
        for (int j = i + 1; j < PAIRS; j++) {
            combinationIndex[index][0] = i; // First pair in the combination
            combinationIndex[index][1] = j; // Second pair in the combination
            index++;
        }
    }
}

// Step 3: Calculate volatility ratios for all combinations
function calculateVolatilityRatios() {
    for (int k = 0; k < COMBINATIONS; k++) {
        int i = combinationIndex[k][0];
        int j = combinationIndex[k][1];
        volatilityRatios[k] = abs(volatilities[i] - volatilities[j]); // Volatility difference
    }
}

// Step 4: Compute kernel matrices
function calculateKernelMatrices(var sigma1, var sigma2) {
    for (int i = 0; i < COMBINATIONS; i++) {
        for (int j = 0; j < COMBINATIONS; j++) {
            kernelMatrix1[i][j] = exp(-pow(volatilityRatios[i] - volatilityRatios[j], 2) / (2 * pow(sigma1, 2)));
            kernelMatrix2[i][j] = exp(-pow(profitabilityMatrix[i] - profitabilityMatrix[j], 2) / (2 * pow(sigma2, 2))); // NOTE: profitabilityMatrix is never filled in this listing and must be computed from returns first
        }
    }
}

// Step 5: Perform Kernel PCA
function performKernelPCA(vars kernelMatrix[COMBINATIONS][COMBINATIONS], vars eigenvalues, vars eigenvectors) {
    eigenDecomposition(kernelMatrix, eigenvalues, eigenvectors);
}

// Step 6: Reduce kernel data
function reduceKernelData(vars kernelMatrix[COMBINATIONS][COMBINATIONS], vars eigenvectors[COMBINATIONS][COMBINATIONS], vars ReducedMatrix[COMBINATIONS][COMBINATIONS], int components) {
    for (int i = 0; i < COMBINATIONS; i++) {
        for (int j = 0; j < components; j++) {
            ReducedMatrix[i][j] = dotProduct(kernelMatrix[i], eigenvectors[j]);
        }
    }
}

// Step 7: Train neural networks
function trainNeuralNetworks() {
    // Train NN-1
    LayerPerceptron(10);
    LayerNormalize();
    LayerPerceptron(1);
    NN1 = Train("NN1", ReducedMatrix1, Threshold1, COMBINATIONS);

    // Train NN-2
    LayerPerceptron(10);
    LayerNormalize();
    LayerPerceptron(1);
    NN2 = Train("NN2", ReducedMatrix2, Threshold2, COMBINATIONS);

    // Train Meta Neural Network
    LayerPerceptron(5);
    LayerNormalize();
    LayerPerceptron(1);
    MetaNN = Train("MetaNN", [Threshold1, Threshold2], FinalSignals, COMBINATIONS);
}

// Step 8: Predict thresholds and signals
function predictThresholdsAndSignals() {
    for (int i = 0; i < COMBINATIONS; i++) {
        Threshold1[i] = Predict(NN1, ReducedMatrix1[i]);
        Threshold2[i] = Predict(NN2, ReducedMatrix2[i]);

        // Meta Neural Network combines the two predictions
        MetaFeatures[i][0] = Threshold1[i];
        MetaFeatures[i][1] = Threshold2[i];
        FinalSignals[i] = Predict(MetaNN, MetaFeatures[i]);
    }
}

// Step 9: Execute trades
function executeTrades() {
    for (int i = 0; i < COMBINATIONS; i++) {
        int pair1 = combinationIndex[i][0];
        int pair2 = combinationIndex[i][1];

        // Example trade logic based on meta signal
        if (FinalSignals[i] > 0.5) {
            enterLong(CurrencyPairs[pair1]);
            enterShort(CurrencyPairs[pair2]);
        } else if (FinalSignals[i] < -0.5) {
            enterShort(CurrencyPairs[pair1]);
            enterLong(CurrencyPairs[pair2]);
        }
    }
}

// Main function
function run() {
    set(PLOTNOW);

    // Step 1: Calculate volatilities
    calculateVolatilities();

    // Step 2: Generate pair combinations
    generateCombinations();

    // Step 3: Calculate volatility ratios
    calculateVolatilityRatios();

    // Step 4: Compute kernel matrices
    calculateKernelMatrices(0.5, 0.5); // Example sigma values

    // Step 5: Perform Kernel PCA
    vars eigenvalues1[COMBINATIONS], eigenvectors1[COMBINATIONS][COMBINATIONS];
    vars eigenvalues2[COMBINATIONS], eigenvectors2[COMBINATIONS][COMBINATIONS];
    performKernelPCA(kernelMatrix1, eigenvalues1, eigenvectors1);
    performKernelPCA(kernelMatrix2, eigenvalues2, eigenvectors2);

    // Step 6: Reduce kernel data
    reduceKernelData(kernelMatrix1, eigenvectors1, ReducedMatrix1, 3); // Top 3 components
    reduceKernelData(kernelMatrix2, eigenvectors2, ReducedMatrix2, 3);

    // Step 7: Train neural networks
    trainNeuralNetworks();

    // Step 8: Predict thresholds and signals
    predictThresholdsAndSignals();

    // Step 9: Execute trades
    executeTrades();
}
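
Note: these listings call eigenDecomposition(), dotProduct() and ReLU(), which are not lite-C built-ins. A minimal sketch of such helpers, assuming flattened row-major var arrays and power iteration with deflation, is given below; the names, the matrix size and the random() seeding are assumptions, and wiring it to the 2D arrays used above is left to the reader.

#define MAT_N 28      // matrix dimension (assumed: matches PAIRS)
#define PI_STEPS 100  // power-iteration steps per eigenpair

var dotProduct(var* a, var* b)  // dot product of two MAT_N-vectors
{
    var s = 0;
    int i;
    for (i = 0; i < MAT_N; i++) s += a[i]*b[i];
    return s;
}

var ReLU(var x) { return max(0, x); }

// Eigenpairs of a symmetric MAT_N x MAT_N matrix (row-major, flattened)
// via power iteration with deflation; Evals/Evecs receive the results.
void eigenDecomposition(var* K, var* Evals, var* Evecs)
{
    var A[MAT_N*MAT_N], v[MAT_N], w[MAT_N];
    var norm, lambda;
    int i, j, k, s;
    for (i = 0; i < MAT_N*MAT_N; i++) A[i] = K[i]; // work on a copy
    for (k = 0; k < MAT_N; k++) {
        for (i = 0; i < MAT_N; i++) v[i] = random(); // random start vector
        for (s = 0; s < PI_STEPS; s++) {  // iterate w = A*v, then normalize
            for (i = 0; i < MAT_N; i++) {
                w[i] = 0;
                for (j = 0; j < MAT_N; j++) w[i] += A[i*MAT_N+j]*v[j];
            }
            norm = sqrt(dotProduct(w, w)) + 1e-12;
            for (i = 0; i < MAT_N; i++) v[i] = w[i]/norm;
        }
        lambda = 0;  // Rayleigh quotient: lambda = v'Av
        for (i = 0; i < MAT_N; i++) {
            w[i] = 0;
            for (j = 0; j < MAT_N; j++) w[i] += A[i*MAT_N+j]*v[j];
            lambda += v[i]*w[i];
        }
        Evals[k] = lambda;
        for (i = 0; i < MAT_N; i++) Evecs[k*MAT_N+i] = v[i];
        // deflate: remove the found component, A -= lambda * v * v'
        for (i = 0; i < MAT_N; i++)
            for (j = 0; j < MAT_N; j++)
                A[i*MAT_N+j] -= lambda*v[i]*v[j];
    }
}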

Recursive Kernel PCA with GNN for Profitability Optimization [Re: TipmyPip] #488514
01/02/25 12:39
01/02/25 12:39
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
Problem: Recursive Kernel PCA with GNN for Profitability Optimization

You are tasked with designing a trading strategy that integrates two Kernel PCAs (for volatility and profitability relationships) with a Graph Neural Network (GNN). The strategy dynamically updates node embeddings and kernel matrices to extract dominant patterns and optimize trading signals for a portfolio of n = 28 currency pairs.

The goal is to maximize profitability Π, subject to market dynamics and constraints.

Step 1: Graph Construction
Graph Definition: Let the currency pairs be represented as a graph G = (V, E), where:

V: Nodes v_i represent currency pairs.
E: Edges represent relationships between pairs.

Node Features: Each node v_i is assigned a feature vector x_i, defined as:

x_i = [σ_i(t), R_i(t), M_i(t), ρ_{i,j}(t)]

where:

σ_i(t): Volatility of C_i,
R_i(t): Return of C_i,
M_i(t): Momentum of C_i,
ρ_{i,j}(t): Correlation between C_i and C_j.

Edge Weights: The weighted adjacency matrix A is given by:

A[i][j] = exp(-||x_i - x_j||^2 / (2 * σ^2))

where σ is a kernel width parameter.

Step 2: Kernel Matrices

Volatility Kernel K_V: Construct K_V using a Gaussian kernel:

K_V[i][j] = exp(- (V_{i,j}(t) - V_{k,l}(t))^2 / (2 * σ_V^2))

where V_{i,j}(t) = |σ_i(t) - σ_j(t)| is the volatility spread.

Profitability Kernel K_Π: Construct K_Π as:

K_Π[i][j] = exp(- (Π_{i,j}(t) - Π_{k,l}(t))^2 / (2 * σ_Π^2))

where Π_{i,j}(t) = |R_i(t) - R_j(t)| is the profitability difference.

Eigenvalue Decomposition: Decompose K_V and K_Π into λ_k (eigenvalues) and v_k (eigenvectors):

K = Σ_{k=1}^m λ_k * (v_k ⊗ v_k)

where m is the number of principal components.

Step 3: GNN Embedding Propagation
Message Passing: For each edge (i,j), compute the message m_{ij}:

m_{ij} = A[i][j] * (W * h_j^{(t)})

where W is a weight matrix.

Node Updates: Update node embeddings h_i^{(t+1)} using:

h_i^{(t+1)} = ReLU(W * Σ_{j∈N(i)} m_{ij} + b)

Recursive Feedback: Use updated embeddings h_i^{(t+1)} to refine the kernel matrices:

K_V[i][j] = f(h_i^{(t+1)}, h_j^{(t+1)})
K_Π[i][j] = g(h_i^{(t+1)}, h_j^{(t+1)})

Step 4: Neural Networks
Threshold Prediction: Train two neural networks:

NN_1: Predicts the volatility threshold Θ_V(t):

Θ_V(t) = NN_1(R_{V,i}(t))

NN_2: Predicts the profitability threshold Θ_Π(t):

Θ_Π(t) = NN_2(R_{Π,i}(t))

Meta Neural Network: Combine Θ_V(t) and Θ_Π(t) using a meta network:

Θ_{final}(t) = MetaNN(Θ_V(t), Θ_Π(t))

Step 5: Trade Execution
Signal Calculation: Compute the normalized trade signal S_{i,j}(t) (a sketch follows below):

S_{i,j}(t) = R_{V,i}(t) / sqrt(Σ_{k=1}^n R_{V,k}(t)^2)

Execution Criteria: Execute a trade if:

|S_{i,j}(t)| > Θ_{final}(t)
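
A minimal lite-C sketch of this normalization and execution test; RawV and ThetaFinal are assumed names standing in for the R_V values and the MetaNN output, not part of the listing below:

var normalizedSignal(var* RawV, int k, int n)
{
    var sumSq = 0;
    int i;
    for (i = 0; i < n; i++) sumSq += RawV[i]*RawV[i];
    return RawV[k] / (sqrt(sumSq) + 1e-12); // S_{i,j}(t)
}

// execute only when the normalized signal clears the meta threshold:
// if (abs(normalizedSignal(RawV, k, COMBINATIONS)) > ThetaFinal) { ... }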

Constraints
Variance Explained: Select the top m components such that:

Σ_{k=1}^m λ_k / Σ_{k=1}^n λ_k ≥ 0.90

Dynamic Kernel Widths: Update kernel widths dynamically:

σ_V(t) = σ_V(0) * (1 + κ * StdDev(V(t)))
σ_Π(t) = σ_Π(0) * (1 + κ * StdDev(Π(t)))

Feedback Convergence: Ensure feedback loops converge within t_max = 10 iterations (sketches of the first two checks follow below):

max(|K_V^{(t+1)} - K_V^{(t)}|, |K_Π^{(t+1)} - K_Π^{(t)}|) < ε
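
A minimal sketch of the variance-explained selection and the dynamic kernel width; the function names, kappa and sigma0 are assumed tuning parameters, not part of the listings:

int selectComponents(var* eigenvalues, int n)
{
    // smallest m whose eigenvalues explain at least 90% of total variance
    var total = 0, partial = 0;
    int k;
    for (k = 0; k < n; k++) total += eigenvalues[k];
    for (k = 0; k < n; k++) {
        partial += eigenvalues[k];
        if (partial / total >= 0.90) return k+1;
    }
    return n;
}

var updateSigma(var sigma0, var kappa, var stdev)
{
    return sigma0 * (1 + kappa * stdev); // dynamic kernel width
}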

Goal: Maximize cumulative profitability Π:

Π = Σ_{t=1}^T Σ_{i,j} (S_{i,j}(t) * Trade_{i,j}(t))

subject to constraints on variance explained, kernel widths, and feedback convergence.

Code
#define PAIRS 28
#define COMBINATIONS 378

string CurrencyPairs[PAIRS] = {
    "EURUSD", "GBPUSD", "USDJPY", "GBPJPY", "USDCAD", "EURAUD", "EURJPY",
    "AUDCAD", "AUDJPY", "AUDNZD", "AUDUSD", "CADJPY", "EURCAD", "EURCHF",
    "EURGBP", "EURNZD", "GBPCAD", "GBPCHF", "NZDCAD", "NZDJPY", "NZDUSD",
    "USDCHF", "CHFJPY", "AUDCHF", "GBPNZD", "NZDCHF", "CADCHF", "GBPAUD"
};

vars volatilities[PAIRS];             // Volatility for each pair
vars profitability[PAIRS*PAIRS];     // Profitability proxy per ordered pair (i,j); [COMBINATIONS] was too small for the i*PAIRS+j indexing used below
vars adjacencyMatrix[PAIRS][PAIRS];  // GNN adjacency matrix
vars nodeEmbeddings[PAIRS][5];       // Node embeddings from GNN
vars kernelMatrix1[PAIRS][PAIRS];    // Kernel matrix for PCA-1 (volatility)
vars kernelMatrix2[PAIRS][PAIRS];    // Kernel matrix for PCA-2 (profitability)
vars ReducedMatrix1[PAIRS][PAIRS];   // Reduced components from PCA-1
vars ReducedMatrix2[PAIRS][PAIRS];   // Reduced components from PCA-2
vars Threshold1[PAIRS];              // Thresholds from NN-1
vars Threshold2[PAIRS];              // Thresholds from NN-2
vars MetaFeatures[PAIRS][2];          // Meta-features (Threshold1, Threshold2) for MetaNN
vars FinalSignals[PAIRS];            // Final trade signals
int NN1, NN2, MetaNN;                // Neural network handles
int lookback = 50;                   // Lookback period

// Step 1: Calculate volatility and profitability
function calculateMetrics() {
    for (int i = 0; i < PAIRS; i++) {
        volatilities[i] = StdDev(series(price(CurrencyPairs[i])), lookback);
    }
    // fill the full ordered-pair grid; index i*PAIRS + j matches the
    // lookup in updateKernelMatrices() below (k/PAIRS, k%PAIRS did not)
    for (int i = 0; i < PAIRS; i++) {
        for (int j = 0; j < PAIRS; j++) {
            profitability[i*PAIRS + j] = abs(priceClose(CurrencyPairs[i]) - priceClose(CurrencyPairs[j]));
        }
    }
}

// Step 2: Construct adjacency matrix
function constructAdjacencyMatrix() {
    for (int i = 0; i < PAIRS; i++) {
        for (int j = 0; j < PAIRS; j++) {
            if (i != j) {
                adjacencyMatrix[i][j] = exp(-pow(volatilities[i] - volatilities[j], 2)); // Gaussian kernel
            } else {
                adjacencyMatrix[i][j] = 0;
            }
        }
    }
}

// Step 3: Propagate GNN embeddings
function propagateEmbeddings() {
    vars newEmbeddings[PAIRS][5];
    for (int t = 0; t < 3; t++) { // 3 propagation steps
        for (int i = 0; i < PAIRS; i++) {
            for (int k = 0; k < 5; k++) {
                newEmbeddings[i][k] = 0;
                for (int j = 0; j < PAIRS; j++) {
                    newEmbeddings[i][k] += adjacencyMatrix[i][j] * nodeEmbeddings[j][k];
                }
                newEmbeddings[i][k] = ReLU(newEmbeddings[i][k]);
            }
        }
        for (int i = 0; i < PAIRS; i++) {
            for (int k = 0; k < 5; k++) {
                nodeEmbeddings[i][k] = newEmbeddings[i][k];
            }
        }
    }
}

// Step 4: Update kernel matrices using GNN embeddings
function updateKernelMatrices() {
    for (int i = 0; i < PAIRS; i++) {
        for (int j = 0; j < PAIRS; j++) {
            kernelMatrix1[i][j] = exp(-dotProduct(nodeEmbeddings[i], nodeEmbeddings[j]) / 2); // Volatility kernel
            kernelMatrix2[i][j] = exp(-profitability[i * PAIRS + j] / 2);                     // Profitability kernel
        }
    }
}

// Step 5: Perform Kernel PCA
function performKernelPCA(vars kernelMatrix[PAIRS][PAIRS], vars ReducedMatrix[PAIRS][PAIRS]) {
    vars eigenvalues, eigenvectors;
    eigenDecomposition(kernelMatrix, eigenvalues, eigenvectors);
    for (int i = 0; i < PAIRS; i++) {
        for (int j = 0; j < 3; j++) { // Top 3 components
            ReducedMatrix[i][j] = dotProduct(kernelMatrix[i], eigenvectors[j]);
        }
    }
}

// Step 6: Train neural networks
function trainNeuralNetworks() {
    // NN-1: Thresholds from PCA-1
    LayerPerceptron(10);
    LayerNormalize();
    LayerPerceptron(1);
    NN1 = Train("NN1", ReducedMatrix1, Threshold1, PAIRS);

    // NN-2: Thresholds from PCA-2
    LayerPerceptron(10);
    LayerNormalize();
    LayerPerceptron(1);
    NN2 = Train("NN2", ReducedMatrix2, Threshold2, PAIRS);

    // Meta Neural Network: combine NN-1/NN-2 thresholds into meta-features
    // (an array literal like [Threshold1, Threshold2] is not valid lite-C)
    for (int i = 0; i < PAIRS; i++) {
        MetaFeatures[i][0] = Threshold1[i];
        MetaFeatures[i][1] = Threshold2[i];
    }
    LayerPerceptron(5);
    LayerNormalize();
    LayerPerceptron(1);
    MetaNN = Train("MetaNN", MetaFeatures, FinalSignals, PAIRS);
}

// Step 7: Execute trades
function executeTrades() {
    for (int i = 0; i < PAIRS; i++) {
        if (FinalSignals[i] > 0.5) {
            enterLong(CurrencyPairs[i]);
        } else if (FinalSignals[i] < -0.5) {
            enterShort(CurrencyPairs[i]);
        }
    }
}

// Main function
function run() {
    set(PLOTNOW);

    // Step 1: Calculate metrics
    calculateMetrics();

    // Step 2: Construct adjacency matrix
    constructAdjacencyMatrix();

    // Step 3: Propagate embeddings
    propagateEmbeddings();

    // Step 4: Update kernel matrices
    updateKernelMatrices();

    // Step 5: Perform Kernel PCA
    performKernelPCA(kernelMatrix1, ReducedMatrix1);
    performKernelPCA(kernelMatrix2, ReducedMatrix2);

    // Step 6: Train neural networks
    trainNeuralNetworks();

    // Step 7: Execute trades
    executeTrades();
}

Multi-GNN Recursive Kernel PCA [Re: TipmyPip] #488516
01/02/25 18:41
01/02/25 18:41
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
Multi-GNN Recursive Kernel PCA Problem

You are tasked with designing a multi-layer trading strategy based on recursive Kernel PCA and interdependent N-layer GNNs to analyze n = 28 currency pairs. Each GNN layer specializes in capturing different graph dynamics (e.g., volatility, profitability, momentum), while layers interact nonlinearly and recursively. The outputs from each GNN layer influence:

The adjacency matrices for the other layers.
The kernel matrices for volatility (K_V) and profitability (K_Π).
The node embeddings, which are combined and propagated across layers.

The challenge is to optimize the system by dynamically balancing these interactions while maximizing risk-adjusted profitability.

Enhanced Relationships Between N-Layer GNNs

Interdependent GNN Layers
GNN Layer Types:

GNN_1: Models volatility relationships.
GNN_2: Models profitability dynamics.
GNN_3: Models momentum and correlation patterns.

Cross-Layer Interactions:

Outputs from GNN_1 dynamically update edge weights in GNN_2:

A_2^{(t+1)}[i][j] = f(A_2^{(t)}[i][j], h_i^{1,(t)}, h_j^{1,(t)}),

where h_i^{1,(t)} is the embedding of node i from GNN_1.

Similarly, outputs from GNN_2 influence GNN_3:

A_3^{(t+1)}[i][j] = g(A_3^{(t)}[i][j], h_i^{2,(t)}, h_j^{2,(t)}).

The final embeddings from GNN_3 recursively update GNN_1:

A_1^{(t+1)}[i][j] = h(A_1^{(t)}[i][j], h_i^{3,(t)}, h_j^{3,(t)}).

Multi-Scale Aggregation:

Node embeddings are aggregated across all N layers (a sketch follows below):

h_i^{final} = Σ_{l=1}^N W_l * h_i^{l,(t)}
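
A minimal sketch of this weighted aggregation, using the arrays declared in the code listing further below; layerWeights is an assumed addition, and the listing's own aggregateEmbeddings() is the special case W_l = 1/N:

var layerWeights[28]; // assumed per-layer weights W_l

function aggregateWeighted()
{
    int i, k, l;
    for (i = 0; i < PAIRS; i++) {
        for (k = 0; k < 5; k++) {
            updatedEmbeddings[i][k] = 0;
            for (l = 0; l < 28; l++)
                updatedEmbeddings[i][k] += layerWeights[l] * nodeEmbeddings[i][k][l];
        }
    }
}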

Step-by-Step Mathematical Representation

Step 1: Node and Edge Features
Node Features (x_i):

x_i = [σ_i(t), R_i(t), M_i(t), ρ_{i,j}(t)],

where:

σ_i(t): Volatility.
R_i(t): Return.
M_i(t): Momentum.
ρ_{i,j}(t): Correlation.

Edge Features (e_{ij}):

e_{ij} = [|σ_i(t) - σ_j(t)|, |R_i(t) - R_j(t)|, ρ_{i,j}(t)].

Adjacency Matrices:

For layer l:

A^{(l)}[i][j] = exp(-||x_i - x_j||^2 / (2 * σ^{(l)})).

Step 2: Multi-GNN Propagation
Message Passing:

For each GNN layer l:

m_{ij}^{(l)} = A^{(l)}[i][j] * (W^{(l)} * h_j^{(l)}),

Node Update:

h_i^{(l+1)} = ReLU(Σ_{j ∈ N(i)} m_{ij}^{(l)} + b^{(l)}).

Cross-Layer Feedback:

Update edge weights in other layers:

A_k^{(t+1)}[i][j] = f(A_k^{(t)}[i][j], h_i^{l,(t)}, h_j^{l,(t)}),

where k ≠ l.

Step 3: Kernel PCA
Kernel Updates:

Use embeddings h_i^{final} to update kernel matrices:

K_V[i][j] = exp(-||h_i^{final} - h_j^{final}||^2 / (2 * σ_V^2)),
K_Π[i][j] = exp(-||h_i^{final} - h_j^{final}||^2 / (2 * σ_Π^2)).

Eigenvalue Decomposition:

K = Σ_{k=1}^m λ_k * (v_k ⊗ v_k),

where λ_k and v_k are eigenvalues and eigenvectors.

Step 4: Recursive Feedback

Recursive Edge Updates:

A^{(t+1)} = f(A^{(t)}, K_V^{(t)}, K_Π^{(t)}).

Recursive Kernel Updates:

K_V^{(t+1)} = g(A^{(t+1)}, K_Π^{(t)}),
K_Π^{(t+1)} = h(A^{(t+1)}, K_V^{(t)}).

Step 5: Neural Networks
Threshold Predictions:

Θ_V(t) = NN_1(R_{V,i}(t)),
Θ_Π(t) = NN_2(R_{Π,i}(t)).

Meta Neural Network:

Θ_{final}(t) = MetaNN(Θ_V(t), Θ_Π(t)).

Step 6: Trade Execution
Signal Calculation:

S_{i,j}(t) = R_{V,i}(t) / sqrt( Σ_{k=1}^n R_{V,k}(t)^2 ).

Execution Condition:

| S_{i,j}(t) | > Θ_{final}(t).

Constraints
Variance Explained:

Σ_{k=1}^m λ_k / Σ_{k=1}^n λ_k ≥ 0.90.

Dynamic Kernel Widths:

σ_V(t) = σ_V(0) * (1 + κ * StdDev(V(t))),
σ_Π(t) = σ_Π(0) * (1 + κ * StdDev(Π(t))).

Feedback Convergence:

max(|K_V^{(t+1)} - K_V^{(t)}|, |K_Π^{(t+1)} - K_Π^{(t)}|) < ε.

Objective
Maximize risk-adjusted profitability by:

Extracting dominant volatility and profitability components with Kernel PCA.
Propagating and aggregating embeddings across N interdependent GNN layers.
Dynamically refining kernels and thresholds using recursive feedback.


Code
#define PAIRS 28

string CurrencyPairs[PAIRS] = {
    "EURUSD", "GBPUSD", "USDJPY", "GBPJPY", "USDCAD", "EURAUD", "EURJPY",
    "AUDCAD", "AUDJPY", "AUDNZD", "AUDUSD", "CADJPY", "EURCAD", "EURCHF",
    "EURGBP", "EURNZD", "GBPCAD", "GBPCHF", "NZDCAD", "NZDJPY", "NZDUSD",
    "USDCHF", "CHFJPY", "AUDCHF", "GBPNZD", "NZDCHF", "CADCHF", "GBPAUD"
};

vars volatilities[PAIRS];             // Volatility for each pair
vars adjacencyMatrices[PAIRS][PAIRS][28];  // Adjacency matrices for 28 GNNs
vars nodeEmbeddings[PAIRS][5][28];    // Node embeddings for 28 GNNs
vars updatedEmbeddings[PAIRS][5];     // Final node embeddings after aggregation
vars kernelMatrix[PAIRS][PAIRS];      // Kernel matrix for PCA
vars eigenvalues[PAIRS];              // Eigenvalues from PCA
vars eigenvectors[PAIRS][PAIRS];      // Eigenvectors from PCA
vars reducedMatrix[PAIRS][3];         // Reduced components from PCA
vars thresholds[PAIRS];               // Thresholds for trade execution
vars signals[PAIRS];                  // Final trading signals
int lookback = 50;
int NN;                               // Neural network handle for threshold prediction
int propagationDepth = 3;             // GNN propagation depth

// Step 1: Calculate volatilities for all pairs
function calculateVolatilities() {
    for (int i = 0; i < PAIRS; i++) {
        volatilities[i] = StdDev(series(price(CurrencyPairs[i])), lookback);
    }
}

// Step 2: Construct adjacency matrices for 28 GNNs
function constructAdjacencyMatrices() {
    for (int l = 0; l < 28; l++) { // For each GNN layer
        for (int i = 0; i < PAIRS; i++) {
            for (int j = 0; j < PAIRS; j++) {
                if (i != j) {
                    adjacencyMatrices[i][j][l] = exp(-pow(volatilities[i] - volatilities[j], 2) / (2 * pow(0.5, 2))); // Gaussian kernel ('^' is XOR in lite-C, so use pow)
                } else {
                    adjacencyMatrices[i][j][l] = 0; // No self-loops
                }
            }
        }
    }
}

// Step 3: Propagate embeddings for each GNN
function propagateEmbeddings() {
    for (int l = 0; l < 28; l++) { // For each GNN
        for (int t = 0; t < propagationDepth; t++) { // Propagation steps
            vars newEmbeddings[PAIRS][5];
            for (int i = 0; i < PAIRS; i++) {
                for (int k = 0; k < 5; k++) {
                    newEmbeddings[i][k] = 0;
                    for (int j = 0; j < PAIRS; j++) {
                        newEmbeddings[i][k] += adjacencyMatrices[i][j][l] * nodeEmbeddings[j][k][l];
                    }
                    newEmbeddings[i][k] = ReLU(newEmbeddings[i][k]);
                }
            }
            // Update embeddings
            for (int i = 0; i < PAIRS; i++) {
                for (int k = 0; k < 5; k++) {
                    nodeEmbeddings[i][k][l] = newEmbeddings[i][k];
                }
            }
        }
    }
}

// Step 4: Aggregate embeddings across all GNNs
function aggregateEmbeddings() {
    for (int i = 0; i < PAIRS; i++) {
        for (int k = 0; k < 5; k++) {
            updatedEmbeddings[i][k] = 0;
            for (int l = 0; l < 28; l++) {
                updatedEmbeddings[i][k] += nodeEmbeddings[i][k][l];
            }
            updatedEmbeddings[i][k] /= 28; // Average across GNNs
        }
    }
}

// Step 5: Update kernel matrix using aggregated embeddings
function updateKernelMatrix() {
    for (int i = 0; i < PAIRS; i++) {
        for (int j = 0; j < PAIRS; j++) {
            kernelMatrix[i][j] = exp(-dotProduct(updatedEmbeddings[i], updatedEmbeddings[j]) / (2 * pow(0.5, 2))); // Gaussian kernel ('^' is XOR in lite-C, so use pow)
        }
    }
}

// Step 6: Perform Kernel PCA
function performKernelPCA() {
    eigenDecomposition(kernelMatrix, eigenvalues, eigenvectors);
    for (int i = 0; i < PAIRS; i++) {
        for (int j = 0; j < 3; j++) { // Top 3 components
            reducedMatrix[i][j] = dotProduct(kernelMatrix[i], eigenvectors[j]);
        }
    }
}

// Step 7: Train neural network for threshold prediction
function trainNeuralNetwork() {
    LayerPerceptron(10);  // Hidden layer
    LayerNormalize();
    LayerPerceptron(1);   // Output layer
    NN = Train("ThresholdNN", reducedMatrix, thresholds, PAIRS);
}

// Step 8: Generate trade signals
function generateSignals() {
    for (int i = 0; i < PAIRS; i++) {
        signals[i] = Predict(NN, reducedMatrix[i]);
    }
}

// Step 9: Execute trades
function executeTrades() {
    for (int i = 0; i < PAIRS; i++) {
        if (signals[i] > 0.5) {
            enterLong(CurrencyPairs[i]);
        } else if (signals[i] < -0.5) {
            enterShort(CurrencyPairs[i]);
        }
    }
}

// Main function
function run() {
    set(PLOTNOW);

    // Step 1: Calculate volatilities
    calculateVolatilities();

    // Step 2: Construct adjacency matrices
    constructAdjacencyMatrices();

    // Step 3: Propagate embeddings for each GNN
    propagateEmbeddings();

    // Step 4: Aggregate embeddings
    aggregateEmbeddings();

    // Step 5: Update kernel matrix
    updateKernelMatrix();

    // Step 6: Perform Kernel PCA
    performKernelPCA();

    // Step 7: Train neural network for thresholds
    trainNeuralNetwork();

    // Step 8: Generate trade signals
    generateSignals();

    // Step 9: Execute trades
    executeTrades();
}

Volatility-Driven Graph Signal Strategy [Re: TipmyPip] #488525
01/07/25 11:17
01/07/25 11:17
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
Problem Description
1. Input Data
Each currency pair C_i for i = 1, 2, ..., 28 has:

Volatility v_i computed as:

v_i = StdDev(p_i(t), lookback)

where:

p_i(t): Price series of pair C_i,
lookback: Window size for the standard deviation.

Adjacency Matrix A to model relationships:

A_ij = exp(-((v_i - v_j)^2) / (2 * sigma^2)) for i != j
A_ii = 0

where:

sigma: Kernel bandwidth parameter.
2. GNN Propagation
Each node embedding H_i^(l) at layer l is updated using:

H_i^(l+1) = ReLU(Sum_{j=1}^28 A_ij * H_j^(l) * W^(l))

where:

A_ij: Adjacency matrix element,
W^(l): Trainable weight matrix for layer l,
ReLU(x) = max(0, x).
3. Dimensionality Reduction
After L GNN layers, embeddings H_i are reduced in dimensionality:

Kernel PCA
Compute the kernel matrix:

K_ij = exp(-(||H_i - H_j||^2) / (2 * sigma^2))

Perform eigenvalue decomposition:

K = V * Lambda * V^T

Project data onto the top-k eigenvectors:

Z_i = Sum_{j=1}^k Lambda_j * V_j

Autoencoder PCA
Encoder:

Z_i = H_i * W_enc

Decoder:

H_i_hat = Z_i * W_dec

Minimize the reconstruction loss:

L = Sum_{i=1}^28 ||H_i - H_i_hat||^2

4. Trading Signals
Using the reduced features Z_i, generate trading signals s_i:

s_i = +1 if Z_i1 > mu_Z + k * sigma_Z
s_i = -1 if Z_i1 < mu_Z - k * sigma_Z
s_i =  0 otherwise

where:

mu_Z: Mean of Z_i1,
sigma_Z: StdDev of Z_i1,
k: Threshold multiplier.

Puzzle Questions
Graph Construction:

How does the kernel bandwidth sigma affect the sparsity of A_ij?

A_ij -> sparse as |v_i - v_j| increases relative to sigma.

Dimensionality Reduction:

What happens if the eigenvalues Lambda_j decay too quickly in Kernel PCA?

Z_i -> dominated by the first few components, losing information.

Signal Sensitivity:

How does k affect the number of Buy (s_i = 1) or Sell (s_i = -1) trades?

Large k -> fewer trades, high confidence.
Small k -> more trades, lower confidence.

Performance: Evaluate trading performance using:

Sharpe_Ratio = Mean(Returns) / StdDev(Returns)
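
A one-function lite-C sketch of this metric; Returns is an assumed series of per-bar strategy returns, and SMA/StdDev are standard Zorro indicators:

var sharpeRatio(vars Returns, int N)
{
    var m = SMA(Returns, N);       // mean return over the last N bars
    var sd = StdDev(Returns, N);   // return volatility
    if (sd == 0) return 0;
    return m / sd;
}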

Code
#define PAIRS 28
#define CHUNK_SIZE 5
#define LATENT_DIM 3

// Currency pairs
string CurrencyPairs[PAIRS] = {
    "EURUSD", "GBPUSD", "USDJPY", "GBPJPY", "USDCAD", "EURAUD", "EURJPY",
    "AUDCAD", "AUDJPY", "AUDNZD", "AUDUSD", "CADJPY", "EURCAD", "EURCHF",
    "EURGBP", "EURNZD", "GBPCAD", "GBPCHF", "NZDCAD", "NZDJPY", "NZDUSD",
    "USDCHF", "CHFJPY", "AUDCHF", "GBPNZD", "NZDCHF", "CADCHF", "GBPAUD"
};

// Variables
vars volatilities[PAIRS];
vars adjacencyMatrices[PAIRS][PAIRS];
vars nodeEmbeddings[PAIRS][5];
vars kernelMatrix[PAIRS][PAIRS];
vars eigenvalues[PAIRS];
vars eigenvectors[PAIRS][PAIRS];
vars reducedMatrix[PAIRS][3];
vars cumulativeCovariance[5][5];
vars encoderWeights[5][LATENT_DIM];
vars decoderWeights[LATENT_DIM][5];
vars signals[PAIRS];
double kernelSigma = 0.5;
double thresholdMultiplier = 1.0;
double learningRate = 0.01;
int lookback = 50;
int totalSamples = 0;

// Step 1: Calculate volatilities
function calculateVolatilities() {
    for (int i = 0; i < PAIRS; i++) {
        volatilities[i] = StdDev(series(price(CurrencyPairs[i])), lookback);
    }
}

// Step 2: Construct adjacency matrix for GNN
function constructAdjacencyMatrix() {
    for (int i = 0; i < PAIRS; i++) {
        for (int j = 0; j < PAIRS; j++) {
            if (i != j) {
                adjacencyMatrices[i][j] = exp(-pow(volatilities[i] - volatilities[j], 2) / (2 * pow(kernelSigma, 2)));
            } else {
                adjacencyMatrices[i][j] = 0;
            }
        }
    }
}

// Step 3: Propagate embeddings for GNN
function propagateEmbeddings() {
    vars newEmbeddings[PAIRS][5];
    for (int i = 0; i < PAIRS; i++) {
        for (int k = 0; k < 5; k++) {
            newEmbeddings[i][k] = 0;
            for (int j = 0; j < PAIRS; j++) {
                newEmbeddings[i][k] += adjacencyMatrices[i][j] * nodeEmbeddings[j][k];
            }
        }
    }
    for (int i = 0; i < PAIRS; i++) {
        for (int k = 0; k < 5; k++) {
            nodeEmbeddings[i][k] = ReLU(newEmbeddings[i][k]);
        }
    }
}

// Step 4: Incremental PCA
function updateIncrementalPCA(vars embeddings[PAIRS][5]) {
    // reset the accumulator once, then accumulate over all chunks
    // (the original zeroed it inside the chunk loop, discarding earlier chunks)
    int a, b;
    for (a = 0; a < 5; a++)
        for (b = 0; b < 5; b++)
            cumulativeCovariance[a][b] = 0;
    totalSamples = 0;
    for (int i = 0; i < PAIRS; i += CHUNK_SIZE) {
        for (a = 0; a < 5; a++) {
            for (b = 0; b < 5; b++) {
                for (int j = i; j < i + CHUNK_SIZE && j < PAIRS; j++) {
                    cumulativeCovariance[a][b] += embeddings[j][a] * embeddings[j][b];
                }
            }
        }
        totalSamples += CHUNK_SIZE;
    }
    for (a = 0; a < 5; a++) {
        for (b = 0; b < 5; b++) {
            cumulativeCovariance[a][b] /= totalSamples;
        }
    }
}

// Step 5: Autoencoder PCA
function initializeAutoencoder() {
    for (int i = 0; i < 5; i++) {
        for (int j = 0; j < LATENT_DIM; j++) {
            encoderWeights[i][j] = random() * 0.1;
            decoderWeights[j][i] = random() * 0.1;
        }
    }
}

function forwardPass(vars input[5], vars latent[LATENT_DIM], vars output[5]) {
    for (int j = 0; j < LATENT_DIM; j++) {
        latent[j] = 0;
        for (int i = 0; i < 5; i++) {
            latent[j] += input[i] * encoderWeights[i][j];
        }
    }
    for (int i = 0; i < 5; i++) {
        output[i] = 0;
        for (int j = 0; j < LATENT_DIM; j++) {
            output[i] += latent[j] * decoderWeights[j][i];
        }
    }
}

function backpropagate(vars input[5], vars output[5], vars latent[LATENT_DIM]) {
    for (int i = 0; i < 5; i++) {
        for (int j = 0; j < LATENT_DIM; j++) {
            double gradEncoder = (input[i] - output[i]) * decoderWeights[j][i];
            double gradDecoder = (input[i] - output[i]) * latent[j];
            encoderWeights[i][j] += learningRate * gradEncoder;
            decoderWeights[j][i] += learningRate * gradDecoder;
        }
    }
}

function trainAutoencoder(vars embeddings[PAIRS][5]) {
    for (int epoch = 0; epoch < 50; epoch++) {
        for (int i = 0; i < PAIRS; i++) {
            vars latent[LATENT_DIM];
            vars output[5];
            forwardPass(embeddings[i], latent, output);
            backpropagate(embeddings[i], output, latent);
        }
    }
}
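
// Note (assumed addition): reducedMatrix is read by generateSignals() below
// but never filled in this listing; one option is to project each pair's
// embedding through the trained encoder and use the latents as components.
function projectToReducedMatrix() {
    for (int i = 0; i < PAIRS; i++) {
        vars latent[LATENT_DIM];
        vars output[5];
        forwardPass(nodeEmbeddings[i], latent, output);
        for (int j = 0; j < LATENT_DIM; j++) {
            reducedMatrix[i][j] = latent[j]; // LATENT_DIM == 3 components
        }
    }
}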

// Step 6: Generate trading signals
function generateSignals() {
    for (int i = 0; i < PAIRS; i++) {
        // compute the mean first, then the deviation, over the 3 components
        // (the original accumulated stddev against a partially built mean)
        double mean = 0, stddev = 0;
        for (int j = 0; j < 3; j++)
            mean += reducedMatrix[i][j];
        mean /= 3;
        for (int j = 0; j < 3; j++)
            stddev += pow(reducedMatrix[i][j] - mean, 2);
        stddev = sqrt(stddev / 3);
        double threshold = mean + thresholdMultiplier * stddev;
        if (reducedMatrix[i][0] > threshold) signals[i] = 1;
        else if (reducedMatrix[i][0] < -threshold) signals[i] = -1;
        else signals[i] = 0;
    }
}

// Step 7: Execute trades
function executeTrades() {
    for (int i = 0; i < PAIRS; i++) {
        if (signals[i] > 0) enterLong(CurrencyPairs[i]);
        else if (signals[i] < 0) enterShort(CurrencyPairs[i]);
    }
}

// Main function
function run() {
    set(PLOTNOW);
    calculateVolatilities();
    constructAdjacencyMatrix();
    propagateEmbeddings();
    updateIncrementalPCA(nodeEmbeddings);
    trainAutoencoder(nodeEmbeddings);
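    projectToReducedMatrix(); // assumed helper (defined above): fills reducedMatrix before signal generation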
    generateSignals();
    executeTrades();
}

Last edited by TipmyPip; 01/07/25 11:18.
Volatility Graph Dynamics [Re: TipmyPip] #488526
01/07/25 12:10
01/07/25 12:10
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
You are tasked with designing an advanced algorithmic trading strategy that integrates Kernel PCA and Graph Neural Networks (GNNs) to uncover hidden trading opportunities in a highly interconnected financial system of N = 28 currency pairs. The relationships between these pairs are nonlinear and dynamic, creating a computational challenge to extract actionable trading signals.

The Problem
1. Volatility Web
Each currency pair Cᵢ has a volatility Vᵢ(t) at time t, evolving as:

Vᵢ(t) = αᵢ ⋅ sin((π / 2) ⋅ Vⱼ(t-1)) + βᵢ ⋅ cos((π / 3) ⋅ Vₖ(t-2)) + γᵢ

Where:

j ≠ k ≠ i and j, k ∈ [1, 28],
αᵢ, βᵢ, γᵢ: Randomly initialized coefficients influenced by historical price movements,
t: Discrete time steps.

2. Kernel Transformation
The relationships between currency pairs are represented using a Gaussian kernel matrix K(t), defined as:

Kᵢⱼ(t) = exp(-((Vᵢ(t) - Vⱼ(t))²) / (2 ⋅ σ²))

Where:

Kᵢⱼ(t): Similarity between volatilities Vᵢ and Vⱼ,
σ: Kernel bandwidth, dynamically adjusted based on volatility clustering:

σ(t) = σ₀ ⋅ (1 + κ ⋅ StdDev(V(t)))

3. Enhanced PCA
Extract principal components from K(t) using Kernel PCA:

Eigenvalue Decomposition:

K(t) = V(t) ⋅ Λ(t) ⋅ V(t)^T

Where:

Λ(t): Diagonal matrix of eigenvalues λₖ(t),
V(t): Matrix of eigenvectors vₖ(t).

Projection: Compute the PCA-reduced features Zᵢ(t) for each currency pair:

Zᵢ(t) = [⟨Kᵢ(t), v₁⟩, ⟨Kᵢ(t), v₂⟩, ⟨Kᵢ(t), v₃⟩]

Where Zᵢ(t) ∈ ℝ³ are the top-3 principal components.

4. Graph Construction
Construct 28 graphs, one for each GNN, using the PCA-reduced features Zᵢ(t):

Adjacency Matrices:

Aᵢⱼⁿ(t) = exp(-||Zᵢ(t) - Zⱼ(t)||² / (2 ⋅ σ²))

Where Aᵢⱼⁿ(t) is the weight of the edge between Cᵢ and Cⱼ in the nᵗʰ graph.

Normalization: Normalize Aᵢⱼⁿ(t) row-wise:

Aᵢⱼⁿ(t) = Aᵢⱼⁿ(t) / ∑ⱼ Aᵢⱼⁿ(t)

5. GNN Refinement
Refine Zᵢ(t) using a 2-layer Graph Neural Network:

Initialization:

Hᵢⁿ(0)(t) = Zᵢ(t)

Propagation: Update features at each layer l:

Hᵢⁿ(l+1)(t) = ReLU(∑ⱼ Aᵢⱼⁿ(t) ⋅ Hⱼⁿ(l)(t) ⋅ Wⁿ(l))

Where:

Wⁿ(l): Trainable weight matrix for the nᵗʰ GNN.

Output: After L = 2 layers, obtain the refined features:

Hᵢⁿ(L)(t)

6. Signal Generation

Generate trading signals using the first refined feature Hᵢⁿ(L)(t)[0]:

Dynamic Threshold: Calculate a dynamic threshold Θ(t):

Θ(t) = 𝔼[Hᵢⁿ(L)(t)[0]] + δ ⋅ StdDev(Hᵢⁿ(L)(t)[0])

Signals: Assign signals Sᵢⁿ(t) for each GNN n:

Sᵢⁿ(t) = { +1 if Hᵢⁿ(L)(t)[0] > Θ(t), -1 if Hᵢⁿ(L)(t)[0] < -Θ(t), 0 otherwise }

Final Signal Aggregation: Combine signals across all 28 GNNs (a median sketch follows below):

Sᵢ(t) = Median(Sᵢⁿ(t) ∀ n ∈ [1,28])
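
The listing further below generates one signal per pair rather than aggregating 28 per-GNN signals; a minimal sketch of the median step, where the per-GNN signal array s is an assumed addition not kept by the listing:

var medianSignal(var* s, int n)
{
    var tmp[28];
    var t;
    int i, j;
    for (i = 0; i < n; i++) tmp[i] = s[i];
    for (i = 0; i < n-1; i++)       // insertion-style sort; n is small
        for (j = i+1; j < n; j++)
            if (tmp[j] < tmp[i]) { t = tmp[i]; tmp[i] = tmp[j]; tmp[j] = t; }
    if (n % 2) return tmp[n/2];
    return 0.5*(tmp[n/2-1] + tmp[n/2]);
}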

Optimization Tasks
Parameter Optimization: Tune σ₀, κ, δ, Wⁿ(l) to maximize profitability.

Noise Robustness: Account for noise ηₖ(t) in the eigenvalues:

λₖ'(t) = λₖ(t) ⋅ (1 + ηₖ(t))

Where ηₖ(t) ∼ N(0, λₖ(t) ⋅ ν), and ν is a noise coefficient.

Sharpe Ratio: Maximize the Sharpe ratio:

Sharpe = Mean(Returns) / StdDev(Returns)

Puzzle Objective

Simulate Vᵢ(t) over 1000 ticks for all 28 currency pairs (a simulation sketch follows after this list).
Construct 28 GNNs using PCA-reduced features and adjacency matrices.
Refine features through GNN propagation.
Generate trading signals based on dynamic thresholds.
Maximize cumulative returns with a Sharpe ratio above 2.0.
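
A minimal sketch of the tick simulation from step 1; the coupling choice j = i+1, k = i+2 and the coefficient ranges are assumptions, and PAIRS (28) matches the listing below:

#define TICKS 1000

var Vsim[TICKS][PAIRS]; // simulated volatility paths

function simulateVolatilityWeb()
{
    var alpha[PAIRS], beta[PAIRS], gamma[PAIRS];
    int t, i;
    for (i = 0; i < PAIRS; i++) {
        alpha[i] = random(); beta[i] = random(); gamma[i] = 0.1*random();
        Vsim[0][i] = random(); Vsim[1][i] = random(); // seed the first two steps
    }
    for (t = 2; t < TICKS; t++) {
        for (i = 0; i < PAIRS; i++) {
            int j = (i+1) % PAIRS;  // assumed coupling scheme for j, k
            int k = (i+2) % PAIRS;
            Vsim[t][i] = alpha[i]*sin(PI/2*Vsim[t-1][j])
                       + beta[i]*cos(PI/3*Vsim[t-2][k]) + gamma[i];
        }
    }
}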


Code
#define PAIRS 28
#define COMPONENTS 3 // Number of principal components from PCA
#define GNN_LAYERS 2 // Number of GNN layers per GNN

string CurrencyPairs[PAIRS] = {
    "EURUSD", "GBPUSD", "USDJPY", "GBPJPY", "USDCAD", "EURAUD", "EURJPY",
    "AUDCAD", "AUDJPY", "AUDNZD", "AUDUSD", "CADJPY", "EURCAD", "EURCHF",
    "EURGBP", "EURNZD", "GBPCAD", "GBPCHF", "NZDCAD", "NZDJPY", "NZDUSD",
    "USDCHF", "CHFJPY", "AUDCHF", "GBPNZD", "NZDCHF", "CADCHF", "GBPAUD"
};

// Variables
vars volatilities[PAIRS];             // Volatility for each pair
vars adjacencyMatrices[PAIRS][PAIRS][28];  // Adjacency matrices for 28 GNNs
vars pcaReducedFeatures[PAIRS][COMPONENTS]; // PCA-reduced features
vars gnnWeights[28][GNN_LAYERS][COMPONENTS][COMPONENTS]; // GNN weights for each GNN
vars gnnRefinedFeatures[PAIRS][COMPONENTS]; // Refined features from GNNs
vars kernelMatrix[PAIRS][PAIRS];      // Kernel matrix for PCA
vars eigenvalues[PAIRS];              // Eigenvalues from PCA
vars eigenvectors[PAIRS][PAIRS];      // Eigenvectors from PCA
vars reducedMatrix[PAIRS][COMPONENTS]; // Final reduced components
vars signals[PAIRS];                  // Final trading signals
int lookback = 50;
int propagationDepth = 3;             // GNN propagation depth

// Step 1: Calculate volatilities for all pairs
function calculateVolatilities() {
    for (int i = 0; i < PAIRS; i++) {
        volatilities[i] = StdDev(series(price(CurrencyPairs[i])), lookback);
    }
}

// Step 2: Construct adjacency matrices for each GNN
function constructAdjacencyMatrices(vars features[PAIRS][COMPONENTS]) {
    for (int gnn = 0; gnn < 28; gnn++) { // Each GNN has its own adjacency matrix
        for (int i = 0; i < PAIRS; i++) {
            for (int j = 0; j < PAIRS; j++) {
                double distance = 0;
                for (int k = 0; k < COMPONENTS; k++) {
                    distance += pow(features[i][k] - features[j][k], 2);
                }
                adjacencyMatrices[i][j][gnn] = exp(-distance / (2 * pow(0.5, 2))); // Gaussian kernel
            }
        }
    }
}

// Step 3: Initialize GNN weights
function initializeGNNWeights() {
    for (int gnn = 0; gnn < 28; gnn++) {
        for (int l = 0; l < GNN_LAYERS; l++) {
            for (int i = 0; i < COMPONENTS; i++) {
                for (int j = 0; j < COMPONENTS; j++) {
                    gnnWeights[gnn][l][i][j] = random() * 0.1; // Small random initialization
                }
            }
        }
    }
}

// Step 4: Propagate features through each GNN
function propagateGNN(vars features[PAIRS][COMPONENTS]) {
    vars tempFeatures[PAIRS][COMPONENTS];
    for (int gnn = 0; gnn < 28; gnn++) { // Process each GNN
        for (int t = 0; t < GNN_LAYERS; t++) { // one step per layer; gnnWeights[gnn] only has GNN_LAYERS entries, so propagationDepth would overrun
            for (int i = 0; i < PAIRS; i++) {
                for (int k = 0; k < COMPONENTS; k++) {
                    tempFeatures[i][k] = 0;
                    for (int j = 0; j < PAIRS; j++) {
                        for (int m = 0; m < COMPONENTS; m++) {
                            tempFeatures[i][k] += adjacencyMatrices[i][j][gnn] * features[j][m] * gnnWeights[gnn][t][m][k];
                        }
                    }
                    tempFeatures[i][k] = max(0, tempFeatures[i][k]); // ReLU activation
                }
            }
            // Update features for the next layer
            for (int i = 0; i < PAIRS; i++) {
                for (int k = 0; k < COMPONENTS; k++) {
                    features[i][k] = tempFeatures[i][k];
                }
            }
        }
    }
    // Copy the refined features
    for (int i = 0; i < PAIRS; i++) {
        for (int k = 0; k < COMPONENTS; k++) {
            gnnRefinedFeatures[i][k] = features[i][k];
        }
    }
}

// Step 5: Perform Kernel PCA
function performKernelPCA() {
    // build the volatility kernel first; kernelMatrix is not filled anywhere else in this listing
    for (int i = 0; i < PAIRS; i++)
        for (int j = 0; j < PAIRS; j++)
            kernelMatrix[i][j] = exp(-pow(volatilities[i] - volatilities[j], 2) / (2 * pow(0.5, 2)));
    eigenDecomposition(kernelMatrix, eigenvalues, eigenvectors);
    for (int i = 0; i < PAIRS; i++) {
        for (int j = 0; j < COMPONENTS; j++) { // Use top COMPONENTS
            reducedMatrix[i][j] = dotProduct(kernelMatrix[i], eigenvectors[j]);
        }
    }
}

// Step 6: Generate trading signals
function generateSignals() {
    double mean = 0, stddev = 0;
    for (int i = 0; i < PAIRS; i++) {
        mean += reducedMatrix[i][0];
    }
    mean /= PAIRS;
    for (int i = 0; i < PAIRS; i++) {
        stddev += pow(reducedMatrix[i][0] - mean, 2);
    }
    stddev = sqrt(stddev / PAIRS);
    double threshold = mean + 1.0 * stddev;

    for (int i = 0; i < PAIRS; i++) {
        if (reducedMatrix[i][0] > threshold) signals[i] = 1; // Buy
        else if (reducedMatrix[i][0] < -threshold) signals[i] = -1; // Sell
        else signals[i] = 0; // Hold
    }
}

// Main function
function run() {
    set(PLOTNOW);

    // Step 1: Calculate volatilities
    calculateVolatilities();

    // Step 2: Perform Kernel PCA to reduce features
    performKernelPCA();

    // Step 3: Construct adjacency matrices for GNNs
    constructAdjacencyMatrices(reducedMatrix);

    // Step 4: Initialize GNN weights
    initializeGNNWeights();

    // Step 5: Apply GNN propagation
    propagateGNN(reducedMatrix);

    // Step 6: Generate trading signals
    generateSignals();

    // Step 7: Execute trades
    for (int i = 0; i < PAIRS; i++) {
        if (signals[i] > 0) enterLong(CurrencyPairs[i]);
        else if (signals[i] < 0) enterShort(CurrencyPairs[i]);
    }
}

Synergistic Graph-PCA Arbitrage (SGPA) [Re: TipmyPip] #488537
01/09/25 17:57
01/09/25 17:57
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
In the realm of market energies, 28 currency pairs, each represented as a celestial entity C₁, C₂, …, C₂₈, dance to the rhythms of volatility and interconnectedness. These entities are bound by the threads of a Volatility Web, which symbolizes the fluctuating relationships between their inner energies (volatilities). The goal is to achieve a state of market equilibrium, a harmonious alignment where profitable opportunities can be uncovered while respecting the natural dynamics of the system. (See the attached problem description file.)


Code
#define PAIRS 28
#define COMPONENTS 3  // Number of PCA components
#define GNN_LAYERS 2  // Number of GNN layers
#define ACTIONS 3     // Buy, Sell, Hold

// Variables
string CurrencyPairs[PAIRS] = {
    "EURUSD", "GBPUSD", "USDJPY", "GBPJPY", "USDCAD", "EURAUD", "EURJPY",
    "AUDCAD", "AUDJPY", "AUDNZD", "AUDUSD", "CADJPY", "EURCAD", "EURCHF",
    "EURGBP", "EURNZD", "GBPCAD", "GBPCHF", "NZDCAD", "NZDJPY", "NZDUSD",
    "USDCHF", "CHFJPY", "AUDCHF", "GBPNZD", "NZDCHF", "CADCHF", "GBPAUD"
};

// Enhanced PCA, GNN, and Signal Variables
vars kernelMatrix[PAIRS][PAIRS];            // Kernel matrix for PCA
vars pcaReducedFeatures[PAIRS][COMPONENTS]; // PCA-reduced features for each pair
vars adjacencyMatrices[PAIRS][PAIRS];       // Adjacency matrices for GNNs
vars gnnWeights[GNN_LAYERS][COMPONENTS][COMPONENTS]; // GNN weights
vars gnnOutputs[PAIRS][ACTIONS];            // GNN probabilities for Buy/Sell/Hold
vars similarityMatrix[PAIRS][PAIRS];        // Cross-Entropy similarity matrix
vars refinedOutputs[PAIRS][ACTIONS];        // Refined GNN probabilities
vars signals[PAIRS];                        // Final trading signals
vars eigenvalues[PAIRS];                    // Eigenvalues from PCA (used below; missing from the original declarations)
vars eigenvectors[PAIRS][PAIRS];            // Eigenvectors from PCA

// Step 1: Perform Kernel PCA (assumes kernelMatrix and adjacencyMatrices are filled beforehand, e.g. from volatility spreads as in the previous posts)
function performKernelPCA() {
    eigenDecomposition(kernelMatrix, eigenvalues, eigenvectors);
    for (int i = 0; i < PAIRS; i++) {
        for (int j = 0; j < COMPONENTS; j++) { // Use top COMPONENTS
            pcaReducedFeatures[i][j] = dotProduct(kernelMatrix[i], eigenvectors[j]);
        }
    }
}

// Step 2: Initialize GNN Weights
function initializeGNNWeights() {
    for (int l = 0; l < GNN_LAYERS; l++) {
        for (int i = 0; i < COMPONENTS; i++) {
            for (int j = 0; j < COMPONENTS; j++) {
                gnnWeights[l][i][j] = random() * 0.1; // Small random initialization
            }
        }
    }
}

// Step 3: GNN Propagation
function propagateGNN() {
    vars tempFeatures[PAIRS][COMPONENTS];
    for (int l = 0; l < GNN_LAYERS; l++) { // GNN propagation layers
        for (int i = 0; i < PAIRS; i++) {
            for (int k = 0; k < COMPONENTS; k++) {
                tempFeatures[i][k] = 0;
                for (int j = 0; j < PAIRS; j++) {
                    for (int m = 0; m < COMPONENTS; m++) {
                        tempFeatures[i][k] += adjacencyMatrices[i][j] * pcaReducedFeatures[j][m] * gnnWeights[l][m][k];
                    }
                }
                tempFeatures[i][k] = max(0, tempFeatures[i][k]); // ReLU activation
            }
        }
        // Update PCA features for the next layer
        for (int i = 0; i < PAIRS; i++) {
            for (int k = 0; k < COMPONENTS; k++) {
                pcaReducedFeatures[i][k] = tempFeatures[i][k];
            }
        }
    }
    // Generate probabilities (Buy/Sell/Hold) based on final GNN outputs
    for (int i = 0; i < PAIRS; i++) {
        for (int k = 0; k < ACTIONS; k++) {
            gnnOutputs[i][k] = random() * 0.1; // Placeholder for GNN probability outputs
        }
    }
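    // Note (assumed addition): the random placeholders above are not valid
    // probabilities; a softmax over the ACTIONS normalizes each row so the
    // cross-entropy step below is well-defined.
    for (int i = 0; i < PAIRS; i++) {
        double sum = 0;
        for (int k = 0; k < ACTIONS; k++) {
            gnnOutputs[i][k] = exp(gnnOutputs[i][k]);
            sum += gnnOutputs[i][k];
        }
        for (int k = 0; k < ACTIONS; k++)
            gnnOutputs[i][k] /= sum;
    }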
}

// Step 4: Compute Cross-Entropy Similarity
function computeCrossEntropySimilarity() {
    for (int i = 0; i < PAIRS; i++) {
        for (int j = 0; j < PAIRS; j++) {
            similarityMatrix[i][j] = 0;
            for (int k = 0; k < ACTIONS; k++) {
                similarityMatrix[i][j] -= gnnOutputs[i][k] * log(gnnOutputs[j][k] + 1e-8);
            }
        }
    }
}

// Step 5: Refine GNN Outputs Using Similarity
function refineGNNOutputs() {
    for (int i = 0; i < PAIRS; i++) {
        for (int k = 0; k < ACTIONS; k++) {
            refinedOutputs[i][k] = 0;
            double weightSum = 0;
            for (int j = 0; j < PAIRS; j++) {
                refinedOutputs[i][k] += similarityMatrix[i][j] * gnnOutputs[j][k];
                weightSum += similarityMatrix[i][j];
            }
            refinedOutputs[i][k] /= (weightSum + 1e-8); // Normalize
        }
    }
}

// Step 6: Generate Trading Signals
function generateSignals() {
    for (int i = 0; i < PAIRS; i++) {
        signals[i] = refinedOutputs[i][0] - refinedOutputs[i][1]; // Buy-Sell difference
    }
}

// Step 7: Execute Trades
function executeTrades() {
    for (int i = 0; i < PAIRS; i++) {
        if (signals[i] > 0) enterLong(CurrencyPairs[i]);
        else if (signals[i] < 0) enterShort(CurrencyPairs[i]);
    }
}

// Main Function
function run() {
    set(PLOTNOW);

    // Step 1: Perform Kernel PCA
    performKernelPCA();

    // Step 2: Initialize GNN weights
    initializeGNNWeights();

    // Step 3: Propagate GNN
    propagateGNN();

    // Step 4: Compute Cross-Entropy Similarity
    computeCrossEntropySimilarity();

    // Step 5: Refine GNN outputs
    refineGNNOutputs();

    // Step 6: Generate trading signals
    generateSignals();

    // Step 7: Execute trades
    executeTrades();
}

Last edited by TipmyPip; 01/09/25 18:09.
Decoding the Dynamics of Volatility Interdependencies [Re: TipmyPip] #488538
01/09/25 18:20
01/09/25 18:20
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
A Game-Theoretic and Abstract Perspective: The Dance of Volatility

In the ever-unfolding theater of financial markets, imagine a dynamic stage where 28 players—currency pairs—engage in an intricate and ceaseless dance. Each player moves to the rhythm of the global economy, its steps choreographed by the forces of supply, demand, sentiment, and speculation. Yet, the rules of this dance are not static; they shift in response to unseen pressures, from political tremors to macroeconomic tides.

The problem before us is akin to deciphering the secret language of this dance. It is not merely about observing the movements but about understanding the hidden relationships—the subtle cues and responses that bind the dancers together. Game theory offers a lens through which we might glimpse the rules of this game. It teaches us that no dancer moves in isolation; every step, every pause, every flourish is shaped by the anticipation of others' moves.

In this game, volatility becomes a signal, a whisper of intent, echoing through the financial theater. Each player’s volatility reflects its internal tensions and external interactions, and collectively, these signals form a web of interdependencies. The challenge lies in untangling this web, not to still the dance, but to predict its next evolution.

The problem is also an abstraction of mathematical elegance. Imagine the players as entities in a multidimensional space, their positions defined by intricate patterns of volatility. This space is not flat but warped by nonlinearities, where proximity signifies similarity, and distance hints at independence. Yet, even the closest dancers may diverge when the music shifts.

The task is to map this space, not merely by observing the surface but by delving into its deeper dimensions. Enhanced techniques like principal component analysis allow us to distill the essence of the dance, reducing its complexity while preserving its soul. Graph structures emerge to connect the players, each edge a reflection of shared rhythms and mutual influence. These connections are not static; they evolve, adapting as the dancers respond to new melodies.

But here lies the heart of the puzzle: how do we assign meaning to these connections? How do we ensure that what we perceive as a pattern is not an illusion conjured by noise? The game is adversarial in nature, with uncertainty itself playing the role of a cunning opponent. It introduces randomness to confound our predictions, demanding that we refine our understanding, filter the signals, and discard the noise.

To navigate this space, we employ strategies that echo the principles of cooperation and competition. Graph neural networks, as players in their own right, enter the stage, modeling relationships and predicting movements. Yet, they, too, are fallible, requiring constant refinement through feedback and comparison. Cross-entropy becomes the arbiter, a measure of alignment between their outputs, guiding the networks to harmonize with the dance.

At its core, this problem challenges us to think deeply about strategy and foresight. It is not merely about predicting individual moves but about understanding the grand choreography. What happens when the music changes? When the connections between players weaken or intensify? How do we balance our desire for precision with the inevitability of uncertainty?

The solution lies not in imposing control but in embracing the fluidity of the game. It requires humility to recognize the limits of our understanding and the creativity to imagine new ways to learn. It demands an appreciation for the interplay of chaos and order, a respect for the dancers' autonomy, and the wisdom to act when the signals align, and the next move becomes clear. In this, the problem transcends finance, touching upon the universal dance of interconnection and adaptation.


Code
#define PAIRS 28
#define COMPONENTS 3  // Number of PCA components
#define GNN_LAYERS 2  // Number of GNN layers
#define ACTIONS 3     // Buy, Sell, Hold

// Variables
string CurrencyPairs[PAIRS] = {
    "EURUSD", "GBPUSD", "USDJPY", "GBPJPY", "USDCAD", "EURAUD", "EURJPY",
    "AUDCAD", "AUDJPY", "AUDNZD", "AUDUSD", "CADJPY", "EURCAD", "EURCHF",
    "EURGBP", "EURNZD", "GBPCAD", "GBPCHF", "NZDCAD", "NZDJPY", "NZDUSD",
    "USDCHF", "CHFJPY", "AUDCHF", "GBPNZD", "NZDCHF", "CADCHF", "GBPAUD"
};

// Kernel PCA, GNN, and Signal Variables
vars kernelMatrix[PAIRS][PAIRS];            // Kernel matrix for PCA
vars pcaReducedFeatures[PAIRS][COMPONENTS]; // PCA-reduced features for each pair
vars adjacencyMatrices[PAIRS][PAIRS];       // Adjacency matrices for GNNs
vars gnnWeights[GNN_LAYERS][COMPONENTS][COMPONENTS]; // GNN weights
vars gnnOutputs[PAIRS][ACTIONS];            // GNN probabilities for Buy/Sell/Hold
vars similarityMatrix[PAIRS][PAIRS];        // Cross-Entropy similarity matrix
vars refinedOutputs[PAIRS][ACTIONS];        // Refined GNN probabilities
vars signals[PAIRS];                        // Final trading signals
vars eigenvalues[PAIRS];                    // Eigenvalues from PCA (used below; missing from the original declarations)
vars eigenvectors[PAIRS][PAIRS];            // Eigenvectors from PCA

// Step 1: Perform Kernel PCA (assumes kernelMatrix and adjacencyMatrices are filled beforehand, e.g. from volatility spreads as in the previous posts)
function performKernelPCA() {
    eigenDecomposition(kernelMatrix, eigenvalues, eigenvectors);
    for (int i = 0; i < PAIRS; i++) {
        for (int j = 0; j < COMPONENTS; j++) { // Use top COMPONENTS
            pcaReducedFeatures[i][j] = dotProduct(kernelMatrix[i], eigenvectors[j]);
        }
    }
}

// Step 2: Initialize GNN Weights
function initializeGNNWeights() {
    for (int l = 0; l < GNN_LAYERS; l++) {
        for (int i = 0; i < COMPONENTS; i++) {
            for (int j = 0; j < COMPONENTS; j++) {
                gnnWeights[l][i][j] = random() * 0.1; // Small random initialization
            }
        }
    }
}

// Step 3: GNN Propagation
function propagateGNN() {
    vars tempFeatures[PAIRS][COMPONENTS];
    for (int l = 0; l < GNN_LAYERS; l++) { // GNN propagation layers
        for (int i = 0; i < PAIRS; i++) {
            for (int k = 0; k < COMPONENTS; k++) {
                tempFeatures[i][k] = 0;
                for (int j = 0; j < PAIRS; j++) {
                    for (int m = 0; m < COMPONENTS; m++) {
                        tempFeatures[i][k] += adjacencyMatrices[i][j] * pcaReducedFeatures[j][m] * gnnWeights[l][m][k];
                    }
                }
                tempFeatures[i][k] = max(0, tempFeatures[i][k]); // ReLU activation
            }
        }
        // Update PCA features for the next layer
        for (int i = 0; i < PAIRS; i++) {
            for (int k = 0; k < COMPONENTS; k++) {
                pcaReducedFeatures[i][k] = tempFeatures[i][k];
            }
        }
    }
    // Generate probabilities (Buy/Sell/Hold) based on final GNN outputs
    for (int i = 0; i < PAIRS; i++) {
        for (int k = 0; k < ACTIONS; k++) {
            gnnOutputs[i][k] = random() * 0.1; // Placeholder for GNN probability outputs
        }
    }
}

// Step 4: Compute Cross-Entropy Similarity
function computeCrossEntropySimilarity() {
    for (int i = 0; i < PAIRS; i++) {
        for (int j = 0; j < PAIRS; j++) {
            similarityMatrix[i][j] = 0;
            for (int k = 0; k < ACTIONS; k++) {
                similarityMatrix[i][j] -= gnnOutputs[i][k] * log(gnnOutputs[j][k] + 1e-8);
            }
        }
    }
}

// Step 5: Refine GNN Outputs Using Similarity
function refineGNNOutputs() {
    for (int i = 0; i < PAIRS; i++) {
        for (int k = 0; k < ACTIONS; k++) {
            refinedOutputs[i][k] = 0;
            double weightSum = 0;
            for (int j = 0; j < PAIRS; j++) {
                refinedOutputs[i][k] += similarityMatrix[i][j] * gnnOutputs[j][k];
                weightSum += similarityMatrix[i][j];
            }
            refinedOutputs[i][k] /= (weightSum + 1e-8); // Normalize
        }
    }
}

// Step 6: Generate Trading Signals
function generateSignals() {
    for (int i = 0; i < PAIRS; i++) {
        signals[i] = refinedOutputs[i][0] - refinedOutputs[i][1]; // Buy-Sell difference
    }
}

// Step 7: Execute Trades
function executeTrades() {
    for (int i = 0; i < PAIRS; i++) {
        if (signals[i] > 0) enterLong(CurrencyPairs[i]);
        else if (signals[i] < 0) enterShort(CurrencyPairs[i]);
    }
}

// Main Function
function run() {
    set(PLOTNOW);

    // Step 1: Perform Kernel PCA
    performKernelPCA();

    // Step 2: Initialize GNN weights
    initializeGNNWeights();

    // Step 3: Propagate GNN
    propagateGNN();

    // Step 4: Compute Cross-Entropy Similarity
    computeCrossEntropySimilarity();

    // Step 5: Refine GNN outputs
    refineGNNOutputs();

    // Step 6: Generate trading signals
    generateSignals();

    // Step 7: Execute trades
    executeTrades();
}

Quantum Market Entanglement Problem [Re: TipmyPip] #488539
01/09/25 18:48
01/09/25 18:48
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
The Puzzle of Currency Waves 🌊💸
Imagine you're standing at a beach and watching the waves. Each wave is like a currency pair (e.g., EUR/USD or GBP/JPY) in the financial market. Some waves are big, some are small, and they all move differently. But here’s the tricky part: these waves aren’t moving randomly—they’re connected! 🌐

Now, let’s pretend you’re a wave scientist. Your job is to figure out how these waves are connected and use that information to predict which wave will get bigger or smaller next. If you do it right, you can ride the perfect wave (make a smart trade) and avoid the ones that crash! 🏄‍♂️

The Finance Game 🎮
In the real world of finance, these "waves" are actually something called volatility. Volatility tells us how much the prices of currency pairs are changing. Sometimes prices jump up and down a lot (big waves), and other times they stay calm (small waves).

But here's the big mystery: what makes one wave affect another?

The Big Question 🧠
Let’s say we have 28 waves, one for each currency pair. If the EUR/USD wave gets bigger, does it make the GBP/JPY wave smaller? Or does it push all the waves to get bigger? Your job is to figure this out, just like a detective solving a mystery. 🕵️‍♀️

To solve this puzzle, you’ll use a magical tool called Relative Volatility Spread Similarity. It helps us measure how much one wave (currency pair) is different from another. For example:

If EUR/USD is very calm and GBP/JPY is very wild, they’re very different.
If both are wild, they’re similar.

Using the Magic Glasses 🔍✨
To make sense of all this, we use something like magic glasses called Enhanced PCA. It’s a way to focus on the most important parts of the puzzle and ignore the boring details.

Once we have the important pieces, we send them to a group of really smart robots called Graph Neural Networks (GNNs). These robots:

Look at how all the waves are connected.
Share their findings with each other.
Give us advice on whether to buy, sell, or hold currencies.

The Secret Sauce 🥣
But we don’t stop there! We check if the robots are working well by making them talk to each other. If two robots give very different advice, we ask, “Why are you so different?” and help them refine their answers. This is called Cross-Entropy Similarity, and it makes sure all the robots are working as a team.

The Goal 🎯
The goal of this whole game is to:

Predict the best waves (currency pairs) to ride.
Make money while keeping risks low (don’t fall off your surfboard!). 🏄‍♀️💰

Code
#define PAIRS 28
#define COMPONENTS 3     // Number of PCA components
#define GNN_LAYERS 2     // Number of GNN layers
#define ACTIONS 3        // Buy, Sell, Hold
#define LOOKBACK 100     // Lookback period for volatility calculation
#define VOL_KERNEL_WIDTH 0.5 // Width for Gaussian kernel
#define SIMILARITY_ALPHA 0.1 // Smoothing factor for similarity updates

// Declare currency pairs
string CurrencyPairs[PAIRS] = {
    "EURUSD", "GBPUSD", "USDJPY", "GBPJPY", "USDCAD", "EURAUD", "EURJPY",
    "AUDCAD", "AUDJPY", "AUDNZD", "AUDUSD", "CADJPY", "EURCAD", "EURCHF",
    "EURGBP", "EURNZD", "GBPCAD", "GBPCHF", "NZDCAD", "NZDJPY", "NZDUSD",
    "USDCHF", "CHFJPY", "AUDCHF", "GBPNZD", "NZDCHF", "CADCHF", "GBPAUD"
};

// Global Variables
vars kernelMatrix[PAIRS][PAIRS];            // Kernel matrix for PCA
vars pcaReducedFeatures[PAIRS][COMPONENTS]; // PCA-reduced features for each pair
vars adjacencyMatrices[PAIRS][PAIRS];       // Adjacency matrices for GNNs
vars gnnWeights[GNN_LAYERS][COMPONENTS][COMPONENTS]; // GNN weights
vars gnnOutputs[PAIRS][ACTIONS];            // GNN probabilities for Buy/Sell/Hold
vars similarityMatrix[PAIRS][PAIRS];        // Cross-Entropy similarity matrix
vars refinedOutputs[PAIRS][ACTIONS];        // Refined GNN probabilities
vars signals[PAIRS];                        // Final trading signals
vars eigenvalues[PAIRS];                    // Eigenvalues from PCA (used below; missing from the original declarations)
vars eigenvectors[PAIRS][PAIRS];            // Eigenvectors from PCA

// Utility Function: Calculate Rolling Volatility
function calcVolatility(string pair) {
    asset(pair);
    vars returns = series(log(priceClose() / priceClose(1)));
    return StdDev(returns, LOOKBACK);
}

// Step 1: Compute Kernel Matrix
function computeKernelMatrix() {
    for (int i = 0; i < PAIRS; i++) {
        for (int j = 0; j < PAIRS; j++) {
            vars sigma_i = series(calcVolatility(CurrencyPairs[i]));
            vars sigma_j = series(calcVolatility(CurrencyPairs[j]));
            vars volatilitySpread = series(abs(sigma_i[0] - sigma_j[0]));
            kernelMatrix[i][j] = exp(-pow(volatilitySpread[0], 2) / (2 * pow(VOL_KERNEL_WIDTH, 2))); // '^' is XOR in lite-C, so use pow()
        }
    }
}

// Step 2: Perform Enhanced PCA
function performEnhancedPCA() {
    eigenDecomposition(kernelMatrix, eigenvalues, eigenvectors);
    for (int i = 0; i < PAIRS; i++) {
        for (int j = 0; j < COMPONENTS; j++) {
            pcaReducedFeatures[i][j] = dotProduct(kernelMatrix[i], eigenvectors[j]);
        }
    }
}

// Step 3: Initialize GNN Weights
function initializeGNNWeights() {
    for (int l = 0; l < GNN_LAYERS; l++) {
        for (int i = 0; i < COMPONENTS; i++) {
            for (int j = 0; j < COMPONENTS; j++) {
                gnnWeights[l][i][j] = random() * 0.1; // Small random initialization
            }
        }
    }
}

// Step 4: GNN Propagation
function propagateGNN() {
    vars tempFeatures[PAIRS][COMPONENTS];
    for (int l = 0; l < GNN_LAYERS; l++) {
        for (int i = 0; i < PAIRS; i++) {
            for (int k = 0; k < COMPONENTS; k++) {
                tempFeatures[i][k] = 0;
                for (int j = 0; j < PAIRS; j++) {
                    for (int m = 0; m < COMPONENTS; m++) {
                        tempFeatures[i][k] += adjacencyMatrices[i][j] * pcaReducedFeatures[j][m] * gnnWeights[l][m][k];
                    }
                }
                tempFeatures[i][k] = max(0, tempFeatures[i][k]); // ReLU activation
            }
        }
        // Update PCA features for the next layer
        for (int i = 0; i < PAIRS; i++) {
            for (int k = 0; k < COMPONENTS; k++) {
                pcaReducedFeatures[i][k] = tempFeatures[i][k];
            }
        }
    }
    // Generate probabilities (Buy/Sell/Hold) based on final GNN outputs
    for (int i = 0; i < PAIRS; i++) {
        for (int k = 0; k < ACTIONS; k++) {
            gnnOutputs[i][k] = random() * 0.1; // Placeholder for GNN probability outputs
        }
    }
}

// Step 5: Compute Cross-Entropy Similarity
function computeCrossEntropySimilarity() {
    for (int i = 0; i < PAIRS; i++) {
        for (int j = 0; j < PAIRS; j++) {
            similarityMatrix[i][j] = 0;
            for (int k = 0; k < ACTIONS; k++) {
                similarityMatrix[i][j] -= gnnOutputs[i][k] * log(gnnOutputs[j][k] + 1e-8);
            }
        }
    }
}

// Step 6: Refine GNN Outputs Using Similarity
function refineGNNOutputs() {
    for (int i = 0; i < PAIRS; i++) {
        for (int k = 0; k < ACTIONS; k++) {
            refinedOutputs[i][k] = 0;
            double weightSum = 0;
            for (int j = 0; j < PAIRS; j++) {
                refinedOutputs[i][k] += similarityMatrix[i][j] * gnnOutputs[j][k];
                weightSum += similarityMatrix[i][j];
            }
            refinedOutputs[i][k] /= (weightSum + 1e-8); // Normalize
        }
    }
}

// Step 7: Generate Trading Signals
function generateSignals() {
    for (int i = 0; i < PAIRS; i++) {
        signals[i] = refinedOutputs[i][0] - refinedOutputs[i][1]; // Buy-Sell difference
    }
}

// Step 8: Execute Trades
function executeTrades() {
    for (int i = 0; i < PAIRS; i++) {
        asset(CurrencyPairs[i]); // select the pair first; enterLong/enterShort act on the current asset
        if (signals[i] > 0) enterLong();
        else if (signals[i] < 0) enterShort();
    }
}

// Main Function
function run() {
    set(PLOTNOW);

    // Step 1: Compute Kernel Matrix
    computeKernelMatrix();

    // Step 2: Perform Enhanced PCA
    performEnhancedPCA();

    // Step 3: Initialize GNN weights
    initializeGNNWeights();

    // Step 4: Propagate GNN
    propagateGNN();

    // Step 5: Compute Cross-Entropy Similarity
    computeCrossEntropySimilarity();

    // Step 6: Refine GNN outputs
    refineGNNOutputs();

    // Step 7: Generate trading signals
    generateSignals();

    // Step 8: Execute trades
    executeTrades();
}

Last edited by TipmyPip; 01/09/25 18:48.
Stochastic Interdependent Volatility-Adaptive Signal [Re: TipmyPip] #488540
01/09/25 19:46
01/09/25 19:46
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
The Dynamic Exam Study Plan

Imagine you are part of a study group of 28 students (representing the 28 currency pairs), and each student has their own unique strengths and weaknesses in different subjects like Math, Science, and Literature (representing market volatility). Each student’s performance changes over time depending on how much they study, external distractions, and collaboration with others in the group (representing market dynamics and interdependencies). Your goal is to create a dynamic and adaptive study plan that helps the whole group excel in an upcoming exam, even though you have limited time and resources.

Key Elements of the Problem
Study Stress as Volatility:

Each student’s stress level represents their volatility. Some students get very stressed (high volatility), while others are calm and steady (low volatility).
Stress changes over time based on how difficult the subject is and how much preparation they’ve done recently (like rolling standard deviations of their past "study returns").
The Principal of PCA (Principal Component Analysis):

You notice that not all topics are equally hard for everyone. Some topics like Algebra (or "Eigenvalues" in our original problem) affect most students' stress levels more than others, such as Creative Writing.
To simplify the problem, you identify a few key topics that cause the most stress for the group. These topics act like "Principal Components," and focusing on them can explain most of the group’s challenges.
The GNN (Study Group Network):

Some students are better at helping others because they understand connections between topics well (like connecting Algebra to Physics). These connections form a "network" of study helpers.
The stronger the connections between two students (measured by how often they work together and help each other), the better the group performs.
Entropy (Confidence in Preparation):

After each study session, you give every student three choices for their confidence: "I’m ready," "I’m nervous," or "I’m not sure" (representing Buy, Sell, Hold in trading).
If students are very confident in their knowledge, their "entropy" is low (less uncertainty). If everyone is unsure, entropy is high.
You use this entropy to adjust how much focus you should put on each student in the study plan.
Dynamic Thresholds (Study Plan):

You realize that every student needs a unique threshold for when they are ready to move on to a new topic. Some students need more time (higher threshold), while others can switch topics quickly (lower threshold).
The thresholds are dynamic and depend on:
How much stress they’ve reduced recently (volatility change rate).
How critical the topic is for the group (PCA contribution).
How confident they feel (entropy of their confidence).
The Exam as the Market:

The final test acts like the real-world trading market. If the study group can align their efforts (similar to finding trading signals), they maximize their overall performance (similar to the Sharpe Ratio for returns).

The Goal:

Help every student perform their best (like maximizing profits in trading).
Minimize unnecessary stress and avoid overloading anyone (like minimizing risks and variance).
Adapt the study plan dynamically to new challenges and feedback (like responding to changing market conditions).

The Problem:

Each student's stress level follows a random path (like a stochastic process) and is influenced by their natural abilities and study habits.
The group's success depends on finding the optimal way to balance individual efforts and group collaboration.
How can you create a study plan that dynamically adapts to each student’s needs, while also ensuring the group collectively performs well?


Code
#define PAIRS 28
#define COMPONENTS 3   // Number of PCA components
#define GNN_LAYERS 2   // Number of GNN layers
#define ACTIONS 3      // Buy, Sell, Hold

// Define currency pairs
string CurrencyPairs[PAIRS] = {
    "EURUSD", "GBPUSD", "USDJPY", "GBPJPY", "USDCAD", "EURAUD", "EURJPY",
    "AUDCAD", "AUDJPY", "AUDNZD", "AUDUSD", "CADJPY", "EURCAD", "EURCHF",
    "EURGBP", "EURNZD", "GBPCAD", "GBPCHF", "NZDCAD", "NZDJPY", "NZDUSD",
    "USDCHF", "CHFJPY", "AUDCHF", "GBPNZD", "NZDCHF", "CADCHF", "GBPAUD"
};

// Variables for PCA, GNN, and signals
var volatilities[PAIRS];                   // Current volatilities
var volChangeRate[PAIRS];                  // Volatility change rate
var kernelMatrix[PAIRS][PAIRS];            // Kernel matrix for PCA
var pcaReducedFeatures[PAIRS][COMPONENTS]; // PCA-reduced features
var adjacencyMatrices[PAIRS][PAIRS];       // GNN adjacency matrices (must be populated before propagateGNN() for nonzero output)
var gnnWeights[GNN_LAYERS][COMPONENTS][COMPONENTS]; // GNN weights
var gnnOutputs[PAIRS][ACTIONS];            // GNN probabilities (Buy/Sell/Hold)
var signals[PAIRS];                        // Final trading signals
var eigenvalues[COMPONENTS];               // Eigenvalues from PCA
var eigenvectors[PAIRS][PAIRS];            // Eigenvectors from PCA (used by eigenDecomposition)
// Note: 'vars' is a series pointer (var*) in lite-C; fixed-size arrays are declared with 'var'.

// Step 1: Calculate Volatility and Change Rate
function calculateVolatility() {
    for (int i = 0; i < PAIRS; i++) {
        asset(CurrencyPairs[i]);
        vars logReturns = series(log(priceClose(0) / priceClose(1))); // log returns (the original line computed simple returns)
        vars sqReturns = series(logReturns[0]*logReturns[0]);         // squared log returns (pow() does not apply element-wise to a series)
        volatilities[i] = sqrt(SMA(sqReturns, 20));                   // 20-bar rolling std dev
        vars volHistory = series(volatilities[i]);                    // per-pair volatility history over time
        volChangeRate[i] = volHistory[0] - volHistory[1];             // change vs. previous bar (the original compared neighboring pairs)
    }
}

// Step 2: Perform Kernel PCA
function performKernelPCA() {
    // Construct Kernel Matrix
    for (int i = 0; i < PAIRS; i++) {
        for (int j = 0; j < PAIRS; j++) {
            kernelMatrix[i][j] = exp(-pow(volatilities[i] - volatilities[j], 2) / (2 * 0.1 * 0.1)); // Gaussian kernel
        }
    }

    // Perform eigen decomposition (eigenDecomposition is an assumed helper, not a Zorro built-in)
    eigenDecomposition(kernelMatrix, eigenvalues, eigenvectors);
    
    // Reduce dimensions using top COMPONENTS
    for (int i = 0; i < PAIRS; i++) {
        for (int j = 0; j < COMPONENTS; j++) {
            pcaReducedFeatures[i][j] = dotProduct(kernelMatrix[i], eigenvectors[j]);
        }
    }
}

// Step 3: Initialize GNN Weights
function initializeGNNWeights() {
    for (int l = 0; l < GNN_LAYERS; l++) {
        for (int i = 0; i < COMPONENTS; i++) {
            for (int j = 0; j < COMPONENTS; j++) {
                gnnWeights[l][i][j] = random() * 0.1; // Small random initialization
            }
        }
    }
}

// Step 4: GNN Propagation
function propagateGNN() {
    var tempFeatures[PAIRS][COMPONENTS];
    for (int l = 0; l < GNN_LAYERS; l++) {
        for (int i = 0; i < PAIRS; i++) {
            for (int k = 0; k < COMPONENTS; k++) {
                tempFeatures[i][k] = 0;
                for (int j = 0; j < PAIRS; j++) {
                    for (int m = 0; m < COMPONENTS; m++) {
                        tempFeatures[i][k] += adjacencyMatrices[i][j] * pcaReducedFeatures[j][m] * gnnWeights[l][m][k];
                    }
                }
                tempFeatures[i][k] = max(0, tempFeatures[i][k]); // ReLU activation
            }
        }
        // Update PCA features for the next layer
        for (int i = 0; i < PAIRS; i++) {
            for (int k = 0; k < COMPONENTS; k++) {
                pcaReducedFeatures[i][k] = tempFeatures[i][k];
            }
        }
    }
    // Final probabilities for Buy, Sell, Hold via an inline softmax
    // (no softmax() helper is defined in this script, and a softmax of a single scalar is undefined)
    for (int i = 0; i < PAIRS; i++) {
        var expSum = 0;
        for (int k = 0; k < ACTIONS; k++)
            expSum += exp(tempFeatures[i][k]);
        for (int k = 0; k < ACTIONS; k++)
            gnnOutputs[i][k] = exp(tempFeatures[i][k]) / (expSum + 1e-8);
    }
}

// Step 5: Generate Trading Signals with PCA and GNN Thresholds
function generateSignals() {
    // Calculate PCA Variance Contribution
    var totalVariance = 0;
    for (int k = 0; k < COMPONENTS; k++) {
        totalVariance += eigenvalues[k];
    }
    var pcaContribution = eigenvalues[0] / totalVariance; // Contribution of the top component

    // Calculate GNN Entropy for each pair
    var gnnEntropy[PAIRS];
    for (int i = 0; i < PAIRS; i++) {
        gnnEntropy[i] = 0;
        for (int k = 0; k < ACTIONS; k++) {
            gnnEntropy[i] -= gnnOutputs[i][k] * log(gnnOutputs[i][k] + 1e-8);
        }
    }

    // Calculate Dynamic Thresholds with PCA and GNN
    var dynamicThresholdBuy[PAIRS];
    var dynamicThresholdSell[PAIRS];
    for (int i = 0; i < PAIRS; i++) {
        var meanVolChangeRate = SMA(volChangeRate, PAIRS);
        var stddevVolChangeRate = StdDev(volChangeRate, PAIRS);

        // Adjust thresholds with PCA and GNN contributions
        dynamicThresholdBuy[i] = meanVolChangeRate + 0.5 * stddevVolChangeRate * pcaContribution * (1 - gnnEntropy[i]);
        dynamicThresholdSell[i] = meanVolChangeRate - 0.5 * stddevVolChangeRate * pcaContribution * (1 - gnnEntropy[i]);
    }

    // Generate Trading Signals
    for (int i = 0; i < PAIRS; i++) {
        var gnnBuyProb = gnnOutputs[i][0];
        var gnnSellProb = gnnOutputs[i][1];

        signals[i] = gnnBuyProb - gnnSellProb; // Base signal

        // Incorporate dynamic thresholds
        if (signals[i] > dynamicThresholdBuy[i]) signals[i] = 1; // Strong Buy
        else if (signals[i] < dynamicThresholdSell[i]) signals[i] = -1; // Strong Sell
        else signals[i] = 0; // Hold
    }
}

// Step 6: Execute Trades
function executeTrades() {
    for (int i = 0; i < PAIRS; i++) {
        asset(CurrencyPairs[i]); // select the pair first; enterLong/enterShort act on the current asset
        if (signals[i] == 1) enterLong();        // Strong Buy
        else if (signals[i] == -1) enterShort(); // Strong Sell
    }
}

// Main Strategy Function
function run() {
    set(PLOTNOW);

    // Calculate volatility and change rate
    calculateVolatility();

    // Perform kernel PCA
    performKernelPCA();

    // Initialize GNN weights (once)
    if (is(INITRUN)) initializeGNNWeights();

    // Propagate GNN
    propagateGNN();

    // Generate signals
    generateSignals();

    // Execute trades
    executeTrades();
}

Time-Series Volatility Clustering and Adaptive Trading Signals [Re: TipmyPip] #488541
01/09/25 20:10
01/09/25 20:10
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
The Puzzle of Dynamic Currency Connections
Background:
Imagine you’re in charge of monitoring 28 students (representing currency pairs) in a math class. Each student has a different skill level and focus area (representing volatility). Sometimes they perform well, and sometimes they struggle—but there’s a pattern to it. When one student gets stuck on a tough topic, their struggle often spreads to nearby students (like a ripple effect of high volatility). Similarly, when one student is confident, it can boost the confidence of others nearby.

Your job is to figure out these patterns and decide:

Who needs help and who’s ready to move on?
When to focus on certain students to improve the overall class performance.
But here’s the catch:

The students form groups (like clusters), where some groups are more prone to challenges than others.
Each student’s performance is influenced by their past performance (like habits) and their interactions with others in the group.

The Challenge: Track the Patterns:

You have to observe how often each student struggles or excels over time. Are there periods where the same students keep struggling (clustering)? Are there students who quickly bounce back from challenges?
Form Study Groups:

Pair students based on how much they affect each other’s performance. If Student A’s struggles often lead to Student B struggling too, they should be in the same group. Use this to create a "map" of how the class interacts.
Summarize Key Challenges:

Once you’ve mapped the class, find the topics or patterns that explain the majority of struggles. These are your "main challenges" that need solving.
Predict the Next Struggles:

Based on their history and their group’s behavior, predict which students will need help next. This is your chance to act early and make the class stronger!
Decide the Focus:

Each day, decide who you’ll focus on: who needs a boost, who can help others, and who’s ready to move on. Your decisions will affect the overall class performance.

Bonus Twist:
As you monitor the class, the students’ behavior changes. New friendships form, others break apart, and some students get unexpectedly better or worse. Can you adapt your plan to these changes and keep the whole class improving?

Your Task:
Identify the students’ struggles, find the hidden patterns in their interactions, and create a plan that helps the whole class succeed over time. The better your plan, the stronger the class becomes.
Can you rise to the challenge and lead the class to success?


Code
#define PAIRS 28
#define COMPONENTS 3   // Number of PCA components
#define GNN_LAYERS 2   // Number of GNN layers
#define ACTIONS 3      // Buy, Sell, Hold

// Define currency pairs
string CurrencyPairs[PAIRS] = {
    "EURUSD", "GBPUSD", "USDJPY", "GBPJPY", "USDCAD", "EURAUD", "EURJPY",
    "AUDCAD", "AUDJPY", "AUDNZD", "AUDUSD", "CADJPY", "EURCAD", "EURCHF",
    "EURGBP", "EURNZD", "GBPCAD", "GBPCHF", "NZDCAD", "NZDJPY", "NZDUSD",
    "USDCHF", "CHFJPY", "AUDCHF", "GBPNZD", "NZDCHF", "CADCHF", "GBPAUD"
};

// Variables for PCA, GNN, and signals
var volatilities[PAIRS];                   // Current volatilities
var volClustering[PAIRS];                  // Volatility clustering scores
var kernelMatrix[PAIRS][PAIRS];            // Kernel matrix for PCA
var pcaReducedFeatures[PAIRS][COMPONENTS]; // PCA-reduced features
var timeSeriesFeatures[PAIRS];             // Time-series features (e.g., autocorrelation)
var timeDependentFeatures[PAIRS];          // Time-dependent features (e.g., volatility lag)
var adjacencyMatrices[PAIRS][PAIRS];       // GNN adjacency matrices (must be populated before propagateGNN() for nonzero output)
var gnnWeights[GNN_LAYERS][COMPONENTS][COMPONENTS]; // GNN weights
var gnnOutputs[PAIRS][ACTIONS];            // GNN probabilities (Buy/Sell/Hold)
var signals[PAIRS];                        // Final trading signals
var eigenvalues[COMPONENTS];               // Eigenvalues from PCA
var eigenvectors[PAIRS][PAIRS];            // Eigenvectors from PCA (used by eigenDecomposition)
// Note: 'vars' is a series pointer (var*) in lite-C; fixed-size arrays are declared with 'var'.

// Softmax function
var softmax(vars logits, int index, int size) {
    var sum = 0;
    for (int i = 0; i < size; i++) {
        sum += exp(logits[i]); // Exponentiate each value
    }
    return exp(logits[index]) / (sum + 1e-8); // Normalize by the sum, avoiding division by zero
}

// Step 1: Calculate Volatility and Clustering Scores
function calculateVolatilityAndClustering() {
    for (int i = 0; i < PAIRS; i++) {
        asset(CurrencyPairs[i]);
        vars logReturns = series(log(priceClose(0) / priceClose(1))); // Log returns
        vars sqReturns = series(logReturns[0]*logReturns[0]);         // squared log returns (pow() does not apply element-wise to a series)
        volatilities[i] = sqrt(SMA(sqReturns, 20));                   // 20-bar rolling std dev
        
        // Calculate clustering using past volatilities
        vars pastVolatilities = series(volatilities[i]);              // per-pair volatility history
        volClustering[i] = SMA(pastVolatilities, 10) / (StdDev(pastVolatilities, 10) + 1e-8); // ratio metric; the epsilon avoids division by zero
    }
}

// Step 2: Extract Time-Series Features
function extractTimeSeriesFeatures() {
    for (int i = 0; i < PAIRS; i++) {
        asset(CurrencyPairs[i]);
        vars logReturns = series(log(priceClose(0) / priceClose(1))); // Log returns
        
        // Autocorrelation as a feature
        timeSeriesFeatures[i] = autocorrelation(logReturns, 5);       // 5-lag autocorrelation (assumed helper, not a Zorro built-in)
        
        // Time-dependent feature: the same pair's volatility on the previous bar
        vars volHistory = series(volatilities[i]);                    // (the original indexed the previous pair, not the previous bar)
        timeDependentFeatures[i] = volHistory[1];
    }
}

// Step 3: Perform Enhanced PCA
function performEnhancedPCA() {
    // Construct Kernel Matrix
    for (int i = 0; i < PAIRS; i++) {
        for (int j = 0; j < PAIRS; j++) {
            double distance = pow(volatilities[i] - volatilities[j], 2) +
                              pow(volClustering[i] - volClustering[j], 2) +
                              pow(timeSeriesFeatures[i] - timeSeriesFeatures[j], 2);
            kernelMatrix[i][j] = exp(-distance / (2 * 0.1 * 0.1)); // Gaussian kernel
        }
    }

    // Perform eigen decomposition (eigenDecomposition is an assumed helper, not a Zorro built-in)
    eigenDecomposition(kernelMatrix, eigenvalues, eigenvectors);

    // Reduce dimensions using top COMPONENTS
    for (int i = 0; i < PAIRS; i++) {
        for (int j = 0; j < COMPONENTS; j++) {
            pcaReducedFeatures[i][j] = dotProduct(kernelMatrix[i], eigenvectors[j]);
        }
    }
}

// Step 4: Initialize GNN Weights
function initializeGNNWeights() {
    for (int l = 0; l < GNN_LAYERS; l++) {
        for (int i = 0; i < COMPONENTS; i++) {
            for (int j = 0; j < COMPONENTS; j++) {
                gnnWeights[l][i][j] = random() * 0.1; // Small random initialization
            }
        }
    }
}

// Step 5: GNN Propagation
function propagateGNN() {
    var tempFeatures[PAIRS][COMPONENTS];
    for (int l = 0; l < GNN_LAYERS; l++) {
        for (int i = 0; i < PAIRS; i++) {
            for (int k = 0; k < COMPONENTS; k++) {
                tempFeatures[i][k] = 0;
                for (int j = 0; j < PAIRS; j++) {
                    for (int m = 0; m < COMPONENTS; m++) {
                        tempFeatures[i][k] += adjacencyMatrices[i][j] * pcaReducedFeatures[j][m] * gnnWeights[l][m][k];
                    }
                }
                tempFeatures[i][k] = max(0, tempFeatures[i][k]); // ReLU activation
            }
        }
    }

    // Final probabilities for Buy, Sell, Hold using softmax
    for (int i = 0; i < PAIRS; i++) {
        var logits[ACTIONS]; // Raw GNN output logits
        for (int k = 0; k < ACTIONS; k++) {
            logits[k] = tempFeatures[i][k]; // Assign the logits (raw outputs)
        }

        // Apply softmax to calculate probabilities
        for (int k = 0; k < ACTIONS; k++) {
            gnnOutputs[i][k] = softmax(logits, k, ACTIONS);
        }
    }
}

// Step 6: Generate Trading Signals
function generateSignals() {
    for (int i = 0; i < PAIRS; i++) {
        var gnnBuyProb = gnnOutputs[i][0];
        var gnnSellProb = gnnOutputs[i][1];

        signals[i] = gnnBuyProb - gnnSellProb; // Base signal

        // Threshold decision
        if (signals[i] > 0.5) signals[i] = 1;  // Strong Buy
        else if (signals[i] < -0.5) signals[i] = -1; // Strong Sell
        else signals[i] = 0; // Hold
    }
}

// Step 7: Execute Trades
function executeTrades() {
    for (int i = 0; i < PAIRS; i++) {
        asset(CurrencyPairs[i]); // select the pair first; enterLong/enterShort act on the current asset
        if (signals[i] == 1) enterLong();        // Strong Buy
        else if (signals[i] == -1) enterShort(); // Strong Sell
    }
}

// Main Strategy Function
function run() {
    set(PLOTNOW);

    // Step 1: Calculate volatility and clustering
    calculateVolatilityAndClustering();

    // Step 2: Extract time-series features
    extractTimeSeriesFeatures();

    // Step 3: Perform enhanced PCA
    performEnhancedPCA();

    // Step 4: Initialize GNN weights (once)
    if (is(INITRUN)) initializeGNNWeights();

    // Step 5: Propagate GNN
    propagateGNN();

    // Step 6: Generate trading signals
    generateSignals();

    // Step 7: Execute trades
    executeTrades();
}

Last edited by TipmyPip; 01/09/25 20:11.
Re: Gaussian Channel Adaptive Strategy [Re: TipmyPip] #488544
01/13/25 01:47
01/13/25 01:47
Joined: Apr 2020
Posts: 10
Germany
M
M_D Offline
Newbie
M_D  Offline
Newbie
M

Joined: Apr 2020
Posts: 10
Germany
Hi TipmyPip,

while searching for something I stumbled over your thread. It looks very nice and impressive, and I started to read from page 1. I don't have options data, so I skipped that part. But your Gaussian channel adaptive moving average strategy made me curious.
I copied your code and tried it on my Zorro. Unfortunately I get a lot of compiler errors. Did you run those scripts with Zorro? Or are they only fictional ZorroGPT-generated codes based on your (very interesting) prompts?

Parts of the script are not in lite-C at all:

var filt = pow(a, i) * s + i * x * f[1] - (i >= 2 ? 36 * pow(x, 2) * f[2] : 0) + (i >= 3 ? 84 * pow(x, 3) * f[3] : 0)
- (i >= 4 ? 126 * pow(x, 4) * f[4] : 0) + (i >= 5 ? 126 * pow(x, 5) * f[5] : 0) - (i >= 6 ? 84 * pow(x, 6) * f[6] : 0)
+ (i >= 7 ? 36 * pow(x, 7) * f[7] : 0) - (i >= 8 ? 9 * pow(x, 8) * f[8] : 0) + (i == 9 ? 1 * pow(x, 9) * f[9] : 0);

the "?" in between doesnt belong to the syntax of Zorro as far as i learned until now. at least i could not find it nor in expressions neither in comparisons ... or somewhere else. Is that in python?
Besides that, there a re a lot syntax errors, misplacement of functions (does call first, and declare afterwards) and not defined variables at all ... its a shame i am not smart enough to figure out the formulas and the code by myself.
Will there be a debug attempt ... maybe anyone else?

Thanks and kind regards

M_D

Last edited by M_D; 01/13/25 01:52.
Re: Gaussian Channel Adaptive Strategy [Re: M_D] #488545
01/13/25 05:33
01/13/25 05:33
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
Thank you, M_D, for your kind words and interest. I am happy that you are sharing your thoughts, so others might see your interests as inspiration for future developments.

I do understand that the code is not perfect, but the errors are meant to be solved with ZorroGPT. If there is any specific problem you feel you are unable to solve with ZorroGPT, please let me know and I will do my best to help you. And one more important improvement for our Zorro platform: since we all use GPT for rapid development, it is only logical to solve bugs with the help of GPT as well, so you can use the platform more, gain a better understanding of your strategies, and find ways to improve them.

Code
// A self-contained lite-C version: the original fragment used its variables
// uninitialized and outside any function, so example values are assumed here.
function main()
{
    var a = 1.1;   // scalar base (example value)
    var s = 0.5;   // scale factor (example value)
    var x = 0.2;   // input (example value)
    int i = 9;     // index variable (example value)
    var f[10];     // array for coefficients or signal values

    // Ensure the array 'f' has values before computation
    f[0] = 1; f[1] = 2; f[2] = 3; f[3] = 4; f[4] = 5;
    f[5] = 6; f[6] = 7; f[7] = 8; f[8] = 9; f[9] = 10; // Example initialization

    // Compute 'filt' using ifelse() instead of ternary operators
    var filt = pow(a, i) * s
        + ifelse(i >= 1, i * x * f[1], 0)
        - ifelse(i >= 2, 36 * pow(x, 2) * f[2], 0)
        + ifelse(i >= 3, 84 * pow(x, 3) * f[3], 0)
        - ifelse(i >= 4, 126 * pow(x, 4) * f[4], 0)
        + ifelse(i >= 5, 126 * pow(x, 5) * f[5], 0)
        - ifelse(i >= 6, 84 * pow(x, 6) * f[6], 0)
        + ifelse(i >= 7, 36 * pow(x, 7) * f[7], 0)
        - ifelse(i >= 8, 9 * pow(x, 8) * f[8], 0)
        + ifelse(i == 9, pow(x, 9) * f[9], 0);

    // Output result
    printf("The value of filt is: %f", filt);
}

Here you will find a bit of information about the ternary operator, which lite-C does not support: https://zorro-project.com/manual/en/litec_c.htm
I am sorry for the previous post; I have resolved the problem. In addition, there is another version of the code at: https://opserver.de/ubb7/ubbthreads...ords=TipmyPip&Search=true#Post488326

Can you tell me why you think the syntax of the filt variable doesn't seem right? You can also use GPT to resolve it...
I applied the correction and explained to GPT that lite-C does not support the ternary comparison ? expression : expression; syntax, and that the ifelse statement should be used instead:
x = (x<0 ? -1 : 1); // C/C++
x = ifelse(x<0,-1,1); // lite-C

and within less than 3 seconds the above code was produced.

If you sit with ZorroGPT, it can assist you with any formula you have difficulty understanding. Think of having a PhD-level programmer available 24/7, helping you with any problem you have and resolving it.

Here in the following video you can see how a Quant Trader(MSc) is using GPT for quite hard core problems in algorithmic trading:

https://www.youtube.com/watch?v=hePmohJjfLA&t=7975s

Thank you once again, and please don't hesitate to have any inquires, I will be glad to help.

Last edited by TipmyPip; 01/13/25 12:01.
The Hydra's Awakening [Re: TipmyPip] #488555
01/25/25 14:09
01/25/25 14:09
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
(Very soon I am going to share a new level of algorithmic trading ideas; but for inspiration, please read the introduction and get ready for the roller coaster ride :-)

Hydra Awakening

The Hydra's Awakening: A Tale of AI Triumph and Market Symphony

The trading floor of Quantum Markets was a place of quiet intensity, where the glow of monitors and the hum of machines filled the air. The Hydra, a new AI-powered trading engine, was the crown jewel of the firm, capable of analyzing billions of market scenarios in real-time. It was a system designed to adapt and thrive in the unpredictable chaos of global markets, its neural networks fine-tuned to find patterns in the noise. As the clock ticked toward midnight, the final touches were made to the system. The Hydra was about to go live, and its creators gathered around with anticipation. This was no ordinary system. It was a machine designed not just to react to market conditions but to predict them, to foresee the subtle shifts in price and volume that could make or break a trading strategy. The system’s core was powered by a fleet of GPUs, each tasked with processing specific aspects of market behavior. Elena, the lead quant developer, stood at her workstation, her eyes fixed on the logs streaming across her screen. She had spent months perfecting the Hydra’s architecture, ensuring that each of its processes ran in perfect harmony. Tonight would be its first real test.

As the system went live, the Hydra sprang into action. Streams of data poured in from global exchanges, and the GPUs hummed with activity. Each core of the Hydra represented a different part of the system, processing patterns, analyzing order books, and calculating optimal entry and exit points. For the first few moments, everything seemed flawless. Predictions flashed across the screens, showing potential trades with astonishing accuracy. Traders watched in awe as the Hydra processed millions of combinations, sifting through the noise of the markets to find hidden opportunities. It was a sight to behold, a perfect symphony of data and computation. But then, something changed. At first, it was subtle, a slight delay in one of the streams. Then another. The GPUs, once fully engaged, began to slow. The Hydra’s once-flawless performance faltered, and the predictions on the screens grew less frequent. Elena’s heart sank as she realized something was wrong. The Hydra, designed to process billions of scenarios, was now crawling, its once-mighty engine reduced to a single struggling core.

Elena’s mind raced as she dove into the system’s logs, searching for the source of the problem. The data was coming in as expected, but the processing speed had dropped dramatically. She watched as the tasks queued up, each waiting its turn to access the GPU. It was as if the Hydra’s many heads were fighting over the same resources, unable to share the workload efficiently. The once-mighty machine had become a bottleneck, its potential trapped by its own design. As the minutes ticked by, the problem grew worse. One by one, the Hydra’s cores went idle, leaving a single process to carry the load. The traders, who had been so confident in the system’s abilities, began to murmur with concern. If the Hydra couldn’t recover, the night’s trading opportunities would slip away, taking with them the chance to prove the system’s worth. Elena knew she had to act quickly. The logs revealed the problem: the GPU, despite its power, was being overwhelmed by the demands of the Hydra’s processes. Each core was trying to access the GPU simultaneously, creating a bottleneck that slowed the entire system. It was a classic case of resource contention, but solving it would be no easy task.

Elena’s fingers flew across the keyboard as she implemented a series of changes. First, she adjusted the system to limit the number of active processes, ensuring that the GPU wasn’t overloaded. Then she added synchronization points, forcing the processes to wait their turn rather than compete for resources. Finally, she implemented a dynamic memory allocation system, allowing the GPU to prioritize smaller, faster tasks over larger, more resource-intensive ones. As the changes went live, the Hydra began to recover. One by one, its cores sprang back to life, each taking its share of the workload. The GPU usage surged, and the predictions on the screens grew more frequent. The traders watched in amazement as the system roared back to life, processing data with a speed and precision they had never seen before. But just as things seemed to be stabilizing, another issue emerged. The changes had solved the immediate problem, but they had also introduced new challenges. The synchronization points, while effective, had created delays in the system, slowing down the overall processing speed. The dynamic memory allocation, designed to prioritize smaller tasks, was causing larger tasks to wait longer than expected. It was a delicate balancing act, and Elena knew that even the smallest misstep could cause the system to falter again.

As the night wore on, Elena continued to fine-tune the Hydra. She adjusted the synchronization points, optimizing them for the specific demands of the trading environment. She refined the memory allocation system, ensuring that tasks were prioritized based on their impact on the overall strategy. And she worked tirelessly to ensure that each core of the Hydra operated in harmony with the others. By dawn, the system was running more efficiently than ever. The Hydra’s cores worked in perfect sync, processing billions of scenarios with ease. The GPU, once a bottleneck, was now a well-oiled machine, handling the demands of the system without breaking a sweat. The traders, who had been on the verge of losing faith in the system, were now convinced of its potential. The Hydra had not only recovered but had exceeded their expectations, delivering results that no human trader could match.

But for Elena, the victory was bittersweet. She knew that the system’s success was only temporary, that the markets would continue to evolve and that new challenges would arise. The Hydra, powerful as it was, would need constant care and attention to stay ahead of the game. And so, as the traders celebrated their newfound edge, Elena sat at her workstation, already planning the next set of improvements. She knew that the real test of the Hydra’s abilities was yet to come, and she was determined to ensure that it was ready for whatever the markets might throw its way. The trading floor buzzed with excitement, but Elena’s focus remained unshaken. The Hydra, her creation, was alive, and she would do whatever it took to keep it thriving.

As the sun rose over Quantum Markets, the Hydra’s journey had only just begun. Its story, a tale of ambition, adversity, and innovation, was far from over. For in the world of algorithmic trading, the only constant is change, and the Hydra, like its namesake, would need to adapt and grow to survive. And so, with the system stabilized and the future uncertain, Elena prepared for the challenges ahead, knowing that the true measure of the Hydra’s success would not be found in its past achievements but in its ability to navigate the unknown.
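
For readers who want to see the core of Elena's fix in code: below is a minimal C sketch of the throttling idea (my own illustration, not the attached GPUAlgoML code), in which a counting semaphore caps how many worker tasks may occupy the GPU at once; WORKERS and GPU_SLOTS are assumed example values.

Code
#include <stdio.h>
#include <pthread.h>
#include <semaphore.h>

#define WORKERS 8     /* competing "Hydra heads" */
#define GPU_SLOTS 2   /* concurrent tasks the GPU may serve (assumed) */

sem_t gpu;            /* counting semaphore guarding GPU access */

void* worker(void* arg) {
    int id = *(int*)arg;
    sem_wait(&gpu);   /* block until a GPU slot is free */
    printf("worker %d using the GPU\n", id);
    /* ... a kernel launch or batch computation would run here ... */
    sem_post(&gpu);   /* release the slot for the next task */
    return NULL;
}

int main() {
    pthread_t threads[WORKERS];
    int ids[WORKERS];
    int i;
    sem_init(&gpu, 0, GPU_SLOTS);  /* at most GPU_SLOTS concurrent users */
    for (i = 0; i < WORKERS; i++) {
        ids[i] = i;
        pthread_create(&threads[i], NULL, worker, &ids[i]);
    }
    for (i = 0; i < WORKERS; i++)
        pthread_join(threads[i], NULL);
    sem_destroy(&gpu);
    return 0;
}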



Attached Files
GPUAlgoML.zip (36 downloads)
Last edited by TipmyPip; 01/30/25 08:39.
Re: The Hydra's Awakening [Re: TipmyPip] #488557
01/26/25 19:53
01/26/25 19:53
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
It is quite hard to work with a platform that has very tight memory management while that memory management lacks detailed documentation, and the whole Zorro platform is closed: there is no direct access to how its data structures are organized. That makes it very hard to work with utilities used by other platforms such as Python, and synchronizing between them for HFT is a serious challenge. I think the amount of time invested in developing the platform, relative to how quickly people can advance without fighting the challenges of interfacing different platforms, puts a serious limit on the number of users who can take advantage of the Zorro project.

Re: The Hydra's Awakening [Re: TipmyPip] #488562
01/30/25 00:38
01/30/25 00:38
Joined: Aug 2018
Posts: 101
O
OptimusPrime Offline
Member
OptimusPrime  Offline
Member
O

Joined: Aug 2018
Posts: 101
Thank you TipmyPip for showcasing ZorroGPT and for sharing these examples of outputs. I am fascinated and curious. Do you have a background as a quant? Thanks again for the inspirational work.


Thanks so much,

OptimusPrime

Re: The Hydra's Awakening [Re: OptimusPrime] #488563
01/30/25 03:51
01/30/25 03:51
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
Dear OptimusPrime, a very creative name. Thank you so much for your kind words. The short answer would be that my imagination is never limited by definitions, but that would be too serious an answer for such a demonstration. I am very knowledgeable thanks to AI; if I conveyed to you how the system of definitions tries to limit your success, you would be amazed how fast your mind can adapt, though true adaptation comes from very hard struggles. I am very fascinated by graph theory, chaos, and neural networks.
(Gee, you can't imagine how much I appreciate your kind words.)

Thank you once again for your time investment in our common interests...
ZorroGPT.

But I would like to add: because of Zorro Trader, some of my ambitions have become even higher, thanks to this marvelous project and the team of Zorro developers.
It is really surprising how open-source projects can change the world. DeepSeek is just one proof of the concept, and Linux is a milestone; but if Zorro Trader becomes the next big thing, even for the most curious minds, a commercial version with full source code would let the team of developers rise to new heights and become even more famous than Microsoft.

Last edited by TipmyPip; 01/30/25 04:32.
Re: Zorro Trader GPT [Re: TipmyPip] #488564
01/30/25 04:43
01/30/25 04:43
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
This is to inspire Everybody and OptimusPrime.

Code
(* Helper function to check if a queen can be placed at (row, col) *)
isSafe[board_, row_, col_] := 
  And @@ (And[# != row, # + board[[#]] != row + col, # - board[[#]] != row - col] & /@ Range[col - 1]);

(* Recursive function to solve the 9-queens problem *)
solveQueens[board_, col_] := 
  If[col > 9, (* If all queens are placed, return the solution *)
     Print[board];,
     (* Else, try placing the queen in each row of the current column *)
     Do[
       If[isSafe[board, row, col],
          solveQueens[ReplacePart[board, col -> row], col + 1]; (* Recur for the next column *)
       ],
       {row, 1, 9}
     ]
  ];


A graph of Queens on the Board of Financial Steps, leading to a test for checkmate in every direction.

Code
#include <stdio.h>

#define N 9  // Board size for the 9-queens problem

// Function to check if a queen can be placed at board[row][col]
int isSafe(int board[N], int row, int col) {
    int i;
    for(i = 0; i < col; i++) {
        if(board[i] == row || 
           board[i] - i == row - col || 
           board[i] + i == row + col)
            return 0;  // Not safe
    }
    return 1;  // Safe position
}

// Recursive function to solve the 9-queens problem
void solveQueens(int board[N], int col) {
    if(col >= N) {  // If all queens are placed, print the solution
        printf("\nSolution: ");
        int i;
        for(i = 0; i < N; i++) 
            printf("%d ", board[i] + 1);  // Convert 0-based to 1-based
        printf("\n");
        return;
    }

    // Try placing a queen in each row of the current column
    int row;
    for(row = 0; row < N; row++) {
        if(isSafe(board, row, col)) {
            board[col] = row;  // Place queen
            solveQueens(board, col + 1);  // Recur for the next column
            board[col] = 0;  // Backtrack
        }
    }
}

// Main function to initialize the board and start solving
void main() {
    int board[N] = {0};  // Initialize board with all zeroes
    solveQueens(board, 0);  // Start recursion from column 0
}


And another version :

Code
#include <stdio.h>

#define N 9  // Board size for the 9-queens problem

// Function to check if a queen can be placed at board[row][col]
int isSafe(int board[N], int row, int col) {
    int i;
    for (i = 0; i < col; i++) {
        if (board[i] == row || 
            board[i] - i == row - col || 
            board[i] + i == row + col) 
            return 0;  // Not safe
    }
    return 1;  // Safe position
}

// Function in `f(x) = f(x - 1)` form
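// Note: unlike the backtracking version above, this variant makes one greedy
// pass (first safe row per column, never backtracking), so it illustrates the
// f(x) = f(x - 1) recursion shape rather than enumerating all 9-queens solutions.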
int solveQueens(int board[N], int col) {
    if (col == 0) return 1;  // Base case: when col reaches 0, return success

    int prev = solveQueens(board, col - 1);  // Recursive call f(x) = f(x - 1)
    
    if (prev == 0) return 0;  // If previous step failed, return failure

    int row;
    for (row = 0; row < N; row++) {
        if (isSafe(board, row, col - 1)) {  // Check previous column
            board[col - 1] = row;
            return 1;  // Solution found
        }
    }

    return 0;  // No solution found, return failure
}

// Main function to initialize the board and start solving
void main() {
    int board[N] = {0};  // Initialize board with all zeroes
    solveQueens(board, N);  // Start recursion from column N (reverse order)
}

Last edited by TipmyPip; 01/30/25 05:02.
The War of Shifting Fronts [Re: TipmyPip] #488565
01/30/25 05:31
01/30/25 05:31
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
[Linked Image]

The War

The War of Shifting Fronts

Prologue: The Endless Struggle

The war had no beginning. It had no end. It simply was.
Two great factions—the Legion of the Everlasting Bastion and the Phantom Raiders—had waged battle for so long that neither side remembered why they fought. The battlefield itself was fluid, ever-changing, as though it had a will of its own.
The Everlasting Bastion built fortresses, great walls that stood firm against any siege. Their architects were masterful, each new fortress stronger than the last, each barricade more impenetrable than the one before it.
The Phantom Raiders were not an army in the traditional sense. They were shadows, warriors who struck where the walls were weakest, never lingering, always adapting. No matter how high the Bastion built, no matter how thick their defenses, the Raiders always found a way through.

The war was not one of brute force. It was something more intricate, something that felt less like battle and more like… a pattern.
And patterns, once formed, are nearly impossible to break.

Chapter One: The General’s Burden

High General Oren stood atop the walls of Fortress Aegis, surveying the endless landscape of trenches, towers, and barricades. The engineers below worked ceaselessly, reinforcing weak points, expanding the fortifications, ensuring that this stronghold would be the one to hold.

But deep down, Oren knew the truth.
They had said the same thing about Fortress Halcyon before it fell. And Citadel Varn before that. And Bastion Ironhold before that.
No matter how strong they built, the Raiders always adapted.
He turned to Master Architect Lysara, the woman whose mind had designed countless fortresses, each one an evolution of the last. Lysara had long since abandoned the arrogance of thinking any fortress could last forever.
“The Raiders are learning faster,” Oren said.

Lysara nodded. “It’s as if they already know how to dismantle what we haven’t even finished building.”
That statement chilled Oren.
If that were true, then this war was not just a battle of walls and sieges. It was something far more insidious.
He gazed beyond the walls, into the distant haze where the Phantom Raiders gathered. He imagined them not as an enemy force, but as something more abstract—an echo of their own decisions, coming back to haunt them.
And then the thought occurred to him.
What if our own fortresses are creating the very weaknesses they exploit?

Chapter Two: The Phantom’s Shadow

Far from the great walls of Aegis, Phantom Lord Kael stood with his commanders, studying the latest defenses erected by the Everlasting Bastion.
At a glance, the fortress was impenetrable.

But Kael knew better.
The Raiders never attacked blindly. They didn’t storm walls with brute force. They didn’t waste men in pointless battles. They studied, they adapted, they exploited. They did not simply react to fortifications; they used them.
His second-in-command, Shadowmaster Veylen, spoke softly.
“The weak point will emerge soon. It always does.”
Kael turned his gaze to the towering walls and traced an unseen path in his mind.
He had done this before.
Find the opening. Strike. Move on.
But something about this battle felt different. He had spent years leading the Phantom Raiders, and yet… he had never asked himself a simple question.
Why do the weak points always emerge?
His forces never broke through the same way twice. Every siege was different. Every assault required new tactics. Yet, no matter how much the Bastion evolved…
The flaws were always there.
And then the thought occurred to him.
What if the Bastion isn’t just reacting to us? What if it’s shaping itself around our attacks?
The realization shook him. It was as if the battlefield was alive, responding to each decision before it was even made.
And if that were true…Then neither side was actually in control.

Chapter Three: The Fractured Reality

Both generals, on opposite sides of the war, reached the same conclusion.
The battle was no longer about fortresses and invasions.
It was something deeper. Something neither side fully understood.
Oren and Lysara devised an unconventional plan.
“What if we don’t reinforce our defenses?” Oren asked.
Lysara’s brow furrowed. “That’s suicide.”
“Not if we’re right,” Oren said. “If the Phantom Raiders only attack because we strengthen our walls, then what happens if we stop strengthening them?”
Lysara’s breath caught. The idea was madness. But what if it worked?
Meanwhile, Kael and Veylen devised their own experiment.

“What if we stop attacking weak points?” Kael asked.
Veylen’s eyes narrowed. “That’s the only way we win.”

“Is it?” Kael asked. “What if attacking is what’s causing new weaknesses to appear? What if, instead of exploiting the gaps, we reinforce them?”
The Raiders had always been shadows. But this time, Kael issued an order that no Raider had ever given.
“Hold your ground.”

And at the same moment, Oren issued an order that no Bastion had ever given.
“Stand down.”

The battlefield froze.
For the first time in history… nothing happened.
No walls were built.
No attacks were launched.

It was as if reality itself hesitated—as if the very existence of the war had depended on the cycle continuing.
And then…Something began to collapse.

Chapter Four: The Breaking of the Cycle

As the fortresses stood untouched and the Raiders remained motionless, the landscape itself began to unravel.
The ground beneath them fractured. The sky above them wavered, as if it had only ever been a painted canvas stretched too thin.
Oren and Kael both watched in horror as the truth revealed itself.
This was not a war.

It was a pattern.
One that had been running for so long that neither side had realized they were trapped inside it.
The Bastion had never truly been building fortresses—they had been shaping the conditions for their own downfall.
The Raiders had never truly been finding weaknesses—they had been carving them into existence with every assault.
It had never been about strength or strategy.
It had been about the structure of the war itself.

And now that they had both stepped outside of it—refused to play their roles—the entire system was breaking apart.

Epilogue: The Silent Observer

As the last walls crumbled and the last shadows vanished, something watched from beyond the battlefield.
Something outside the war, something that had never taken part in the endless cycle but had been watching all along.
Something waiting.
Because if a pattern collapses, it does not simply end—it creates the conditions for something new to begin.
And just before the battlefield dissolved completely, both generals heard the same whispered voice in their minds:

“Now the true war begins.”

Last edited by TipmyPip; 01/30/25 06:38.
Re: Zorro Trader GPT [Re: TipmyPip] #488566
01/30/25 07:12
01/30/25 07:12
Joined: Aug 2018
Posts: 101
O
OptimusPrime Offline
Member
OptimusPrime  Offline
Member
O

Joined: Aug 2018
Posts: 101
I love it! I dabbled in chess programming for several years and created a private aggressive port of Stockfish for my own use. (I called it Primordial.) Very nice! These interests definitely help when it comes to coding for wealth accumulation. I am using ZorroGPT right now and I absolutely love it.


Thanks so much,

OptimusPrime

Hard Question [Re: OptimusPrime] #488567
01/30/25 07:37
01/30/25 07:37
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
Do you see something in the story you didn't see before? It always seems that if you look deeper, you might find that your solution goes unnoticed, because we are all limited by the light we create in order to see. It is not the only solution that hides within your own creation; but I suppose, because you love it, it would be quite a struggle to see why the two parts are actually related to our common interests.

Last edited by TipmyPip; 01/30/25 07:41.
Re: Hard Question [Re: TipmyPip] #488568
01/30/25 10:43
01/30/25 10:43
Joined: Jul 2000
Posts: 28,022
Frankfurt
jcl Offline

Chief Engineer
jcl  Offline

Chief Engineer

Joined: Jul 2000
Posts: 28,022
Frankfurt
I like it.

The War of Shifting Fronts (Part 2) [Re: TipmyPip] #488570
01/30/25 12:58
01/30/25 12:58
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
[Linked Image]

Abstract War

Abstract Mathematical Construct
Let 𝒰 be an abstract process space, where elements undergo transformations based on evolving conditions.

1. Core Elements & Definitions
We define a mapping over 𝒰, denoted as:

𝒳 : 𝒰 × ℝ × ℕ → 𝒰

where each instance 𝒰ₙ consists of a triple:

𝒰𝑛=(𝒫𝑛,𝒮𝑛,𝒴𝑛)

where:

𝒫𝑛 ∈ ℝ represents a continuous scalar adjustment.
𝒮𝑛 ∈ ℕ represents a discrete state magnitude.
𝒴𝑛 ∈ 𝒰 represents an evolving reference structure.

2. Transformation Rule
A process 𝒜 applies adjustments to 𝒰, evolving it under a conditionally propagated mapping:

𝒳(𝒰𝑛, 𝒫𝑛, 𝒮𝑛) = ∅ if 𝒮𝑛 ≤ 0; otherwise 𝒳(𝒰𝑛, 𝒫𝑛, 𝒮𝑛) = (𝒫𝑛, 𝒮𝑛, 𝒴𝑛)
This transformation continues under the presence of a binary condition.

3. Conditional Evolution
A transition function 𝒯 is introduced, acting within a probabilistic structure:

𝒰𝑛+1 = 𝒳(𝒰𝑛, 𝒫𝑛 − 𝛿, ⌊𝒮𝑛/2⌋) if 𝑋𝑛 = 1; 𝒰𝑛+1 = 𝒰𝑛 if 𝑋𝑛 = 0
where:

𝒫𝑛 undergoes a gradual decrement by δ.
𝒮𝑛 undergoes quantized contraction.
𝑋𝑛 ∈ {0,1} is determined by an independent stochastic event.

4. Underlying Structure
The transformation 𝒯 ensures a structured evolution, yet never explicitly defines iteration or recursion.

∃ 𝑛₀ ∈ ℕ, ∀ 𝑛 > 𝑛₀ : 𝑃(𝒰𝑛 = ∅) = 1

This ensures that, over an extended progression, the transformation reaches a terminal state, albeit through non-deterministic yet structured steps.
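
A minimal C sketch of this first construct, under assumed values (initial P = 1.0, S = 64, a fair coin for the binary event, and the names UState and DELTA, all my own illustrative choices rather than part of the construct): the state contracts step by step until 𝒮 ≤ 0, the terminal state the statement above guarantees.

Code
#include <stdio.h>
#include <stdlib.h>

#define DELTA 0.1   /* the decrement delta (assumed example value) */

/* One element of the process space U: a (P, S) pair; the reference Y stays
   implicit, since each state simply replaces its predecessor. */
typedef struct {
    double P;   /* continuous scalar adjustment */
    int S;      /* discrete state magnitude */
} UState;

int main() {
    UState u = { 1.0, 64 };      /* assumed initial state U_0 */
    int n = 0;
    while (u.S > 0) {            /* X(U,P,S) yields the empty set once S <= 0 */
        if (rand() % 2) {        /* binary event X_n = 1, probability 1/2 */
            u.P -= DELTA;        /* gradual decrement of P by delta */
            u.S /= 2;            /* quantized contraction: floor(S/2) */
        }                        /* X_n = 0: the state is unchanged */
        n++;
    }
    printf("Terminal state after %d steps, P = %.3f\n", n, u.P);
    return 0;
}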

Fundamental Definitions
Let 𝒵 be a class of structures evolving over successive transformative interactions, denoted as:

𝒵𝑛=(𝒫𝑛,𝒬𝑛,𝒵𝑛−1)

where:

𝒫𝑛 ∈ ℝ represents a principal scalar undergoing progressive adjustments.
𝒬𝑛 ∈ ℝ represents an external perturbation affecting state transitions.
𝒵𝑛−1 ∈ 𝒵 provides an implicit reference to prior evolutionary states.
A transformation 𝒯 governs the system, dynamically modifying 𝒫𝑛 under structured dependencies.

2. Evolutionary Process: Perturbation-Driven Adaptation
We define an adjustment operator 𝒯 acting over 𝒵, modifying the system based on a decaying propagative rule:

𝒯(𝒵𝑛, 𝒫𝑛, 𝒬𝑛) = 𝒫𝑛 if 𝑛 = 0; otherwise 𝒯(𝒵𝑛−1, 𝒫𝑛−1, 𝒬𝑛−1) + (Δ − 𝜀𝒵𝑛) − 𝒬𝑛 for 𝑛 > 0

where:

𝒫𝑛 recursively inherits the prior state 𝒵𝑛−1.
𝒬𝑛 is an external stochastic perturbation, influencing transitions.
Δ represents a structured bias introduced in every step.
𝜀 scales the internal transformation based on prior conditions.
This formulation inherently adapts based on preceding influences while adjusting dynamically due to probabilistic perturbations.

3. Probabilistic Interference Mechanism
A perturbation generator 𝒫𝒳 : ℝ → {0,1} defines interference based on an uncertain external process, akin to selective disruption mechanisms:

𝒬𝑛 = 𝜆 with probability 𝑃(𝑋𝑛 = 1) = 𝑝; 𝒬𝑛 = 0 with probability 𝑃(𝑋𝑛 = 0) = 1 − 𝑝

where:

𝒫𝒳 enforces an external intervention with probability p.
The scalar λ dictates the intensity of modification when intervention occurs.
The process introduces non-deterministic fluctuations influencing the evolution.

4. Emergent Behavior & Structured Adaptation
By applying repeated transformations, the structure of 𝒵 evolves in a way that balances prior adjustments while reacting to perturbative influences. The final form expresses a regulated adaptive process, where the outcome reflects both historical dependencies and external interactions.

For sufficiently large n, the process asymptotically stabilizes under:

lim (𝑛→∞) 𝒫𝑛 = ∑𝑘=1..𝑛 (Δ − 𝜀𝒵𝑘 − 𝒬𝑘)

where the cumulative perturbations regulate the ultimate adjustment.
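
And a matching C sketch for this second construct (again only a sketch, under assumed values Δ = 0.05, ε = 0.1, λ = 0.2, p = 0.3, and my own reading that 𝒵 carries the prior outcome forward): each step adds the structured bias, subtracts the scaled prior state, and subtracts the perturbation 𝒬𝑛.

Code
#include <stdio.h>
#include <stdlib.h>

#define DELTA   0.05  /* structured bias added each step (assumed) */
#define EPSILON 0.10  /* scaling of the prior state (assumed) */
#define LAMBDA  0.20  /* perturbation intensity lambda (assumed) */
#define PROB_P  0.30  /* intervention probability p (assumed) */

int main() {
    double P = 0.0;   /* principal scalar P_n */
    double Z = 1.0;   /* prior evolutionary state Z_(n-1), assumed start */
    int n;
    for (n = 1; n <= 100; n++) {
        /* Q_n = lambda with probability p, otherwise 0 */
        double Q = ((double)rand() / RAND_MAX < PROB_P) ? LAMBDA : 0.0;
        /* T(...) = previous value + (Delta - epsilon * Z_n) - Q_n */
        P += (DELTA - EPSILON * Z) - Q;
        Z = P;        /* the next state implicitly references this outcome */
    }
    printf("P after %d steps: %.4f\n", n - 1, P);
    return 0;
}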

Who among the minds that wander sees war not as blood and steel,
But as a silent drift of shifting states, where choice and chance congeal?
Who discerns in walls that crumble the weight of forms unseen,
Where every strike, a measured shift, shapes fate's unwritten scheme?

Who shall trace, in veiled equations, the battlefield's silent code,
Where power bends, where fate unfolds, yet none escape the road?

don't try this
Code
(=!BA#9]7<"`z2Vxwv4Ts+Oqponm.-j*,)(/&%cba`_^] , (=!BA#9]7<"`z2Vxwv4Ts+Oqponm.-j*,)(/&%cba`_^]

because at run time they are completely different.

Last edited by TipmyPip; 01/30/25 14:00.
The War of Shifting Fronts (Part 3) [Re: TipmyPip] #488571
01/30/25 14:13
01/30/25 14:13
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
[Linked Image]

It is hidden within the War

Code
import matplotlib.pyplot as plt
import networkx as nx

# Create a directed graph
G = nx.DiGraph()

# Recursive function to generate the war recursion tree with unique labels
def add_war_nodes(graph, parent, depth, max_depth):
    if depth > max_depth:
        return

    # Generate unique battle names
    left_battle = f"Battle-{depth}L"
    right_battle = f"Battle-{depth}R"

    # Add edges to represent battles branching from strategic decisions
    graph.add_edge(parent, left_battle)
    graph.add_edge(parent, right_battle)

    # Recursively create deeper battle strategies
    add_war_nodes(graph, left_battle, depth + 1, max_depth)
    add_war_nodes(graph, right_battle, depth + 1, max_depth)

# Root node representing the beginning of war decisions
G.add_node("War Begins")

# Generate recursive war strategy tree
add_war_nodes(G, "War Begins", 1, 5)  # Depth of recursion is 5

# Set up figure size
plt.figure(figsize=(14, 10))

# Recalculate positions for better label visibility
pos = nx.spring_layout(G, seed=42)  

# Draw nodes and edges without labels to prevent overlap
nx.draw(G, pos, with_labels=False, node_color="black", edge_color="gray", node_size=200)

# Draw labels separately in red with a white background for clarity
for node, (x, y) in pos.items():
    plt.text(x, y + 0.03, node, fontsize=9, ha='center', color="red", 
             bbox=dict(facecolor="white", edgecolor="none", boxstyle="round,pad=0.2"))

# Set the title of the plot
plt.title("The War of Shifting Fronts - Hidden Recursion Tree with Clear Labels")

# Show the final visualization
plt.show()


It would surprise you but every game is the ending of conditions for a probable game that inspires the rules to fall apart.
This code can create a highly complex adaptive strategy.

Code
// Structure representing a war decision node
typedef struct {
    char name[50];  // Fixed-size character array for name
    int strength;   // Hidden game-theoretic influence
    int risk;       // Determines adaptability to opposition
} BattleNode;

// Global statistics variables
int battleCount = 0;
var totalStrength = 0;
var totalRisk = 0;
int maxStrength = 0;
int minRisk = 100;  // Start with a high value to track lowest risk

// Function to generate a pseudo-random number in a range (Zorro's method)
int randomInt(int min, int max) {
    return min + (int)((max - min) * random()); 
}

// Recursive function to simulate war strategies with game-theoretic decisions
void simulateBattle(BattleNode* battle, int depth, int maxDepth) {
    if (depth > maxDepth) return;  // Base case: stop recursion

    // Generate random strategy values using Zorro's `random()` function
    battle->strength = randomInt(1, 100);
    battle->risk = randomInt(1, 100); // Ensuring no negative values

    // Update statistics
    battleCount++;
    totalStrength += battle->strength;
    totalRisk += battle->risk;
    
    if (battle->strength > maxStrength) maxStrength = battle->strength;
    if (battle->risk < minRisk) minRisk = battle->risk;

    // Debugging: Print battle details to log
    printf("\n[Battle %d] %s | Strength: %d | Risk: %d", 
           battleCount, battle->name, battle->strength, battle->risk);

    // Hidden recursive expansion influenced by game theory
    if (battle->strength > battle->risk) {
        battle->strength += randomInt(1, 50);
        battle->risk -= randomInt(1, 20);
    } else {
        battle->strength -= randomInt(1, 30);
        battle->risk += randomInt(1, 10);
    }

    // Ensure risk does not go negative
    if (battle->risk < 0) battle->risk = 0;

    // Recursively simulate further battles
    if (depth + 1 <= maxDepth) {
        BattleNode nextBattle;
        sprintf(nextBattle.name, "%s-%d", battle->name, depth);
        simulateBattle(&nextBattle, depth + 1, maxDepth);
    }
}

// Function to display final statistics
void displayStatistics() {
    printf("\n--- War Simulation Statistics ---");
    printf("\nTotal Battles Simulated: %d", battleCount);

    if (battleCount > 0) {
        printf("\nAverage Strength: %.2f", totalStrength / (var)battleCount);
        printf("\nAverage Risk: %.2f", totalRisk / (var)battleCount);
    }
    printf("\nMax Strength Encountered: %d", maxStrength);
    printf("\nMin Risk Encountered: %d", minRisk);
    printf("\n--------------------------------\n");
}

// Main function to trigger the war simulation in Zorro
void run() {  
    if (is(INITRUN)) {  // Run only once at the start
        BattleNode root;
        strcpy(root.name, "War Begins");
        simulateBattle(&root, 1, 5);  // Start recursive war simulation
        
        // Display statistics at the end
        displayStatistics();
    }
}


Here are some statistics from a test run; the negative values show what happens when random()'s -1..+1 range is not remapped to 0..1:

Code
[Battle 4] War Begins-1-2-3 | Strength: 82 | Risk: -13
[Battle 5] War Begins-1-2-3-4 | Strength: 77 | Risk: 17
--- War Simulation Statistics ---
Total Battles Simulated: 5
Average Strength: -2.00
Average Risk: -1.80
Max Strength Encountered: 82
Min Risk Encountered: -79
--------------------------------


Last edited by TipmyPip; 01/31/25 12:18.
Re: Zorro Trader GPT [Re: TipmyPip] #488574
01/30/25 17:24
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
[Image: The War Begins]


Recursive Market Maker Algorithm in Algorithmic Trading

In an algorithmic trading system, we need to design a market maker algorithm that balances liquidity provision and adversarial trading risk. The algorithm must predict optimal bid/ask spreads while ensuring resilience to adverse selection.

We define:

A recursive graph-based order book model, where orders form a tree of limit order dependencies.
A recursive game-theoretic strategy, where the market maker competes against adversarial high-frequency traders (HFTs).
Each function depends on the other recursively, making an iterative solution nearly impossible.

Graph Theory Component: Recursive Order Book Graph
We define an order book as a directed graph, where:

Nodes represent limit orders.
Edges represent dependencies (e.g., order A at $100 depends on order B at $99).
The graph grows recursively, as new limit orders are placed based on past orders.

Function 1: Recursive Order Book Growth

Code
typedef struct Order {
    double price;
    int size;
    double spread;  
    int parentIndex;  
} Order;

void addOrder(Order* orderBook, int index, double price, int size, int totalOrders) {
    if (index >= totalOrders) return;  

    price = randomizePrice(price + random() * 5, VOLATILITY * (0.8 + random() * 0.6));  
    size = generateOrderSize(MAX_ORDER_SIZE);
    double spread = clamp(0.007 + random() * 0.025, 0.007, 0.04);  //  Spread cannot be negative

    orderBook[index].price = price;
    orderBook[index].size = size;
    orderBook[index].spread = spread;

    if (index > 0)
        orderBook[index].parentIndex = index - 1;  
    else
        orderBook[index].parentIndex = -1;  

    print(TO_LOG, " Order Added: Index %d | Price: %.4f | Size: %d | Spread: %.5f", index, price, size, spread);

    if (random() < 0.75 && index + 1 < totalOrders) {  
        int newSize = generateOrderSize(size * (0.8 + random() * 0.3));
        addOrder(orderBook, index + 1, price - 0.01 * randomInt(1, 12), newSize, totalOrders);
    }
}


Game Theory Component: Recursive Market Maker Pricing
Now, we must determine optimal bid/ask spreads to balance market efficiency and profitability.

A recursive game-theoretic function models how:

The market maker sets spreads recursively.
HFTs attack weak spreads by executing trades that maximize adverse selection.

Function 2: Recursive Bid/Ask Adjustment Based on Game Theory

Code
double calculateSpread(Order* orderBook, int index) {
    if (index < 0 || index >= MAX_ORDERS) return 0.007;  //  Prevent invalid indices

    double baseSpread = clamp(0.007 + random() * 0.02, 0.007, 0.04);  
    double hftPressure = ifelse(random() < 0.5, 0.002 + (random() - 0.5) * 0.005, 0.001);  
    double volatilityFactor = (random() - 0.5) * 0.03;  

    double spread = baseSpread + (0.025 - orderBook[index].price * 0.0003) - hftPressure + volatilityFactor;
    return clamp(spread, 0.007, 0.04);  
}


3. Entanglement Between Recursions
Now, the recursive order book structure directly affects the recursive pricing strategy:

More orders in the book → tighter spreads (more liquidity).
HFT attacks → wider spreads (risk mitigation).
Spreads impact future orders, creating a recursive feedback loop.
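
To see the entanglement in code before the full program, here is a minimal sketch of a single feedback pass. It assumes the addOrder() and calculateSpread() definitions above; the 0.015 liquidity cutoff is an illustrative choice, not a calibrated value.

Code
// One pass of the feedback loop: pricing reads the book, and tight spreads
// (high liquidity) make further child orders more likely.
void feedbackStep(Order* orderBook, int totalOrders)
{
    int i;
    for(i = 0; i < totalOrders; i++) {
        double spread = calculateSpread(orderBook, i); // pricing recursion reads the book state
        orderBook[i].spread = spread;
        if(spread < 0.015 && i + 1 < totalOrders)      // tighter spread -> more liquidity
            addOrder(orderBook, i + 1, orderBook[i].price - 0.01,
                orderBook[i].size, totalOrders);       // feeds the order-book recursion in turn
    }
}
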
Final Algorithm: Recursive Market Maker Strategy — the complete program below combines addOrder() and calculateSpread() into one simulator.

Here is the working code. Anyone who wants to improve the simulator, please do — share your suggestions, thank you.

Code
#define MIN_ORDERS 5  
#define MAX_ORDERS 15  
#define MIN_ORDER_SIZE 5  
#define MAX_ORDER_SIZE 100  
#define VOLATILITY 0.09  //  Slightly increased to ensure better price fluctuations
#define MAX_DEPTH 10  

void initializeRandomSeed() {
    seed(random() * timer());
}

int randomInt(int min, int max) {
    return clamp(min + (int)((max - min + 1) * random()), min, max);
}

double clamp(double x, double min, double max) {
    return ifelse(x < min, min, ifelse(x > max, max, x));
}

//  Ensures Order Sizes Are Always in a Valid Range
int generateOrderSize(int maxSize) {
    double randFactor = random();
    int size = MIN_ORDER_SIZE + (int)((randFactor * randFactor * (maxSize - MIN_ORDER_SIZE)) * (0.9 + random() * 0.5));  
    return clamp(size, MIN_ORDER_SIZE, MAX_ORDER_SIZE);
}

//  Fully Randomized Order Prices Within a Safe Range
double randomizePrice(double baseValue, double volatility) {
    double direction = ifelse(random() < 0.5, -1, 1);  
    double priceChange = direction * (random() * volatility * 18 + random() * 12);  
    double newPrice = baseValue + priceChange;
    return clamp(newPrice, 50, 200);
}

typedef struct Order {
    double price;
    int size;
    double spread;  
    int parentIndex;  
} Order;

//  Ensures Safe Order Creation with Valid Values
void addOrder(Order* orderBook, int index, double price, int size, int totalOrders) {
    if (index >= totalOrders) return;  

    price = randomizePrice(price + random() * 5, VOLATILITY * (0.8 + random() * 0.6));  
    size = generateOrderSize(MAX_ORDER_SIZE);
    double spread = clamp(0.007 + random() * 0.025, 0.007, 0.04);  //  Spread cannot be negative

    orderBook[index].price = price;
    orderBook[index].size = size;
    orderBook[index].spread = spread;

    if (index > 0)
        orderBook[index].parentIndex = index - 1;  
    else
        orderBook[index].parentIndex = -1;  

    print(TO_LOG, " Order Added: Index %d | Price: %.4f | Size: %d | Spread: %.5f", index, price, size, spread);

    if (random() < 0.75 && index + 1 < totalOrders) {  
        int newSize = generateOrderSize(size * (0.8 + random() * 0.3));
        addOrder(orderBook, index + 1, price - 0.01 * randomInt(1, 12), newSize, totalOrders);
    }
}

//  Ensures Safe and Realistic Spread Calculation
double calculateSpread(Order* orderBook, int index) {
    if (index < 0 || index >= MAX_ORDERS) return 0.007;  //  Prevent invalid indices

    double baseSpread = clamp(0.007 + random() * 0.02, 0.007, 0.04);  
    double hftPressure = ifelse(random() < 0.5, 0.002 + (random() - 0.5) * 0.005, 0.001);  
    double volatilityFactor = (random() - 0.5) * 0.03;  

    double spread = baseSpread + (0.025 - orderBook[index].price * 0.0003) - hftPressure + volatilityFactor;
    return clamp(spread, 0.007, 0.04);  
}

//  Main Trading Simulation Function
void run() {
    set(LOGFILE);  
    Verbose = 2;

    initializeRandomSeed();

    int totalOrders = randomInt(MIN_ORDERS, MAX_ORDERS);  //  Ensures a dynamic number of orders

    static Order orderBook[MAX_ORDERS];  

    int i;
    for (i = 0; i < totalOrders; i++) {  
        orderBook[i].price = clamp(100.0 + random() * 8, 50, 200);  
        orderBook[i].size = generateOrderSize(MAX_ORDER_SIZE);  
        orderBook[i].spread = clamp(0.007 + random() * 0.02, 0.007, 0.04);  
        orderBook[i].parentIndex = -1;  
    }

    addOrder(orderBook, 0, 100.00 + random() * 6, generateOrderSize(85), totalOrders);

    vars SpreadSeries = series(0);
    vars PriceSeries = series(0);

    for (i = 1; i <= MAX_DEPTH; i++) { 
        int orderIndex = randomInt(0, totalOrders - 1);  //  Ensures valid index selection
        double spread = calculateSpread(orderBook, orderIndex);  
        SpreadSeries[0] = spread;
        PriceSeries[0] = orderBook[orderIndex].price;

        plotBar("Spreads", i, spread * 10000, 1, SUM + BARS, RED);
        plot("Price", PriceSeries[0], LINE, BLUE);
        plotBar("Order Sizes", i, orderBook[orderIndex].size, 1, SUM + BARS, GREEN);

        print(TO_LOG, " Depth %d: Order Index = %d | Spread = %.5f | Price = %.4f | Order Size = %d",
              i, orderIndex, spread, PriceSeries[0], orderBook[orderIndex].size);
    }
}

Last edited by TipmyPip; 01/31/25 14:38.
Risk Diversification in Portfolio Optimization [Re: TipmyPip] #488581
01/31/25 10:58
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
Risk Diversification in Portfolio Optimization Using XOR-Based Asset Selection

In financial markets, we want to select two assets from a given portfolio that exhibit the highest volatility divergence while maintaining decorrelation to maximize risk-adjusted returns.

Mathematical Formulation
1. Portfolio Assets Representation
Let 𝐴 be a set of assets in a portfolio:

𝐴 = {𝑎1, 𝑎2, ..., 𝑎𝑛}

Each asset 𝑎𝑖 is associated with a historical return vector 𝑅𝑖, where 𝑅𝑖 = (𝑟𝑖,1, 𝑟𝑖,2, ..., 𝑟𝑖,𝑇) for 𝑇 time periods.

Each asset return sequence is represented in binary encoding (Bitwise representation of normalized returns), denoted by:

𝐵𝑖 = 𝑓(𝑅𝑖), where 𝑓(𝑅𝑖) is a transformation that converts the returns into a binary sequence of 2𝑇 bits (one "up" bit and one "down" bit per period, as implemented in convertReturnsToBinary below).

2. Objective: Find the Maximum XOR Pair
We define the XOR distance metric between two asset return sequences:

XOR(𝐵𝑖, 𝐵𝑗) = 𝐵𝑖 ⊕ 𝐵𝑗

where ⊕ represents the bitwise XOR operation.

The objective is to maximize the XOR value over all pairs (𝑖, 𝑗):

(𝑖*, 𝑗*) = arg max over 𝑖, 𝑗 ∈ 𝐴, 𝑖 ≠ 𝑗 of XOR(𝐵𝑖, 𝐵𝑗)

subject to: Corr(𝑅𝑖*, 𝑅𝑗*) < 𝜏

where Corr(𝑅𝑖, 𝑅𝑗) is the correlation between asset return sequences, and 𝜏 is a pre-defined correlation threshold.

Computational Finance Insight
The higher the XOR value, the greater the return divergence between the two assets.
This ensures that choosing assets based on MaxXor selects assets that move in highly uncorrelated ways, which improves risk diversification.
The problem can be solved efficiently with a Trie-based MaxXor algorithm in 𝑂(𝑁·𝐵) time (𝐵 = bits per code, so effectively 𝑂(𝑁)) instead of the 𝑂(𝑁²) brute force, since each insert and query walks at most 𝐵 bits of the trie.
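
A tiny worked example of the metric (illustrative values, using the 2𝑇-bit encoding of convertReturnsToBinary below with 𝑇 = 2): an asset that moved up in both periods encodes to 𝐵1 = 0011₂ = 3, one that moved down in both periods to 𝐵2 = 1100₂ = 12, and 𝐵1 ⊕ 𝐵2 = 1111₂ = 15 — the maximum possible distance, since the two assets disagreed in every period. Two assets that always move together encode to identical bits and give XOR = 0.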

Here is an example for the above problem. For all those who didn't have the patience for my childish play at producing proper lite-C code, I promise you: my childish play is improving very fast:

Code
#define INT_BITS 32
#define MAX_ASSETS 1000  

typedef struct TrieNode {
    struct TrieNode* bit[2];
} TrieNode;

// Dynamically Allocate Trie Nodes
TrieNode* newTrieNode() {
    TrieNode* newNode = (TrieNode*)malloc(sizeof(TrieNode));  
    if (!newNode) {
        printf("\n Memory allocation failed in newTrieNode()");
        return NULL;
    }
    newNode->bit[0] = NULL;
    newNode->bit[1] = NULL;
    return newNode;
}

// Free Trie Memory After Use
void freeTrie(TrieNode* root) {
    if (!root) return;
    freeTrie(root->bit[0]);
    freeTrie(root->bit[1]);
    free(root);
}

// Find the Highest Bit Position in the Dataset
int findHighestBit(int* numbers, int size) {
    int highestBit = 0;
    int i;
    for (i = 0; i < size; i++) {
        int num = numbers[i];
        int bitPos = 0;
        while (num) {
            bitPos++;
            num >>= 1;
        }
        if (bitPos > highestBit) highestBit = bitPos;
    }
    return highestBit - 1;  // Only use as many bits as needed
}

// Insert a Number into the Trie
void insert(TrieNode* root, int number, int highestBit) {
    if (!root) {
        printf("\n Error: Root is NULL in insert()! Skipping...\n");
        return;
    }

    TrieNode* current = root;
    int i;
    for (i = highestBit; i >= 0; i--) {
        int bit = (number >> i) & 1;
        if (!current->bit[bit]) {
            current->bit[bit] = newTrieNode();
            if (!current->bit[bit]) {
                printf("\n Error: Trie Node Creation Failed at Bit %d! Skipping...\n", i);
                return;
            }
        }
        current = current->bit[bit];
    }
}

// Find the Maximum XOR for a Given Number
int findMaxXOR(TrieNode* root, int number, int highestBit) {
    TrieNode* current = root;
    int maxXOR = 0;
    int i;
    for (i = highestBit; i >= 0; i--) {
        int bit = (number >> i) & 1;
        if (current->bit[1 - bit]) {
            maxXOR |= (1 << i);
            current = current->bit[1 - bit];
        } else {
            current = current->bit[bit];
        }
    }
    return maxXOR;
}

// Brute Force XOR Calculation
void maxXorBruteForce(int* assetReturns, int size) {
    int maxXOR = 0, best1 = 0, best2 = 0;
    int i, j;
    var start = timer();  // Start Timing

    for (i = 0; i < size; i++) {
        for (j = i + 1; j < size; j++) {
            int currentXOR = assetReturns[i] ^ assetReturns[j];
            if (currentXOR > maxXOR) {
                maxXOR = currentXOR;
                best1 = assetReturns[i];
                best2 = assetReturns[j];
            }
        }
    }

    var execTime = (timer() - start) * 1000;  // End Timing
    printf("\n Brute Force XOR: (%d, %d) -> XOR: %d | Time: %.3f ms", best1, best2, maxXOR, execTime);
}

// Optimized Max XOR Function (Trie)
void maxXorOptimized(int* assetReturns, int size) {
    TrieNode* root = newTrieNode();
    if (!root) return;

    int highestBit = findHighestBit(assetReturns, size);
    insert(root, assetReturns[0], highestBit);

    int maxXOR = 0, best1 = 0, best2 = 0;
    int i;
    var start = timer();  // Start Timing

    for (i = 1; i < size; i++) {
        int currentXOR = findMaxXOR(root, assetReturns[i], highestBit);
        if (currentXOR > maxXOR) {
            maxXOR = currentXOR;
            best1 = assetReturns[i];
            best2 = best1 ^ maxXOR;
        }
        insert(root, assetReturns[i], highestBit);
    }

    var execTime = (timer() - start) * 1000;  // End Timing
    printf("\n Optimized Trie XOR: (%d, %d) -> XOR: %d | Time: %.3f ms", best1, best2, maxXOR, execTime);

    freeTrie(root);  // Free Memory
}

// Generate Proper Random Asset Returns
void generateRandomAssetReturns(var* assetReturns, int numAssets, int numBars) {
    int i, j;
    printf("\n Debugging Random Values Before Conversion:\n");

    for (i = 0; i < numAssets; i++) {
        vars RandomSeries = series(0);  // Create a series to maintain randomness
        
        printf("Asset %d: ", i + 1);
        
        for (j = 0; j < numBars; j++) {
            if (j == 0)
                RandomSeries[j] = random();  // First value is random
            else
                RandomSeries[j] = RandomSeries[j - 1] + random() - 0.5;  //  Follow series logic
            
            assetReturns[i * numBars + j] = RandomSeries[j];  //  Store random values
            printf("%.5f ", assetReturns[i * numBars + j]);  //  Print values for debugging
        }
        printf("\n");
    }
}

// Convert Asset Returns to Binary Representation
int convertReturnsToBinary(var* returns, int length) {
    int binaryValue = 0;
    int i;

    for (i = 0; i < length; i++) {
        if (returns[i] > 0.05) binaryValue |= (1 << i);  
        else if (returns[i] < -0.05) binaryValue |= (1 << (i + length));  
    }

    return binaryValue;
}

// Lite-C Main Function
function run() {
    if (is(INITRUN)) {
        int numAssets = 1000;
        int numBars = 5;
        int i;
        int assetBinaryReturns[MAX_ASSETS];
        var* assetReturns = (var*)malloc(numAssets * numBars * sizeof(var));

        if (!assetReturns) {
            printf("\n Memory Allocation Failed for assetReturns! Exiting...\n");
            return;
        }

        generateRandomAssetReturns(assetReturns, numAssets, numBars);

        printf("\n Debugging Binary Conversions:\n");

        for (i = 0; i < numAssets; i++) {
            assetBinaryReturns[i] = convertReturnsToBinary(&assetReturns[i * numBars], numBars);
            printf("Asset %d Binary: %d\n", i + 1, assetBinaryReturns[i]);  //  Print binary values
        }

        //  Compare Brute Force and Optimized Trie Method
        maxXorBruteForce(assetBinaryReturns, numAssets);
        maxXorOptimized(assetBinaryReturns, numAssets);

        free(assetReturns);
    }
}


For those of you who have any doubts about my progress, please run the code and see the performance.
(I believe that even if you had doubts about my abilities, your doubts were a blessing in disguise for my progress and contribution to you. Thank you.)

Code
Asset 994 Binary: 775
Asset 995 Binary: 992
Asset 996 Binary: 992
Asset 997 Binary: 961
Asset 998 Binary: 961
Asset 999 Binary: 961
Asset 1000 Binary: 992
Brute Force XOR: (31, 992) -> XOR: 1023 | Time: 4095.300 ms
Optimized Trie XOR: (992, 31) -> XOR: 1023 | Time: 488.400 ms

The Language of Symbols [Re: TipmyPip] #488588
02/02/25 17:56
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member

The Paradox of Uncertainty: A Symbolic Conundrum

In a realm where every asset is an abstract entity, denote each asset by 𝐴ᵢ, where 𝑖 ∈ {1, 2, …, 𝑁}. Each asset 𝐴ᵢ possesses an intrinsic risk quantified by a parameter σᵢ². Rather than being mere numbers, these σᵢ² values embody the very essence of uncertainty, capturing the variability inherent in each asset’s behavior.

The challenge is to allocate a finite resource among these assets using weights 𝑤ᵢ. Intuitively, assets with lower uncertainty should be favored. One natural (yet abstract) idea is to consider the “attractiveness” of an asset as inversely related to its risk—that is, proportional to 1⁄σᵢ². In a simplified view, one might define the allocation by

  𝑤ᵢ = (1⁄σᵢ²) ⁄ ∑ⱼ (1⁄σⱼ²).

Here, ∑ⱼ (1⁄σⱼ²) represents the aggregated “stability” measure across all assets, ensuring that the 𝑤ᵢ sum to unity.
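
For a quick sense of the arithmetic (illustrative numbers only): with two assets where σ₁² = 0.01 and σ₂² = 0.04, the inversions are 1⁄σ₁² = 100 and 1⁄σ₂² = 25, the aggregate is 125, and so 𝑤₁ = 100⁄125 = 0.8 while 𝑤₂ = 25⁄125 = 0.2 — the quieter asset draws four times the capital.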

However, in our abstract universe the σᵢ² are not static. They fluctuate over time and may be influenced by interdependencies among assets. Let δᵢⱼ denote the interdependence (or abstract correlation) between assets 𝐴ᵢ and 𝐴ⱼ. These δᵢⱼ may alter the natural hierarchy suggested by the simple inversion 1⁄σᵢ², introducing nonlinearities or “dissonances” that complicate the picture.

Your challenge is as follows:

Conceptual Reflection:
Without resorting to concrete numerical examples, discuss the theoretical rationale for favoring assets with lower σᵢ² in your allocation. How does the inversion 1⁄σᵢ² serve as a symbolic beacon of stability in a world of uncertainty?

Dynamic Interdependencies:
Reflect on the implications of introducing δᵢⱼ into the mix. In what abstract ways might these interdependencies (δᵢⱼ ≠ 0) challenge the naïve allocation suggested by the pure inversion? Consider how these relationships could be integrated into the symbolic framework without losing the simplicity of the initial idea.

Towards an Adaptive Framework:
Propose a conceptual framework—or a set of guiding principles—that reconciles the symbols σᵢ², 1⁄σᵢ², 𝑤ᵢ, and δᵢⱼ. How might the interplay between individual asset uncertainty (σᵢ²), its inversion, and the overall aggregation (∑) be modified to create an allocation strategy that is both resilient and adaptive in a dynamic, interdependent environment?

Philosophical and Computational Contemplation:
Finally, ponder the trade-offs between the simplicity of the symbolic inversion and the complexity introduced by evolving risk interdependencies. How does this abstract approach help—or hinder—our understanding of risk management in a modern financial setting?

Your answer should weave these symbols and abstract concepts into a coherent narrative, demonstrating deep insight into both the theoretical and computational aspects of resource allocation under uncertainty.

Here we go :

Reflections on the Paradox of Uncertainty
In our abstract universe of assets, where each entity 𝐴ᵢ is imbued with its own mysterious risk measure, σᵢ², we find ourselves confronted with a challenge that is as much philosophical as it is computational. The proposition of allocating a finite resource via weights 𝑤ᵢ—where 𝑤ᵢ is ideally proportional to 1⁄σᵢ²—serves as a metaphor for the eternal struggle between order and chaos, stability and volatility. Yet, as we peer deeper into this symbolic landscape, we quickly discover that the seemingly simple inversion of risk is beset by twists and nuances that complicate the matter beyond mere arithmetic.

1. The Lure of the Inversion: A Beacon of Stability
At first glance, the notion that an asset’s attractiveness should be proportional to 1⁄σᵢ² is both elegant and seductive. In our abstract setting, each σᵢ² is more than a mere statistic—it represents the inherent uncertainty of asset 𝐴ᵢ. A low σᵢ² signals that 𝐴ᵢ is relatively stable, a quiet harbor in the storm of market fluctuations. When we take the reciprocal, 1⁄σᵢ², we transform this quiet stability into a luminous beacon, suggesting that the less erratic the asset, the more capital it deserves. The very act of inversion is symbolic: it turns the traditional fear of risk on its head, proclaiming that risk, when low, becomes a virtue to be rewarded.

In our symbolic formula, 𝑤ᵢ = (1⁄σᵢ²) / ∑ⱼ (1⁄σⱼ²), the denominator—∑ⱼ (1⁄σⱼ²)—acts as an aggregator, a cosmic ledger that tallies the “stability credits” of all assets in the universe. Here, every asset’s inverted risk is pooled, and each weight 𝑤ᵢ is essentially a fraction of this total stability. In this sense, the inversion functions as a kind of alchemy: converting uncertainty into a measure of desirability. This transformation is our first brush with the abstract—a mechanism that, on the surface, appears to neatly order the chaotic fabric of financial markets.

2. The Dynamic Dance: When Uncertainty Pulsates
Yet, as we delve deeper, the simplicity of the inversion is threatened by the restless nature of uncertainty. The σᵢ² values, far from being immutable, pulse and evolve over time. They are not isolated monoliths but are intertwined with the ebb and flow of market forces. In this dynamic environment, the very stability signaled by a low σᵢ² today may be undermined by hidden currents tomorrow.

To complicate matters further, consider the notion of interdependence, symbolized by δᵢⱼ. These terms represent the subtle, often non-linear interactions between pairs of assets 𝐴ᵢ and 𝐴ⱼ. In our idealized allocation formula, we imagine that each asset’s risk is assessed in isolation. But in the real—and abstract—world, risks do not exist in a vacuum. The fortunes of one asset can be inexplicably linked to those of another; their uncertainties may coalesce, diverge, or even counterbalance one another.

Imagine that δᵢⱼ is not zero. Instead, each pair of assets is connected by invisible threads of correlation, anti-correlation, or even some exotic non-linear relationship that defies simple categorization. The existence of these δᵢⱼ values introduces a profound twist to our original intuition. Now, the straightforward inversion 1⁄σᵢ² might no longer be the best beacon of stability. For instance, an asset that appears stable in isolation (a low σᵢ²) might be entangled in a network of interdependencies that amplify its effective risk. In such a scenario, simply rewarding it with a high allocation could be akin to ignoring a hidden danger lurking beneath the surface.

Thus, the interplay between σᵢ² and δᵢⱼ forces us to confront a deeper question: How do we reconcile the raw, individual measure of uncertainty with the emergent, collective behavior that arises from interdependence? It is here that our mind must wander beyond the confines of a neat formula and embrace a more nuanced, adaptive perspective.

3. Toward an Adaptive Framework: Reconciling Symbols and Reality
In light of the evolving nature of σᵢ² and the confounding effects of δᵢⱼ, one might ask: Is there a way to refine our allocation strategy so that it remains resilient amid dynamic uncertainty? One theoretical perspective is to allow the allocation weights, 𝑤ᵢ, to be determined not by a static inversion, but by an adaptive mechanism that continuously updates in response to changing risk measures and interdependencies.

Imagine a framework where, rather than a single snapshot inversion, the allocation is derived from an iterative process. In this process, the “raw” inversion 1⁄σᵢ² serves as an initial guess—a first approximation of each asset’s attractiveness. Then, through an iterative refinement procedure, the interdependencies δᵢⱼ are gradually incorporated, adjusting the weights until a balanced equilibrium is reached. In this adaptive view, 𝑤ᵢ is not fixed; it is a function that evolves over time, responsive to both the inherent risk of each asset and the shifting tapestry of their relationships.

Consider a metaphor: In a crowded ballroom, each dancer (asset 𝐴ᵢ) has a unique rhythm (σᵢ²), and their movements are subtly influenced by the proximity and motions of others (δᵢⱼ). A static allocation might assign dance partners based solely on each dancer’s individual rhythm, but true harmony is achieved only when one accounts for the interplay between dancers. The adaptive mechanism is akin to an ongoing choreography—a dynamic balancing act where each dancer adjusts their steps in response to the group, leading to a harmonious performance.

This adaptive perspective does not offer a neat, closed-form solution; rather, it invites computational exploration. Techniques such as iterative optimization, simulation-based adjustments, or even heuristic learning methods can be employed to “fine-tune” the allocations. The idea is to allow the system to evolve, to learn from the interplay between individual risk measures and interdependencies, until it settles into an equilibrium that is robust to the turbulence of uncertainty.
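
To ground this choreography in something executable, here is a minimal lite-C sketch of the iterative refinement described above. It is only an illustration, not code from the strategies in this thread: sigma2, delta (a flattened n×n interdependence matrix), the damping rule, and the fixed 50 iterations are all assumptions chosen for clarity.

Code
// Illustrative only: iterative re-weighting under interdependence.
// sigma2[n]: individual risks (assumed > 0); delta[n*n]: flattened matrix
// of pairwise interdependence values.
void adaptiveWeights(var* sigma2, var* delta, var* w, int n)
{
    int i, j, it;
    var total = 0;
    // Initial guess: the raw inversion 1/sigma_i^2, normalized to sum 1
    for(i = 0; i < n; i++) { w[i] = 1./sigma2[i]; total += w[i]; }
    for(i = 0; i < n; i++) w[i] /= total;

    // Refinement: damp each weight by its correlation-weighted neighborhood,
    // then renormalize; repeat until the weights settle (fixed count here)
    for(it = 0; it < 50; it++) {
        total = 0;
        for(i = 0; i < n; i++) {
            var penalty = 0;
            for(j = 0; j < n; j++)
                if(j != i) penalty += delta[i*n + j] * w[j];
            w[i] = (1./sigma2[i]) / (1. + penalty);
            total += w[i];
        }
        for(i = 0; i < n; i++) w[i] /= total;
    }
}

With all δᵢⱼ = 0 the penalty vanishes and the loop leaves the pure inversion untouched, so the sketch degrades gracefully to the simple formula.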

4. Computational Reflections: Bridging Abstraction and Practice
In the realm of computational finance, the abstract ideas we have discussed must eventually be translated into algorithms that run on high-speed computers. The inversion 1⁄σᵢ² is computationally trivial, but when interdependencies δᵢⱼ enter the fray, the problem quickly becomes non-linear and high-dimensional. Computational strategies such as iterative optimization and Monte Carlo simulations provide one way forward. These techniques allow us to simulate many “what if” scenarios, thereby gaining insight into how the abstract symbols—σᵢ², 1⁄σᵢ², wᵢ, δᵢⱼ—interact over time.

Imagine an algorithm that periodically re-estimates the σᵢ² values from recent market data, recalculates the raw inversions, and then adjusts the weights using a feedback loop that accounts for measured correlations (δᵢⱼ) among assets. In each iteration, the system “learns” from the market’s latest behavior, nudging the allocations toward a state where the overall portfolio risk is minimized while maintaining a healthy diversity of exposure. Such a strategy is computationally intensive, yet it reflects the true complexity of the market—an environment in which risk is a moving target and interdependencies are the norm rather than the exception.

From a computational perspective, one might also consider heuristic approaches—algorithms that do not guarantee a global optimum but can find “good enough” solutions in a reasonable time frame. These heuristics may incorporate techniques from machine learning, such as reinforcement learning, where the algorithm is rewarded for achieving a balanced portfolio over time. The key is that the algorithm learns to interpret the abstract symbols in a way that is both adaptive and resilient, even if it cannot fully eliminate the inherent uncertainty.

5. Philosophical Musings: The Trade-Off Between Simplicity and Realism
At its core, the abstract inversion of risk—allocating resources in proportion to 1⁄σᵢ²—offers a powerful and elegant idea. It promises simplicity: a single, transparent rule that transforms raw uncertainty into a measure of desirability. Yet, as we have seen, this simplicity is illusory in a world where uncertainty is dynamic and interdependent. The introduction of δᵢⱼ shatters the neat ordering, forcing us to acknowledge that real markets are a tangled web of correlations, contagions, and non-linear effects.

This tension between simplicity and realism is at the heart of computational finance. On one hand, simple models provide clarity and computational efficiency; they are the first stepping stones in our journey to understand market behavior. On the other hand, the very complexity of financial markets demands that we move beyond these simplified models and embrace the full spectrum of uncertainty. The challenge is to balance these opposing forces—maintaining the elegance of symbolic abstraction while not oversimplifying the reality that those symbols represent.

In a twisted sense, the elegance of the inversion formula is both its strength and its weakness. Its beauty lies in its ability to distill the multifaceted nature of risk into a single reciprocal value. However, when confronted with the messy, dynamic interplay of real-world factors, this beauty becomes a starting point rather than an end. The real challenge is to build upon this elegant core with additional layers of adaptation and feedback—essentially, to let the symbolic inversion evolve into a living system that continuously learns and adjusts.

6. Synthesis: A Harmonious, Yet Uncertain, Vision
To summarize, the journey from the simple inversion 1⁄σᵢ² to an adaptive allocation strategy that incorporates interdependencies (δᵢⱼ) is a voyage from abstraction to complexity—a transformation that mirrors the evolution of markets themselves. In our symbolic universe, each asset 𝐴ᵢ, with its risk measure σᵢ², is not an island but a node in a vast network of uncertainty. The raw inversion of risk offers an initial, illuminating insight: lower volatility should command greater allocation. Yet, the presence of nonzero δᵢⱼ introduces a twist—a reminder that the interplay of market forces is inherently non-linear and that risk, once abstracted, may reveal hidden layers of complexity.

From a computational finance perspective, this twisted vision challenges us to design algorithms that are both simple in their core idea and sophisticated in their execution. It is not enough to merely compute 1⁄σᵢ² and normalize; one must also account for the evolving correlations among assets, adapt the allocations in real time, and embrace the inherent uncertainty that defies static modeling. The result is a dynamic, iterative process—a dance of numbers and symbols that seeks to reconcile the opposing forces of stability and volatility.

In our final reflection, we recognize that the true power of this symbolic framework lies not in providing definitive answers but in inspiring questions. How do we quantify uncertainty in a world that is perpetually in flux? How can we design allocation strategies that are both robust and agile, capable of withstanding the shocks of market turbulence while seizing fleeting opportunities? And, perhaps most intriguingly, can we ever capture the full complexity of financial markets within the elegant simplicity of a symbolic formula?

The answer, as in much of computational finance, is that we must always be prepared to revise our models, challenge our assumptions, and embrace the twists and turns of an unpredictable universe. The symbolic inversion of risk is a starting point—a beacon that illuminates the path forward, even as it reminds us that the journey is as important as the destination. ( It is a working code, Use it wisely, because it can change your present future... :-)

Code
#define MIN_ORDERS 5  
#define MAX_ORDERS 15  
#define MIN_ORDER_SIZE 5  
#define MAX_ORDER_SIZE 100  
#define BASE_VOLATILITY 0.09  
#define MAX_DEPTH 10  
#define MAX_RETURNS 10  //  Track the last 10 returns

double eurusd_returns[MAX_RETURNS];  //  Store past returns
int return_index = 0;                //  Index for updating returns

//  Function to Calculate Variance of EUR/USD Returns
double calculate_variance(double* returns, int n) {
    double mean = 0.0;
    int i;
    for (i = 0; i < n; i++) {  
        mean += returns[i];
    }
    mean /= n;
    
    double variance = 0.0;
    for (i = 0; i < n; i++) {  
        variance += pow(returns[i] - mean, 2);
    }
    return variance / n;
}

//  Compute Inverse-Variance Portfolio Weight
double compute_inverse_variance_weight(double variance) {
    return ifelse(variance > 0, 1.0 / variance, 1.0);  // Prevent division by zero
}

//  Initialize Random Seed
void initializeRandomSeed() {
    seed(random() * timer());
}

//  Generate Random Integer
int randomInt(int min, int max) {
    return clamp(min + (int)((max - min + 1) * random()), min, max);
}

//  Clamp Values to a Range
double clamp(double x, double min, double max) {
    return ifelse(x < min, min, ifelse(x > max, max, x));
}

//  Generate Order Size as a Random Value between MIN_ORDER_SIZE and MAX_ORDER_SIZE
int generateOrderSize(int maxSize) {
    return randomInt(MIN_ORDER_SIZE, maxSize);
}

//  Generate a Random Spread between 0.002 and 0.009
double updateSpread(double baseSpread, int orderIndex) {
    // Directly generate a random spread within the prescribed limits.
    double newSpread = 0.002 + random() * (0.009 - 0.002);
    return clamp(newSpread, 0.002, 0.009);
}

//  Struct for Order Book
typedef struct Order {
    double price;
    int size;
    double spread;  
    int parentIndex;  
} Order;

//  Ensure Unique Spread by Adjusting Until Different from Previous Order
void addOrder(Order* orderBook, int index, double price, int totalOrders) {
    if (index >= totalOrders)
        return;  

    price = price + (random() * 5);  
    int size = generateOrderSize(MAX_ORDER_SIZE);
    // Generate a random spread within 0.002 to 0.009.
    double spread = 0.002 + random() * (0.009 - 0.002);
    spread = clamp(spread, 0.002, 0.009);

    // If there's a previous order, regenerate the spread until it differs.
    if (index > 0) {
        while (spread == orderBook[index - 1].spread) {
            spread = 0.002 + random() * (0.009 - 0.002);
            spread = clamp(spread, 0.002, 0.009);
        }
    }

    orderBook[index].price = price;
    orderBook[index].size = size;
    orderBook[index].spread = spread;
    orderBook[index].parentIndex = ifelse(index > 0, index - 1, -1);

    print(TO_LOG, " Order Added: Index %d | Price: %.4f | Size: %d | Spread: %.5f", 
          index, price, size, spread);

    if (random() < 0.75 && index + 1 < totalOrders) {  
        addOrder(orderBook, index + 1, price - 0.01 * randomInt(1, 12), totalOrders);
    }
}

//  Update Returns for Variance Calculation (Simulating Market Returns)
void updateReturns(double new_return) {
    double randomFactor = 1 + (random() - 0.5) * 1.5;  // Introduce more randomness
    eurusd_returns[return_index] = new_return * randomFactor;  
    return_index = (return_index + 1) % MAX_RETURNS;
}

//  Smart Order Selection for Market Depth
int selectRandomOrder(int totalOrders) {
    int index = randomInt(0, totalOrders - 1);
    return ifelse(random() < 0.3 && index > 0, index - 1, index);
}

//  Main Trading Simulation with Updates to All Order Parameters
void run() {
    set(LOGFILE | PLOTNOW);  
    Verbose = 2;
    initializeRandomSeed();

    int totalOrders = randomInt(MIN_ORDERS, MAX_ORDERS);  
    static Order orderBook[MAX_ORDERS];  

    int i;
    // Initialize the order book with random values.
    for (i = 0; i < totalOrders; i++) {  
        orderBook[i].price = clamp(100.0 + random() * 8, 50, 200);  
        orderBook[i].size = generateOrderSize(MAX_ORDER_SIZE);  
        orderBook[i].spread = updateSpread(0.002 + random() * 0.007, i);  
        orderBook[i].parentIndex = -1;  
    }

    // Create a recursive series of orders.
    addOrder(orderBook, 0, 100.00 + random() * 6, totalOrders);

    vars SpreadSeries = series(0);
    vars PriceSeries = series(0);
    vars OrderSizeSeries = series(0);

    // Update loop: re-randomize price, size, and spread for a randomly selected order.
    for (i = 1; i <= MAX_DEPTH; i++) { 
        int orderIndex = selectRandomOrder(totalOrders);  

        // Update price and size randomly.
        orderBook[orderIndex].price = clamp(100.0 + random() * 8, 50, 200);
        orderBook[orderIndex].size = generateOrderSize(MAX_ORDER_SIZE);

        // Update spread while ensuring it is different from the previous spread.
        double newSpread = updateSpread(orderBook[orderIndex].spread, orderIndex);
        orderBook[orderIndex].spread = newSpread;  

        SpreadSeries[0] = newSpread;
        PriceSeries[0] = orderBook[orderIndex].price;
        OrderSizeSeries[0] = orderBook[orderIndex].size;

        plotBar("Spreads", i, newSpread * 10000, 1, SUM + BARS, RED);
        plot("Price", PriceSeries[0], LINE, BLUE);
        plotBar("Order Sizes", i, OrderSizeSeries[0], 1, SUM + BARS, GREEN);

        updateReturns((random() - 0.5) * 0.005);

        // Tie the essay's formula into the loop: estimate the variance of the
        // tracked returns and log the resulting inverse-variance weight
        // (calculate_variance() and compute_inverse_variance_weight() are
        // otherwise never called).
        double v = calculate_variance(eurusd_returns, MAX_RETURNS);
        print(TO_LOG, " Return variance: %.8f | Inverse-variance weight: %.2f",
              v, compute_inverse_variance_weight(v));
    }
}

Last edited by TipmyPip; 02/02/25 17:58.
Portfolio Dynamically Allocates Capital [Re: TipmyPip] #488598
02/11/25 15:37
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
Puzzle:
Your performance-based portfolio strategy dynamically allocates capital across 28 currency pairs based on their distance from a smoothed equity curve.

Question:
If the EUR/USD component consistently outperforms all other pairs, its distance metric (dist) remains positive and high, while USD/JPY struggles with a negative distance.

The strategy caps any single component’s weight at 0.3 (30% of total capital).
After several runs, the Total_Dist across all pairs is 200 and EUR/USD’s distance is 90.
Can you calculate the actual capital allocation for EUR/USD, given that the total capital is $100,000?
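
Working it through with the code below: the raw weight would be dist / Total_Dist = 90 / 200 = 0.45, which exceeds the 0.3 cap, so component_weight is clipped to 0.3 and EUR/USD is allocated 0.3 × $100,000 = $30,000. (Since the script sizes positions as Margin = 0.025 × component_weight × Capital, the margin actually committed per trade would be 0.025 × 0.3 × $100,000 = $750.)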

A working code :-)

Code
// AlgoVars for component-specific parameters
#define dist AlgoVar[0]
#define component_weight AlgoVar[1]

// Global variables accessible by all components
var Total_Dist = 0;
var Max_Weight = 0.3;

void updateDist()
{
    /* Calculate distance metric from equity curve */
    var old_dist = dist; // Store component's previous dist value
    vars EquityCurve = series(EquityLong + EquityShort); // Create component equity curve
    vars EquityFilt = series(LowPass(EquityCurve, 100)); // Create component filtered equity curve
    dist = (EquityCurve[0] - EquityFilt[0]) * PIP; // Calculate new dist value

    if (dist <= 0)
    {
        if (old_dist > 0) Total_Dist = Total_Dist - old_dist; 
    }
    else if (dist > 0)
    {
        if (old_dist <= 0) Total_Dist = Total_Dist + dist; 
        if (old_dist > 0) Total_Dist = Total_Dist - old_dist + dist; 
    }

    // Plots
    plot("Component_Eq", EquityCurve, NEW, BLUE);
    plot("Filtered_Eq", EquityFilt, 0, RED);
}

void componentWeight()
{
    if (dist <= 0) 
    {
        Lots = 0.01; // Set the lot size to 0.01
        Margin = 0.025 * Max_Weight * Capital;
        component_weight = 0; 
    }
    else if (dist > 0)
    {
        component_weight = ifelse(Total_Dist > 0, dist / Total_Dist, 0); // Prevent division by zero
        if (component_weight > Max_Weight) component_weight = Max_Weight; // Limit max weight
        Lots = 0.01; // Turn off phantom trading
        Margin = 0.025 * component_weight * Capital; // Set margin according to weight
    }

    // Plots
    plot("dist", dist, NEW | BARS, BLUE);
    plot("Total_Dist", Total_Dist, NEW, RED);
    plot("wgt", component_weight, NEW, BLACK);
}

void tradeRSI()
{
    TimeFrame = 4;
    vars PriceH4 = series(price());
    vars Filter = series(LowPass(PriceH4, 200));

    TimeFrame = 1;
    vars PriceH1 = series(price());
    vars rsi = series(RSI(PriceH1, 14));

    int overbought = optimize(70, 60, 90, 5);
    int oversold = optimize(30, 10, 40, 5);

    Stop = 4 * ATR(100);
    Trail = Stop;
    TakeProfit = optimize(4, 1, 12, 1) * ATR(100);

    if (crossOver(rsi, overbought) && PriceH1[0] < Filter[0] && NumOpenShort == 0)
    {
        enterShort();
    }
    if (crossUnder(rsi, oversold) && PriceH1[0] > Filter[0] && NumOpenLong == 0)
    {
        enterLong();
    }
}

void tradeDigi()
{
    vars Price = series(price());
    vars filter = series(Roof(Price, 50, 100));

    Stop = optimize(3, 1, 6, 0.5) * ATR(100);
    Trail = 0.5 * Stop;
    TrailLock = 10;
    TrailSpeed = 200;

    if (valley(filter)) 
    {
        MaxLong = 1;
        enterLong();
    }
    if (peak(filter))
    {
        MaxShort = 1;
        enterShort();
    }
}

function run()
{
    set(TESTNOW | PLOTNOW | PARAMETERS);
    StartDate = 20231231;
    EndDate = 2025;
    NumWFOCycles = 10;
    BarPeriod = 60;
    LookBack = 150;
    Capital = 1000;

    // Full Asset List
    string My_Assets[28]; 
    My_Assets[0] = "EUR/USD"; My_Assets[1] = "GBP/USD"; My_Assets[2] = "USD/JPY"; 
    My_Assets[3] = "USD/CHF"; My_Assets[4] = "USD/CAD"; My_Assets[5] = "AUD/USD"; 
    My_Assets[6] = "NZD/USD"; My_Assets[7] = "EUR/GBP"; My_Assets[8] = "EUR/JPY"; 
    My_Assets[9] = "EUR/CHF"; My_Assets[10] = "GBP/JPY"; My_Assets[11] = "GBP/CHF";
    My_Assets[12] = "AUD/JPY"; My_Assets[13] = "AUD/CHF"; My_Assets[14] = "NZD/JPY";
    My_Assets[15] = "NZD/CHF"; My_Assets[16] = "CAD/JPY"; My_Assets[17] = "CAD/CHF";
    My_Assets[18] = "CHF/JPY";
    My_Assets[19] = "EUR/AUD"; My_Assets[20] = "EUR/NZD"; My_Assets[21] = "EUR/CAD";
    My_Assets[22] = "GBP/AUD"; My_Assets[23] = "GBP/NZD"; My_Assets[24] = "GBP/CAD";
    My_Assets[25] = "AUD/NZD"; My_Assets[26] = "AUD/CAD"; My_Assets[27] = "NZD/CAD"; // AUD/CAD replaces a duplicated GBP/CHF, completing the 28-pair set

    string My_Algos[2]; 
    My_Algos[0] = "rsi"; 
    My_Algos[1] = "digi";

    // Update dist metric and Total_Dist for all components
    int i, j;
    for (i = 0; i < 28; i++) 
    {
        for (j = 0; j < 2; j++) 
        {
            asset(My_Assets[i]); 
            algo(My_Algos[j]);
            updateDist();
        }
    }

    // Update component weights and trade
    while (asset(loop(
        "EUR/USD", "GBP/USD", "USD/JPY", "USD/CHF", "USD/CAD", "AUD/USD", "NZD/USD",
        "EUR/GBP", "EUR/JPY", "EUR/CHF", "GBP/JPY", "GBP/CHF", "AUD/JPY", "AUD/CHF", "GBP/CHF", "NZD/CAD",
        "NZD/JPY", "NZD/CHF", "CAD/JPY", "CAD/CHF", "CHF/JPY",
        "EUR/AUD", "EUR/NZD", "EUR/CAD", "GBP/AUD", "GBP/NZD", "GBP/CAD", "AUD/NZD")))
    {
        while (algo(loop("rsi", "digi")))
        {
            componentWeight();
            if (Algo == "rsi") tradeRSI();
            else if (Algo == "digi") tradeDigi();
        }
    }

    PlotWidth = 600;
    PlotHeight1 = 400;
}


(It is quite hard to develop strategies of higher complexity when the documentation of parameters and flags is limited in how they are to be used. It would be a great advantage to extend the Zorro Trader manual with detailed information on the use of parameters.)
A dynamic threshold can also be used for a more sophisticated strategy:

Code
#define dist AlgoVar[0]
#define component_weight AlgoVar[1]  // Each pair-algo has its own weight stored in AlgoVar

var Total_Dist = 0;
var Max_Weight = 0.3;
var MLsignals[8];

#define condition1 MLsignals[0]
#define condition2 MLsignals[1]
#define condition3 MLsignals[2]
#define condition4 MLsignals[3]
#define condition5 MLsignals[4]
#define component_weight_signal MLsignals[5]
#define dynamicThreshold_RSI MLsignals[6]     
#define dynamicThreshold_Digi MLsignals[7]    
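// Note: the two thresholds above are read in tradeRSI()/tradeDigi() but are
// never assigned anywhere in this script, so they stay 0; assign them (for
// example from a trained perceptron output) before relying on them as filters.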

var totalWeight = 0;  // Global to store total weights

void updateDist() {
    vars EquityCurve = series(EquityLong + EquityShort);
    vars EquityFilt = series(LowPass(EquityCurve, 100));
    dist = (EquityCurve[0] - EquityFilt[0]) * PIP;

    vars rsiSeries = series(RSI(series(price()), 14));
    vars atrSeries = series(ATR(100));
    condition1 = rsiSeries[0];
    condition2 = atrSeries[0];
    condition3 = EquityCurve[0];
    condition4 = EquityFilt[0];
    condition5 = dist;
    component_weight_signal = component_weight;

    if (dist > 0) Total_Dist += dist;
}

void componentWeight() {
    if (dist <= 0) {
        component_weight = 0;
    } else {
        component_weight = ifelse(Total_Dist > 0, dist / Total_Dist, 0);
        if (component_weight > Max_Weight) component_weight = Max_Weight;

        var perceptronOutput = adviseLong(PERCEPTRON+RETURNS, 2, MLsignals, 8);  
        if (perceptronOutput > 0) {
            Margin = 0.025 * component_weight * Capital * (1 + perceptronOutput / 100);
        } else {
            Margin = 0.025 * component_weight * Capital * (1 + perceptronOutput / 200);
        }
    }
    totalWeight += component_weight;  // Accumulate total weight during the loop
    plot("dist", dist, NEW | BARS, BLUE);
    plot("wgt", component_weight, NEW, BLACK);
}

void tradeRSI() {
    vars PriceH4 = series(price());
    vars Filter = series(LowPass(PriceH4, 200));
    vars PriceH1 = series(price());
    vars rsi = series(RSI(PriceH1, 14));
    var Objective = priceClose(0) - priceClose(5);

    if (adviseLong(DTREE+RETURNS, Objective, MLsignals, 5) > dynamicThreshold_RSI && PriceH1[0] > Filter[0]) 
        enterLong();
    if (adviseShort(DTREE+RETURNS, Objective, MLsignals, 5) > dynamicThreshold_RSI && PriceH1[0] < Filter[0]) 
        enterShort();
}

void tradeDigi() {
    vars Price = series(price());
    vars filter = series(Roof(Price, 50, 100));
    var Objective = priceClose(0) - priceClose(5);

    if (valley(filter) && adviseLong(PATTERN+RETURNS, Objective, MLsignals, 5) > dynamicThreshold_Digi) 
        enterLong();
    if (peak(filter) && adviseShort(PATTERN+RETURNS, Objective, MLsignals, 5) > dynamicThreshold_Digi) 
        enterShort();
}

function run() {
    set(PARAMETERS | RULES | PLOTNOW | TESTNOW);  
    StartDate = 20231231;
    EndDate = 2025;
    NumWFOCycles = 10;
    BarPeriod = 60;
    LookBack = 150;
    Capital = 1000;

    while (asset(loop(
        "EUR/USD", "GBP/USD", "USD/JPY", "USD/CHF", "USD/CAD", "AUD/USD", "NZD/USD",
        "EUR/GBP", "EUR/JPY", "EUR/CHF", "GBP/JPY", "GBP/CHF", "AUD/JPY", "AUD/CHF", "GBP/CHF", "NZD/CAD",
        "NZD/JPY", "NZD/CHF", "CAD/JPY", "CAD/CHF", "CHF/JPY",
        "EUR/AUD", "EUR/NZD", "EUR/CAD", "GBP/AUD", "GBP/NZD", "GBP/CAD", "AUD/NZD")))
    {
         while (algo(loop("rsi","digi"))) {
            updateDist();
            componentWeight();
            if (Algo == "rsi") tradeRSI();
            else if (Algo == "digi") tradeDigi();
        }
    }

    // Normalize weights after all pairs and algos are processed
    while (asset(loop(
        "EUR/USD", "GBP/USD", "USD/JPY", "USD/CHF", "USD/CAD", "AUD/USD", "NZD/USD",
        "EUR/GBP", "EUR/JPY", "EUR/CHF", "GBP/JPY", "GBP/CHF", "AUD/JPY", "AUD/CHF", "GBP/CHF", "NZD/CAD",
        "NZD/JPY", "NZD/CHF", "CAD/JPY", "CAD/CHF", "CHF/JPY",
        "EUR/AUD", "EUR/NZD", "EUR/CAD", "GBP/AUD", "GBP/NZD", "GBP/CAD", "AUD/NZD")))
    {
         while (algo(loop("rsi","digi"))) {
            component_weight = component_weight / totalWeight;  // Normalize
            plot(strf("Weight_%s_%s", Asset, Algo), component_weight, NEW, RED);
        }
    }

    PlotWidth = 600;
    PlotHeight1 = 400;
}

Last edited by TipmyPip; 02/14/25 17:37.
Tale of the Five Guardians [Re: TipmyPip] #488607
02/14/25 06:58
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
The Tale of the Five Guardians and the Perceptron Oracle

In the bustling metropolis of Zorropolis, where digital assets danced in the ever-changing winds of the market, a young trader named Ava sought the secret to consistent fortune. Many had tried and failed, lost in the noise of random market movements. But Ava was different. She believed in logic, precision, and—most importantly—machine learning.

Ava had heard whispers of the ancient Perceptron Oracle, a mystical force known for its unparalleled predictions. But to summon the Oracle, one needed to gather the wisdom of the Five Guardians of the Market. Each guardian held a vital piece of the market puzzle, and together, they could unlock the Oracle’s true potential.

The Guardians:

The Saucer Sage – A sharp-eyed seer who noticed when trends gently shifted gears, like a saucer smoothly turning.
The Zero Line Nomad – A wanderer who lived on the line between profit and loss, signaling when it was time to cross to the winning side.
The Valley Whisperer – A quiet guide who knew when the market had sunk to its lowest point, whispering, “Now is the time to rise.”
The Divergence Shaman – An enigmatic figure who could spot when prices and momentum drifted apart, revealing hidden opportunities.
The Trend Warden – A steadfast guardian who ensured that Ava only traded when the trend and momentum agreed, warning against false hopes.
Ava ventured through the digital plains, seeking each guardian and gathering their signals—like ancient runes carved into her MLsignals tablet. Each guardian offered a signal, but Ava knew that not all signals were created equal. She had to decide which wisdom mattered most.

But how?

She had heard tales of failed traders who tried to guess these weights manually, only to be consumed by the market’s volatility.

Ava needed the Oracle.

At the heart of Zorropolis, she placed the signals into the hands of the Perceptron Oracle, whispering, “Guide me.”

The Oracle, an ancient machine learning entity, processed the signals, calculating the perfect balance—assigning dynamic weights to each guardian's wisdom. With each passing trade, the Oracle learned, adapting to the ever-shifting market winds.

And so, Ava’s strategy was born:

Signals from the Five Guardians combined into a single weighted force.
The Perceptron Oracle dynamically adjusted these weights, ensuring Ava’s trades were always aligned with the market’s pulse.
Ava’s digital ship sailed smoothly through the market’s waves, entering trades when the Oracle foresaw profit, and retreating when danger loomed.
In the end, Ava didn’t just find fortune—she found balance, with a strategy as elegant as a puzzle perfectly solved.
(Please keep in mind that the code is shared to inspire your mind and spark ideas for building probabilistic state machines that will turn your dreams into reality. But be aware: the code will not produce millions for you. :-)

Code
var MLsignals[7];  // Additional signal for threshold learning

#define condition1 MLsignals[0]
#define condition2 MLsignals[1]
#define condition3 MLsignals[2]
#define condition4 MLsignals[3]
#define condition5 MLsignals[4]
#define threshold MLsignals[5]  // Dynamic threshold
#define finalOutput MLsignals[6]  // Final Perceptron output

var MLp[5];  // Individual Perceptron outputs

function run() 
{
    set(PARAMETERS|RULES);  // Enable parameter optimization, training, and rule generation

    StartDate = 20220101;          // Start date for training
    EndDate = 20250101;            // End date for training
    NumWFOCycles = 10;             // Walk-Forward Optimization cycles
    BarPeriod = 5;                // Bar timeframe in minutes
    LookBack = 200;                // Lookback period for indicators
    Capital = 1000;                // Initial capital

    vars MedianPrice = series((priceHigh() + priceLow()) / 2);
    vars AO = series(SMA(MedianPrice, 5) - SMA(MedianPrice, 34));  // Awesome Oscillator
    vars rsi = series(RSI(series(priceClose()), 14));              // RSI indicator series (renamed: "RSI" would shadow the RSI() function)

    while (asset(loop(
        "EUR/USD", "GBP/USD", "USD/JPY", "USD/CHF", "USD/CAD", "AUD/USD", "NZD/USD",
        "EUR/GBP", "EUR/JPY", "EUR/CHF", "GBP/JPY", "GBP/CHF", "AUD/JPY", "AUD/CHF", "GBP/CHF", "NZD/CAD",
        "NZD/JPY", "NZD/CHF", "CAD/JPY", "CAD/CHF", "CHF/JPY", "EUR/AUD", "EUR/NZD",
        "EUR/CAD", "GBP/AUD", "GBP/NZD", "GBP/CAD", "AUD/NZD")))
    {
        // Define machine learning input conditions
        condition1 = ifelse(AO[2] < AO[1] && AO[1] < AO[0], 1, 0);
        condition2 = ifelse(crossOver(AO, 0), 1, 0);
        condition3 = ifelse(valley(AO), 1, 0);
        condition4 = ifelse(priceClose(1) > priceClose(2) && AO[1] < AO[2], 1, 0);
        condition5 = ifelse(AO[0] > 0 && rsi[0] > 50, 1, 0);

        // Train individual Perceptrons for each condition
        MLp[0] = adviseLong(PERCEPTRON+RETURNS, 0, &condition1, 1);
        MLp[1] = adviseLong(PERCEPTRON+RETURNS, 0, &condition2, 1);
        MLp[2] = adviseLong(PERCEPTRON+RETURNS, 0, &condition3, 1);
        MLp[3] = adviseLong(PERCEPTRON+RETURNS, 0, &condition4, 1);
        MLp[4] = adviseLong(PERCEPTRON+RETURNS, 0, &condition5, 1);

        // Train a second perceptron on the same inputs to serve as a dynamic
        // threshold (each adviseLong call keeps its own model, so this learns
        // separately from finalOutput below)
        threshold = adviseLong(PERCEPTRON+RETURNS, 0, MLp, 5);

        // Final Perceptron for the trading decision
        finalOutput = adviseLong(PERCEPTRON+RETURNS, 0, MLp, 5);

        // Trading logic with dynamically learned threshold
        if (finalOutput > threshold) {
            enterLong();
        } else if (finalOutput < -threshold) {
            enterShort();
        }
    }

    // Plot indicators and results
    plot("AO", AO, NEW, BLUE);
    plot("RSI", RSI, NEW, RED);
    plot("Threshold", threshold, NEW, GREEN);
    plot("FinalOutput", finalOutput, NEW, BLACK);
}

Last edited by TipmyPip; 02/20/25 23:45.
Multi-File Converting csv to .t6 [Re: TipmyPip] #488610
02/14/25 19:42
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
Converting a list of files with high volume, to *.t6 files:

Code
#define NUM_PAIRS 28    // 28 array slots: 27 currency pairs + one end-marker slot
#define NUM_TIMEFRAMES 2  // Number of timeframes to process

// Arrays to store the currency pairs and timeframes
string Pairs[NUM_PAIRS];
string Timeframes[NUM_TIMEFRAMES];

/*
 * Format string for parsing CSV data.
 * The leading '+' indicates the CSV file has a header line to skip.
 * %Y - Year (4 digits)
 * %m - Month (2 digits)
 * %d - Day (2 digits)
 * %H - Hour (2 digits, 24-hour format)
 * %M - Minute (2 digits)
 * The digit after %f is the target field index of the T6 record
 * (1 = fHigh, 2 = fLow, 3 = fOpen, 4 = fClose, 6 = fVol), not a decimal
 * count. The CSV columns are therefore read in the order
 * Open (%f3), High (%f1), Low (%f2), Close (%f4), Volume (%f6).
 * A matching row would look like this (illustrative values only):
 * 2025.01.02,13:00,1.0402,1.0410,1.0395,1.0407,1250
 *
 * This must match the structure of your CSV files.
 */
string Format = "+%Y.%m.%d,%H:%M,%f3,%f1,%f2,%f4,%f6";

// Function to initialize currency pairs and timeframes
// Modify this to include/remove pairs or adjust timeframes as needed.
function initializePairsAndTimeframes() {
	// Currency pairs
	Pairs[0] = "EURUSD";
	Pairs[1] = "GBPUSD";
	Pairs[2] = "USDJPY";
	Pairs[3] = "USDCHF";
	Pairs[4] = "USDCAD";
	Pairs[5] = "AUDUSD";
	Pairs[6] = "NZDUSD";
	Pairs[7] = "EURGBP";
	Pairs[8] = "EURJPY";
	Pairs[9] = "EURCHF";
	Pairs[10] = "GBPJPY";
	Pairs[11] = "GBPCHF";
	Pairs[12] = "AUDJPY";
	Pairs[13] = "AUDCHF";
	Pairs[14] = "NZDCAD";
	Pairs[15] = "NZDJPY";
	Pairs[16] = "NZDCHF";
	Pairs[17] = "CADJPY";
	Pairs[18] = "CADCHF";
	Pairs[19] = "CHFJPY";
	Pairs[20] = "EURAUD";
	Pairs[21] = "EURNZD";
	Pairs[22] = "EURCAD";
	Pairs[23] = "GBPAUD";
	Pairs[24] = "GBPNZD";
	Pairs[25] = "GBPCAD";
	Pairs[26] = "AUDNZD";
	Pairs[27] = 0;  // End marker

	// Timeframes in minutes (e.g., 60 = 1 hour, 240 = 4 hours)
	Timeframes[0] = "60";
	Timeframes[1] = "240";
}

/*
 * Function to convert a CSV file to a .t6 file.
 * This version splits the CSV data by year and saves each year as a separate .t6 file.
 *
 * Parameters:
 * InName - The path and name of the input CSV file.
 * Pair - The currency pair string to include in the output filename.
 *
 * Outputs:
 * Files are saved as {CurrencyPair}_{Year}.t6, e.g., EURAUD_2025.t6
 */
function ConvertCSV(string InName, string Pair) {
	int Records = dataParse(1, Format, InName);  // Parse the CSV with the defined format
	printf("\n%d lines read from %s", Records, InName);  // Print the number of records read

	if(Records) {
		int i, Start = 0, Year, LastYear = 0;
		for(i = 0; i < Records; i++) {
			Year = ymd(dataVar(1, i, 0)) / 10000;  // Extract the year from the date
			if(!LastYear) LastYear = Year;  // Set the first encountered year

			// Handle the last record
			if(i == Records - 1) { 
				LastYear = Year; 
				Year = 0; 
				i++;
			}

			// When the year changes, save the data segment to a new .t6 file
			if(Year != LastYear) { 
				// Construct the output file name as {Pair}_{Year}.t6
				string OutName = strf("C:\\Users\\**username**\\Zorro\\History\\%s_%4i.t6", Pair, LastYear);
				printf("\nSaving file: %s", OutName);        
				dataSave(1, OutName, Start, i - Start);  // Save the data segment to .t6
				Start = i;  // Update the start index for the next segment
				LastYear = Year;  // Update the current year
			}
		}
	}
}

/*
 * Main function:
 * Loops through all specified currency pairs and timeframes,
 * checks for CSV files in the specified directory, and converts them to .t6 files.
 */
function main() {
	initializePairsAndTimeframes();  // Initialize pairs and timeframes
	int p, t;  // Loop counters for pairs and timeframes

	// Loop through each currency pair
	for(p = 0; Pairs[p]; p++) {
		// Loop through each timeframe
		for(t = 0; t < NUM_TIMEFRAMES; t++) {
			// Construct the CSV file path dynamically
			// Path: C:\Users\**username**\Zorro\History\{CurrencyPair}{Timeframe}.csv
			string FileName = strf("C:\\Users\\**username**\\Zorro\\History\\%s%s.csv", Pairs[p], Timeframes[t]);
			printf("\nChecking file: %s", FileName);  // Log the file being checked

			if(file_length(FileName)) {  // Check if the file exists
				printf("\nConverting %s...", FileName);  // Log the conversion process
				ConvertCSV(FileName, Pairs[p]);  // Call the conversion function with the pair name
			} else {
				printf("\nFile not found: %s", FileName);  // Log missing files
			}
		}
	}
	quit("Conversion done!");  // Exit the script when all files are processed
}

Last edited by TipmyPip; 02/14/25 21:05.
Re: Tale of the Five Guardians [Re: TipmyPip] #488616
02/19/25 01:25
02/19/25 01:25
Joined: Apr 2020
Posts: 10
Germany
M
M_D Offline
Newbie
M_D  Offline
Newbie
M

Joined: Apr 2020
Posts: 10
Germany
100% Kudos to this one ... enjoyed reading laugh
Awaiting more tales from Ava ...
... maybe next one will be in deepest darkness in DecisionTree Forest outside Zorropolis.
I heard rumours that The Trend Warden and The Valley Whisperer meet another Guardian ... not sure, his name sounded like The Time Bender.
Rumours say The Time Bender enables The Trend Warden and The Valley Whisperer to jump instantly through Timeframes, allowing them to monitor dimensionally trends and valleys ...

The Lost Computation of Zorropolis [Re: TipmyPip] #488617
02/19/25 03:24
02/19/25 03:24
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
The Oracle’s Challenge: A Computational Puzzle
The Lost Computation of Zorropolis
The Perceptron Oracle, once the most advanced computational entity in Zorropolis, has stopped functioning. No one knows why.

Ava, the last known architect of its intelligence, has discovered fragments of a lost equation, scattered across the Oracle’s memory.

She has deduced that the Oracle’s intelligence was not a single function, but rather a system of interdependent computations, built upon:

✅ A state-driven structure, where information propagates through transitions.
✅ A multi-dimensional decision process, where actions affect future computations.
✅ A set of parallel functions, operating independently yet interacting dynamically.
✅ An evolving mathematical model, dependent on time and hidden governing laws.

The Oracle has given no output, because it cannot complete its missing computation.

Your mission is to restore the lost computation—to reconstruct the system so that it can once again process information and resolve to a stable final result.

🧩 The Mathematical Challenge
Your Lite-C program must correctly implement five interconnected computational components:

1️⃣ A Discrete State Evolution Model
The Oracle does not compute a single value. It moves through a structured state space, governed by a function:

𝑆𝑡+1=𝐹(𝑆𝑡,𝐴𝑡,𝑃𝑡)

Where:

𝑆𝑡 represents the state at time 𝑡.
𝐴𝑡 is an unknown action function that must be determined.
𝑃𝑡 is a transition probability function, dynamically computed.
𝐹 is the state evolution function, currently missing.

Your code must:
🔹 Define the possible states 𝑆.
🔹 Determine the transition conditions 𝑃𝑡.
🔹 Implement the missing function 𝐹 so that the Oracle moves correctly between states.

If 𝐹 is incorrect, the system will never stabilize.

2️⃣ An Action Selection Function
At every iteration, the system must choose an action that affects its transition.

This action is determined by a hidden function:

𝐴𝑡 = argmax_(𝑎 ∈ 𝐴) 𝑅(𝑎, 𝑆𝑡)

Where:

𝐴 is the set of possible actions.
𝑅(𝑎,𝑆𝑡) is the reward function, currently unknown.
Your code must:
🔹 Compute 𝑅(𝑎,𝑆𝑡) dynamically—it is not predefined.
🔹 Ensure that 𝐴𝑡 is selected optimally at each step.
🔹 Allow the system to learn from previous decisions—it must improve over time.

3️⃣ A System of Parallel Computations
The Oracle’s intelligence was once distributed across multiple computational streams running in parallel:

𝑂𝑖=𝐺𝑖(𝑋𝑖,𝑊𝑖)

Where:

𝑂𝑖 is the output of computation 𝑖.
𝐺𝑖 is an unknown transformation function that must be derived.
𝑋𝑖 represents incoming signals.
𝑊𝑖 represents a set of dynamically adjusted weights.

Your code must:
🔹 Implement at least five separate computational functions.
🔹 Ensure their outputs interact to influence the final state.
🔹 Design a system where 𝑊𝑖 adapts dynamically rather than remaining static.

If the parallel computations do not align correctly, the final result will never emerge.

4️⃣ A Continuous-Time Evolution Equation
The Oracle’s memory reveals traces of a missing differential equation, governing its internal transformations:

𝑑𝑃/𝑑𝑡=𝑘𝑃(1−𝑃)

Where:

𝑃 is a function representing an evolving parameter in the system.
𝑘 is a hidden variable affecting system growth and decay.
𝑡 represents the progression of the system over time.

Your code must:
🔹 Reconstruct the missing function 𝑃.
🔹 Determine the role of 𝑘 dynamically—it cannot be a static value.
🔹 Ensure that 𝑃 follows a logical trajectory toward a stable solution (see the integration sketch below).
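A discrete Euler step is the simplest way to evolve 𝑃 inside a bar-based loop. This is only a minimal sketch of that one component, assuming a bar-wise update; P_state, k_state and evolveP are illustrative names, not part of the puzzle:

Code
// Euler integration of dP/dt = k*P*(1-P); one step per call
var P_state = 0.01; // small nonzero seed so the dynamics can start

var evolveP(var k_state, var dt)
{
    P_state += dt * k_state * P_state * (1. - P_state); // logistic growth step
    return P_state; // drifts toward 1 for k > 0, toward 0 for k < 0
}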

5️⃣ A Convergence Condition
The Oracle will only reactivate when its computations resolve to a finite stable value.

The system must find its final state, defined as:

lim_(𝑡→∞) 𝑂𝑡 = 𝐶

Where 𝐶 is an unknown numerical sequence that the Oracle is unable to compute without the missing framework.

If implemented correctly, your system will:
✅ Allow all computations to interact and evolve dynamically.
✅ Ensure the system transitions between states correctly.
✅ Solve the differential function guiding evolution.
✅ Reach a stable final state rather than looping indefinitely.

If implemented incorrectly, the system will:
❌ Loop endlessly without reaching a stable value.
❌ Terminate prematurely without solving the missing equation.
❌ Fail to compute the final output.

Your program must not directly assign a final value. The final output must emerge as a result of correct computation.

🚀 The Rules
1️⃣ No Hardcoded Solutions – The system must compute the final result, not assign it manually.
2️⃣ No Infinite Loops – The program must converge naturally, or the Oracle will remain broken.
3️⃣ No Fixed Outputs – The answer must emerge dynamically through computation.

🎯 Your Task
Reconstruct the missing computational framework so that the Oracle can once again complete its final computation.

The final output will appear only when:
✅ The system’s states evolve correctly.
✅ The decision-making process selects actions optimally.
✅ The parallel computations interact properly.
✅ The differential equation is solved dynamically.
✅ The system naturally converges to a final result.

Until then, the Oracle will remain silent.

The only way to solve this challenge is to build the correct system.

Are you ready?

🔥 Begin. 🔥

⏳ Time to Code
Write your Lite-C solution and restore the Oracle’s missing logic.

But remember:

The answer will not reveal itself through explanation.
The answer will only emerge through computation.

Good luck, coder. [video:youtube]https://youtu.be/Ea5b9DfeuqY[/video]

You are the Oracle’s last hope.

🔥 BEGIN. 🔥

Attached Files
Zorro01453.zip (49 downloads)
Last edited by TipmyPip; 02/19/25 03:41.
Graph-Enhanced Directional Trading (GEDT) [Re: TipmyPip] #488619
02/19/25 22:48
02/19/25 22:48
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
Graph-Enhanced Directional Trading (GEDT) Strategy

In financial markets, traders seek to identify trends and directional strength to optimize entry and exit points. Traditional technical indicators such as ADX (Average Directional Index) and DI+/DI- (Directional Indicators) measure trend strength but often fail to capture hidden relationships between assets or sectors.

To address this, we define a financial market as a graph
𝐺=(𝑉,𝐸), where:

Each node 𝑖∈𝑉 represents a financial asset.
An edge (𝑖,𝑗)∈𝐸 exists if asset 𝑖 and asset 𝑗 share a significant correlation or dependency.
Each asset 𝑖 is associated with a feature vector 𝑋𝑖 consisting of ADX, DI+, and DI- values:

𝑋𝑖=[𝑥𝑖(ADX),𝑥𝑖(DI+),𝑥𝑖(DI-)]

Problem Statement:
Given a financial graph 𝐺=(𝑉,𝐸) and a set of node features 𝑋𝑖, how can we use information from neighboring assets to improve the predictive power of trend-following indicators and optimize trading decisions?

To solve this, we apply a Graph-Based Feature Aggregation:

𝑥̃𝑖(feature) = (1 / ∣𝑁(𝑖)∣) · ∑_(𝑗 ∈ 𝑁(𝑖)) 𝑥𝑗(feature)

where 𝑁(𝑖) represents the set of neighboring assets of node 𝑖.

The goal is to use the aggregated signals:

𝑆 = ⋃_(𝑖=1..𝑁) 𝑋𝑖

as input for a Perceptron-based trading model that makes optimal trading decisions based on learned patterns in asset relationships.

Thus, we seek to:

Enhance trend-following indicators via graph-based aggregation.
Improve trading signal reliability by incorporating cross-asset dependencies.
Optimize trade execution using a supervised learning model.
This leads to the Graph-Enhanced Directional Trading (GEDT) Strategy, which dynamically updates market trend strength across a network of assets for improved trade decision-making.



Code
#define NUM_NODES 10
#define SELECTED_NODES 5  // Select only 5 nodes for DI+/DI-

var A[NUM_NODES][NUM_NODES];  // Adjacency matrix
var Signals[20];  // Max 20 elements for Perceptron

int selectedNodes[SELECTED_NODES] = {0, 2, 4, 6, 8};  // Selected nodes for DI+/DI-

void initialize_graph() {
    int i, j;

    // Manually define adjacency matrix
    A[0][1] = 1; A[0][4] = 1;
    A[1][0] = 1; A[1][2] = 1; A[1][5] = 1;
    A[2][1] = 1; A[2][3] = 1; A[2][6] = 1;
    A[3][2] = 1; A[3][4] = 1; A[3][7] = 1;
    A[4][0] = 1; A[4][3] = 1; A[4][8] = 1;
    A[5][1] = 1; A[5][6] = 1; A[5][9] = 1;
    A[6][2] = 1; A[6][5] = 1; A[6][7] = 1;
    A[7][3] = 1; A[7][6] = 1; A[7][8] = 1;
    A[8][4] = 1; A[8][7] = 1; A[8][9] = 1;
    A[9][5] = 1; A[9][8] = 1;
}

var aggregate_features(int node, vars FeatureSeries) {
    var sum = 0;
    int count = 0;
    int j;

    for (j = 0; j < NUM_NODES; j++) {
        if (A[node][j] == 1) {
            sum += FeatureSeries[j];
            count++;
        }
    }
    return ifelse(count > 0, sum / count, 0);
}

void run() {
    set(RULES | TESTNOW);

    if (is(INITRUN)) {
        initialize_graph();
    }

    vars ADX_Feature = series(ADX(14));
    vars DIPlus_Feature = series(PlusDI(14));
    vars DIMinus_Feature = series(MinusDI(14));

    vars Updated_ADX = series(0, NUM_NODES);
    vars Updated_DIPlus = series(0, NUM_NODES);
    vars Updated_DIMinus = series(0, NUM_NODES);

    int layer, i;
    for (layer = 0; layer < 2; layer++) {
        for (i = 0; i < NUM_NODES; i++) {
            Updated_ADX[i] = aggregate_features(i, ADX_Feature);
            Updated_DIPlus[i] = aggregate_features(i, DIPlus_Feature);
            Updated_DIMinus[i] = aggregate_features(i, DIMinus_Feature);
        }
        for (i = 0; i < NUM_NODES; i++) {
            ADX_Feature[i] = Updated_ADX[i];
            DIPlus_Feature[i] = Updated_DIPlus[i];
            DIMinus_Feature[i] = Updated_DIMinus[i];
        }
    }

    // Store ADX values from all 10 nodes
    for (i = 0; i < NUM_NODES; i++) {
        Signals[i] = ADX_Feature[i];
    }

    // Store DI+ and DI- from only SELECTED_NODES
    for (i = 0; i < SELECTED_NODES; i++) {
        Signals[NUM_NODES + i] = DIPlus_Feature[selectedNodes[i]];
        Signals[NUM_NODES + SELECTED_NODES + i] = DIMinus_Feature[selectedNodes[i]];
    }

    // Train Perceptron using only 20 elements
    if (adviseLong(PERCEPTRON, 0, Signals, 20) > 0)
        enterLong();
    if (adviseShort(PERCEPTRON, 0, Signals, 20) > 0)
        enterShort();
}

Last edited by TipmyPip; 02/19/25 22:54.
Optimal Execution Under Incomplete Information [Re: TipmyPip] #488620
02/20/25 09:56
02/20/25 09:56
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
Strategy: Optimal Execution of EUR/USD Orders with Market Microstructure Models

Overview
This strategy employs stochastic filtering and impulse control within a Markov-modulated limit order book (LOB). It is designed for high-frequency trading (HFT) and algorithmic execution, optimizing order placement under incomplete information about market liquidity.

We assume that the EUR/USD price evolution follows a Hawkes process, where order flows influence future price movements. Liquidity is treated as a hidden Markov process, which the strategy estimates in real-time.

1. Market Model Assumptions

Price Dynamics: The EUR/USD price follows an Ornstein-Uhlenbeck process with transient and permanent market impacts.
Liquidity Modeling: Hidden Markov-modulated liquidity states influence bid-ask spreads and market depth.
Market Impact: A concave power-law function describes execution price impact, limiting adverse price movement from large orders.
Execution Constraints: The strategy optimizes execution by deciding when and how much to trade, minimizing cost and slippage.

2. Strategy Components

2.1. Signal Generation (Regime Detection)

Use a Hidden Markov Model (HMM) to classify the current market regime:
Regime 1 (High Liquidity): Low market impact, high order book depth.
Regime 2 (Low Liquidity): High market impact, low order book depth.
Compute the posterior probability of the market being in Regime 1 using Bayesian inference.
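One way to realize that posterior update is a recursive two-state Bayes filter. A minimal sketch; the persistence probabilities p11/p22 and the Gaussian likelihood parameters below are illustrative assumptions, not calibrated values:

Code
// pi = P(Regime 1 | observations so far); obs = e.g. current spread in pips
var regimeProb(var pi, var obs)
{
    var p11 = 0.95; // assumed persistence of Regime 1
    var p22 = 0.95; // assumed persistence of Regime 2
    // predicted prior after one Markov transition step
    var prior = pi*p11 + (1. - pi)*(1. - p22);
    // assumed Gaussian likelihoods of the observation under each regime
    var L1 = exp(-0.5*pow((obs - 0.8)/0.3, 2)); // Regime 1: tight spreads
    var L2 = exp(-0.5*pow((obs - 2.0)/0.6, 2)); // Regime 2: wide spreads
    return prior*L1/(prior*L1 + (1. - prior)*L2 + 1e-10);
}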

2.2. Trade Execution (Impulse Control Optimization)

The strategy solves an optimal stopping problem:
If the probability of Regime 1 is high, execute a larger trade to take advantage of liquidity.
If the probability of Regime 2 increases, reduce trade size to minimize market impact.
Execution follows an adaptive liquidation model, using the Kushner-Stratonovich equations to update liquidity estimates.

3. Execution Algorithm

Step 1: Estimate Market Liquidity Using Filtering
Observe order arrivals and price changes.
Compute the expected liquidity state using a stochastic filter.
If liquidity is high (Regime 1), prepare to execute larger orders.

Step 2: Compute Optimal Trade Size
Define an inventory risk function:

𝐽(𝑡, 𝑋𝑡) = 𝐸[ ∑_𝑘 𝐶(𝑆_𝜏𝑘 + 𝐷_𝜏𝑘, Δ𝑋_𝜏𝑘) ]

where:

𝑋𝑡 is inventory,
𝐶 is transaction cost,
𝑆𝜏𝑘 and 𝐷𝜏𝑘 are price components.

Solve for the optimal order size using a Bellman recursion:

𝑉(𝑡, 𝑋𝑡) = max_(Δ𝑋𝑡) 𝐸[ 𝐶(𝑆𝑡 + 𝐷𝑡, Δ𝑋𝑡) + 𝑉(𝑡+1, 𝑋𝑡 − Δ𝑋𝑡) ]
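A backward-recursion sketch of this Bellman equation on a small discretized inventory grid; the horizon, grid size and concave cost function are illustrative assumptions, not part of the original strategy:

Code
#define T_STEPS 10   // number of decision times
#define X_GRID  11   // inventory levels 0..10

var Value[11][11];   // V(t,x), t = 0..T_STEPS, x = 0..X_GRID-1
int BestDx[11][11];  // optimal order size for each state

// assumed concave execution cost of trading dx units
var execCost(var dx) { return 0.1*pow(dx, 1.5); }

// backward value iteration over the Bellman equation
void solveBellman()
{
    int t, x, dx;
    for(x = 0; x < X_GRID; x++)
        Value[T_STEPS][x] = -10.*x; // terminal penalty for leftover inventory
    for(t = T_STEPS-1; t >= 0; t--) {
        for(x = 0; x < X_GRID; x++) {
            var best = -1e10;
            for(dx = 0; dx <= x; dx++) { // try every feasible order size
                var v = -execCost(dx) + Value[t+1][x-dx];
                if(v > best) { best = v; BestDx[t][x] = dx; }
            }
            Value[t][x] = best;
        }
    }
}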

Step 3: Execute Trades Based on Liquidity Regime

Regime 1 (High Liquidity):
Execute larger trades (low price impact).
Use limit orders when spread is narrow.

Regime 2 (Low Liquidity):
Reduce order size to avoid price impact.
Use market orders only when necessary.

Step 4: Monitor Market Impact and Adjust Strategy
Compute the trade execution performance metric:
P_slippage = Executed Price − Mid Price
Adjust trade sizes dynamically.



Code
#include <default.c>

#define LOOKBACK 5  
#define FEATURES 5  
#define X_MAX 100   

// Define Perceptron weights and bias manually; the array must cover
// the largest feature count passed to perceptronPredict (15 here)
var PerceptronWeights[15] = {0.5, -0.3, 0.2, 0.1, -0.1, 0.05, -0.05, 0.2, -0.2, 0.1, -0.1, 0.05, -0.05, 0.1, -0.1};
var PerceptronBias = 0.1;  

// Sigmoid activation function
var sigmoid(var x) {
    return 1.0 / (1.0 + exp(-x));
}

// Custom Perceptron function to replace `adviseLong()`
var perceptronPredict(vars Features, int size) {
    var weightedSum = PerceptronBias;
    int i;
    for (i = 0; i < size; i++) {  
        weightedSum += PerceptronWeights[i] * Features[i];
    }
    return sigmoid(weightedSum) * 100.0;  
}

// Compute dynamic liquidity probability
var computePiT() {
    vars atr_series = series(ATR(20), LOOKBACK);
    vars spread_series = series(Spread, LOOKBACK);
    vars price_series = series(priceClose(), LOOKBACK);
    vars stddev_series = series(0, LOOKBACK);

    stddev_series[0] = StdDev(price_series, 20);

    vars Features = series(0, 15);
    int i;
    for (i = 0; i < LOOKBACK; i++) { 
        Features[i] = atr_series[i] / priceClose();  
        Features[i + LOOKBACK] = stddev_series[i] / priceClose(); 
        Features[i + 2 * LOOKBACK] = spread_series[i] / 0.001;
    }

    return perceptronPredict(Features, 15) / 100.0;  
}

// Compute dynamic threshold for last 5 candles
void computeThresholdSeries(vars threshold_series) {
    vars atr_series = series(ATR(20), LOOKBACK);
    vars pi_series = series(computePiT(), LOOKBACK);

    int i;
    for (i = 0; i < LOOKBACK; i++) { 
        threshold_series[i] = 40 + (atr_series[i] * 100) - (pi_series[i] * 10);
        threshold_series[i] = clamp(threshold_series[i], 30, 70);
    }
}

function run() {
    set(PARAMETERS);  
    
	 StartDate = 20231231;
    EndDate = 2025;
    NumWFOCycles = 10;
    BarPeriod = 1;
    LookBack = 150;
    Capital = 1000;
	 
    vars X = series(priceClose());
    vars X_diff = series(priceClose(1) - priceClose());
    var pi_t = computePiT(); 

    vars threshold_series = series(0, LOOKBACK);
    computeThresholdSeries(threshold_series);

    vars DTREE_Features = series(0, FEATURES);
    int i;
    for (i = 0; i < LOOKBACK; i++) { 
        DTREE_Features[i] = threshold_series[i];
    }

    var trade_threshold = perceptronPredict(DTREE_Features, FEATURES);
    trade_threshold = clamp(trade_threshold, 30, 70);

    vars TradeFeatures = series(0, 3);
    TradeFeatures[0] = X[0];
    TradeFeatures[1] = X_diff[0];
    TradeFeatures[2] = pi_t;

    var long_prob = perceptronPredict(TradeFeatures, 3);
    var short_prob = perceptronPredict(TradeFeatures, 3) - 10;  

    int trade_flag = ifelse(long_prob > trade_threshold || short_prob > trade_threshold, 1, 0);
    var trade_size = ifelse(is(PARAMETERS), perceptronPredict(TradeFeatures, 3) / 100.0 * X_MAX, 10);
    string order_type = ifelse(long_prob > trade_threshold, "Market", "Limit");

    if (trade_flag) {
        if (long_prob > short_prob) {
            if (strstr(order_type, "Market")) {
                enterLong(trade_size);
            } else {
                enterLong(trade_size);
                Entry = priceClose() * 1.001;  
            }
        } else {
            if (strstr(order_type, "Market")) {
                enterShort(trade_size);
            } else {
                enterShort(trade_size);
                Entry = priceClose() * 0.999;  
            }
        }
    }

    printf("\nTrade Decision: %s", ifelse(trade_flag, "Execute", "Hold"));
    printf("\nTrade Size: %.2f units", trade_size);
    printf("\nOrder Type: %s", order_type);
    printf("\nPredicted Liquidity Probability (pi_t): %.2f", pi_t);
    printf("\nDynamic Threshold (DTREE Replaced): %.2f", trade_threshold);
}

Last edited by TipmyPip; 02/20/25 09:57.
Re: Zorro Trader GPT [Re: TipmyPip] #488622
02/20/25 23:12
02/20/25 23:12
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
Challenges in Developing Machine Learning-Based Algorithmic Trading Systems

Introduction
The development of machine learning-driven trading systems presents a unique set of challenges, blending financial market analysis, algorithmic programming, and data-driven decision-making. While machine learning models promise improved decision-making and adaptability, the process of implementing them in a live trading environment is far from straightforward.

In this essay, we explore the key challenges that arise in projects like ours, based on our experience designing a feature-driven, self-learning trading system using a perceptron-based advisory model. These challenges fall into three broad categories:

Data Representation and Feature Engineering
Machine Learning Training and Model Adaptability
Financial Market Constraints and Changing Conditions
This essay aims to provide insights to traders, developers, and researchers who may face similar issues when developing algorithmic trading strategies.

1. Data Representation and Feature Engineering Issues

1.1 Importance of High-Quality Features

At the core of any machine learning model is feature engineering—the process of selecting and transforming raw market data into meaningful inputs for decision-making. The better the features, the better the model’s ability to predict market movements.

In our project, we incorporated features such as:

ATR (Average True Range) to measure volatility
Bid-Ask Spread to assess market liquidity
Standard Deviation of Closing Prices to capture price dispersion
Price Differences as a measure of momentum
However, several critical problems emerged in the data representation process:

Numerical Instability & Precision Loss

Some features frequently returned zero or negative values, which disrupted the machine learning model’s ability to generalize.
Rounding and scaling errors (e.g., improper use of fix0()) led to loss of precision, resulting in nearly identical feature values across different training cycles.
Feature Redundancy and Lack of Variability

Some computed features did not change significantly over time, leading to a situation where the model was effectively training on static data.
Without diverse and meaningful variations in input data, the machine learning model cannot detect new patterns.
Feature Misalignment with Market Structure

Some features were not well-suited to certain market regimes. For instance, ATR-based volatility measures became less useful during periods of extreme liquidity shifts.
Solution Strategies:

Introduce adaptive feature scaling to ensure values remain within an appropriate range (see the sketch after this list).
Conduct feature importance analysis to identify which variables truly impact predictions.
Incorporate higher-frequency data (like order flow dynamics) to improve predictive power.
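As a concrete form of adaptive feature scaling, a rolling z-score keeps every feature centered and comparable across assets and volatility regimes. A minimal sketch; scaleFeature is an illustrative helper, not from the original system:

Code
// rolling z-score normalization of a feature series
var scaleFeature(vars Feature, int period)
{
    var mean = SMA(Feature, period);
    var sd   = StdDev(Feature, period);
    return ifelse(sd > 0, (Feature[0] - mean)/sd, 0); // 0 when flat
}

// usage: var zATR = scaleFeature(series(ATR(14)), 100);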

2. Challenges in Machine Learning Model Training

2.1 Model Learning Failure
A fundamental issue we encountered was that our machine learning models (Perceptron and Decision Tree) failed to adjust their decision boundaries over multiple training cycles.
This was evident in:

Consistently repeated model outputs after multiple cycles of training.
Static probabilities for long and short signals, suggesting the model was not learning new market behaviors.
Possible causes:

Training Data was Too Homogeneous

Without diverse market conditions in training data, the model struggled to learn different trading regimes.
Weights Not Updating Properly

If weight adjustments remain close to zero in every cycle, the model does not actually improve with each iteration.
Overuse of Fixed Normalization (fix0())

Removing decimal precision from input values likely weakened the depth of training, causing key information to be lost.
Solution Strategies:

Track weight updates after each training cycle to confirm learning is happening.
Introduce a rolling training window where only the most recent bars influence model updates.
Replace aggressive rounding functions (fix0) with normalized feature scaling that preserves market structure.

3. Financial Market Constraints and Changing Conditions

3.1 Market Regime Shifts
Financial markets are highly dynamic, meaning that patterns that existed during one period may become irrelevant later.
One of our biggest challenges was that the trading model performed consistently poorly after retraining, suggesting it was not adapting to new market conditions.

Key issues:

Market Volatility and Liquidity Changes
A model trained on low-volatility conditions may completely fail when a high-volatility regime emerges.
Lack of Order Flow Sensitivity
Our model did not include bid-ask imbalance data, which is critical for understanding short-term price movements.
Decision Threshold Anomalies
In multiple cases, our model produced trade thresholds of exactly zero, which resulted in no trading signals at all.
Solution Strategies:

Regime detection mechanisms that identify when the market has shifted and trigger adaptive model retraining (see the sketch after this list).
Weighting recent price action more heavily in the learning process.
Enhancing feature sets with order book and volume-related indicators.
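A crude but effective regime detector can be built from a short/long volatility ratio; the 20/100 periods and the 1.5 trigger below are illustrative assumptions:

Code
// short-term vs. long-term volatility ratio as a regime flag
var volRegime()
{
    var fast = ATR(20);
    var slow = ATR(100);
    return ifelse(slow > 0, fast/slow, 1.); // well above 1 = new high-vol regime
}

// e.g.: if(volRegime() > 1.5) retrain the model or reduce exposure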

4. Debugging and Development Roadblocks
Beyond the technical issues in data and machine learning, real-world development also involves practical debugging difficulties:

Logging Issues: While we implemented logging functions, critical errors still required manual analysis of training output.
Error Propagation: A single feature issue (e.g., spread miscalculation) could cascade through the entire system, corrupting multiple layers of logic.
Cycle-Based Training Artifacts: Each new WFO (Walk Forward Optimization) cycle appeared to reset some learned information, introducing unexpected initialization problems.
How We Are Addressing These:

More granular debugging logs that track how each feature changes per training cycle.
Additional sanity checks on input data before passing it into the machine learning system.
Experimenting with incremental training updates instead of full retrains per cycle.
Conclusion
Developing machine learning-driven trading systems is a complex challenge that requires a multi-disciplinary approach across data science, financial modeling, and software engineering.

The key lessons learned from our project include:

Feature Engineering is the Most Critical Factor

Poorly designed features will lead to poor model performance, regardless of the sophistication of the machine learning algorithms.
Machine Learning Models Must Show Continuous Learning

If a model’s outputs are unchanging after multiple retrains, it is likely suffering from a lack of data diversity or improper weight updates.
Financial Markets Are Non-Stationary

Models that do not adapt to changing market conditions will become obsolete quickly.
For those embarking on similar projects, the key takeaway is that algorithmic trading development is an iterative process. No machine learning model will work perfectly out of the box, and extensive debugging, refinement, and real-world validation are necessary to build a robust and reliable system.

By addressing issues in feature selection, model learning dynamics, and real-world market adaptation, developers can improve their chances of creating an effective trading strategy that remains competitive in dynamic financial environments.
_______________________________________________________________________________________________________________________________________

Potential Problems in Machine Learning-Based Trading Strategies Using Perceptron Networks
Implementing a machine learning-driven trading strategy involves several potential pitfalls that can severely impact performance, especially when using Perceptron-based advisory models as seen in the given example. Below, I will walk through the major problems that may arise in this code and discuss how they could impact real-world trading.

1. Data Quality and Indicator Calculation Issues

Problem 1: Feature Selection and Indicator Stability
The model relies on:

Awesome Oscillator (AO)
Relative Strength Index (RSI)
Basic price movements (Close Prices)
Potential Issue: Indicator Lag & False Signals
AO is a lagging indicator (based on a 5-period and 34-period SMA) and may not respond quickly to price changes.
RSI fluctuates around 50 and might not provide a strong enough signal on its own.
False crossovers or valleys: When using crossOver(AO, 0) or valley(AO), false signals may occur due to noise in the data.

Example Failure Case: False Crossover

Imagine AO crosses above zero due to a small market fluctuation, but the market immediately reverses.
The Perceptron treats this as a valid signal, leading to a bad trade.
Mitigation
Use volatility filters (e.g., ATR thresholds) to confirm signal strength.
Consider momentum confirmation rules.

2. Perceptron Learning and Weight Adaptation Issues

Problem 2: Perceptron Not Learning Properly

Each MLp[x] learns to recognize a specific condition in the market. However, since conditions are binary (0 or 1), the learning process may struggle due to:
Lack of meaningful variation: If most conditions stay at 0 (e.g., no crossover happens), the Perceptron doesn’t learn a useful pattern.
Bias toward non-trading: If the data is imbalanced, the model might default to always predicting no trade (finalOutput = 0).
Redundant learning: Since multiple Perceptrons are trained on similar conditions, the system might reinforce identical signals, reducing decision diversity.

Example Failure Case: Static Learning
If condition1 (AO trend) is mostly zero in historical data, the Perceptron may never learn an edge.
Over time, MLp[0] ≈ 0, meaning it contributes nothing to the final decision.

Mitigation
Regularly check Perceptron weight updates.
Introduce a fallback strategy for cases where MLp outputs remain static.

3. Dynamic Threshold Learning Issues
Problem 3: Threshold Convergence to Zero
The threshold (MLsignals[5]) is trained dynamically based on MLp outputs, but there are major risks:

If the Perceptron fails to distinguish good trades from noise, threshold will be too low, leading to random trades.
If the Perceptron learns incorrect correlations, threshold may converge to zero, making every price fluctuation trigger a trade.
Example Failure Case: Zero Threshold Anomaly

Code
Bar 1023: Threshold = 0.00001
Bar 1024: FinalOutput = 0.00002 → Triggers Long Entry
Bar 1025: FinalOutput = -0.00003 → Triggers Short Entry


This results in rapid overtrading and unnecessary losses.

Mitigation
Implement a minimum threshold value (e.g., clamp between 5 and 95; see the sketch below).
Add a decay factor that prevents the threshold from over-adapting to noise.
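Both mitigations fit in a small guard function; a sketch where the 0.95 decay rate toward the neutral 50 level is an assumption:

Code
// keep a learned threshold inside sane bounds and pull it toward neutral
var guardThreshold(var threshold)
{
    threshold = clamp(threshold, 5, 95); // hard floor and ceiling
    return 0.95*threshold + 0.05*50.;    // slow decay toward 50 resists noise
}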

4. Market Regime Sensitivity & Overfitting Risks
Problem 4: Overfitting to Specific Market Conditions
Since the model learns from historical patterns, it may:

Fail in new market conditions (e.g., high volatility events not seen in training).
Overreact to short-term anomalies rather than real trends.
Ignore macroeconomic changes (interest rate hikes, black swan events).
Example Failure Case: Overfitting to One Market Condition
Suppose the model trained on low-volatility data (e.g., 2019-2020 forex markets).
If a high-volatility event like COVID-19 news occurs, the learned patterns may break down.

Mitigation
Train with rolling windows rather than one long dataset.
Include market regime filters to adjust Perceptron weights dynamically.

5. Debugging and Visibility Issues

Problem 5: Lack of Visibility on Model Predictions
Without proper debugging logs, it’s difficult to know why a trade was placed.
If finalOutput changes unpredictably, it's unclear which MLp[x] contributed most.
Mitigation
Log raw outputs for each MLp signal.
Print threshold and final decision values at each step.
Final Thoughts
While machine learning offers great potential in trading, its implementation comes with several risks. The key takeaways from this example are:

Market indicators must be carefully chosen to avoid redundancy and noise.
Perceptron training should be monitored to ensure it learns useful patterns.
Threshold learning can break the system if it converges to 0 or extreme values.
Market regime shifts can destroy static models, requiring adaptive learning.
By identifying and mitigating these issues early, algorithmic traders can build more robust and reliable machine-learning trading strategies.

Last edited by TipmyPip; 02/20/25 23:14.
Markov Chain and Stochastic Asset Transitions [Re: TipmyPip] #488661
03/16/25 12:37
03/16/25 12:37
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
Markov Chain and Stochastic Asset Transitions

A computational finance researcher is developing a Markov Chain-based stochastic model for predicting asset transitions in an algorithmic trading system. The trader's model tracks the evolution of asset-algorithm pairs over time and records state transitions in a matrix.

Abstract Problem Definition
Let A be a finite set of traded assets and G be a finite set of trading algorithms.

Each market state s is uniquely determined by a pair (a_i, g_j) such that:

s(a_i, g_j) = i·|G| + j

where:

i is the index of the asset a_i in A,
j is the index of the algorithm g_j in G,
|G| is the total number of algorithms.
At any given time t, the system moves from one state to another, forming a discrete-time stochastic process represented by a transition probability matrix
P, where:

P_ij = P(S_t = s_j | S_(t−1) = s_i)

i.e., the probability of transitioning from state s_i to state s_j.

The trader has recorded a total of N transitions over a period T and wishes to analyze various computationally intensive properties of the model.

Computational Challenge
On a given trading day, the trader observes the following facts:

A total of N = 10^5 transitions are recorded.
The most frequently visited state is s(a_k, g_r), where:

a_k belongs to the top 5% of the most liquid assets.
g_r is the RSI-based strategy.
The highest probability transition is from s(a_k, g_r) → s(a_m, g_d) with a transition frequency f. Using this data, answer the following computationally intensive questions:

Questions:
1. Eigenvalues and Steady-State Distribution
The trader wants to compute the long-term stationary distribution π of the Markov Chain, which satisfies:

π P = π, where π is the left eigenvector of P corresponding to eigenvalue 1.

Compute π numerically using an eigenvalue decomposition.

2. Transition Entropy and Predictability

Define the Shannon entropy of transitions as:

H = −∑_(i,j) P_ij · log P_ij

where P_ij is the normalized transition probability.

Compute H and interpret whether the transitions are highly predictable (low entropy) or random (high entropy).

3. Market Regime Clustering
Using the spectral properties of P, perform clustering of market states using Laplacian eigenmaps:

L = D − P, where D is the diagonal degree matrix.

Compute the first k eigenvectors of L and use k-means clustering to group market states into regimes.

4. Optimal Transition Path for Maximal Reward

Given a reward function R(s_i) that assigns a profit to each state, compute the optimal path through the Markov Chain that maximizes expected reward:

V*(s_i) = max ( R(s_i) + γ ∑_j P_ij V*(s_j) )

where γ is the discount factor.

Solve for V* using dynamic programming (Bellman recursion).

Bonus: Practical Implementation
Implement eigenvalue decomposition numerically to find steady-state probabilities (a power-iteration sketch follows below).
Use Monte Carlo simulation to approximate transition entropy.
Apply Laplacian spectral clustering to identify market regimes.
Compute optimal trading state sequences using Markov Decision Processes (MDP).
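For the steady-state vector (question 1) and the transition entropy (question 2), a full eigen-solver is not strictly needed: power iteration converges to the left eigenvector of P for eigenvalue 1. A minimal sketch built on the markovChain[][] counts from the library below; N_STATES, buildP, steadyState and transitionEntropy are illustrative names:

Code
#define N_STATES 56  // 28 assets * 2 algorithms actually used

var P[N_STATES][N_STATES]; // row-normalized transition probabilities
var Pi[N_STATES];          // stationary distribution estimate

// turn the recorded transition counts into probabilities
void buildP()
{
    int i, j;
    for(i = 0; i < N_STATES; i++) {
        var rowSum = 0;
        for(j = 0; j < N_STATES; j++) rowSum += markovChain[i][j];
        for(j = 0; j < N_STATES; j++)
            P[i][j] = ifelse(rowSum > 0, markovChain[i][j]/rowSum, 1./N_STATES);
    }
}

// power iteration: Pi <- Pi*P until it settles
void steadyState(int iterations)
{
    int i, j, k;
    for(i = 0; i < N_STATES; i++) Pi[i] = 1./N_STATES;
    for(k = 0; k < iterations; k++) {
        var Next[N_STATES];
        for(j = 0; j < N_STATES; j++) {
            Next[j] = 0;
            for(i = 0; i < N_STATES; i++) Next[j] += Pi[i]*P[i][j];
        }
        for(j = 0; j < N_STATES; j++) Pi[j] = Next[j];
    }
}

// Shannon entropy of transitions, weighted by the stationary distribution
var transitionEntropy()
{
    var H = 0;
    int i, j;
    for(i = 0; i < N_STATES; i++)
        for(j = 0; j < N_STATES; j++)
            if(P[i][j] > 0) H -= Pi[i]*P[i][j]*log(P[i][j]);
    return H;
}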



Code
#ifndef MARKOVCHAINLIB_H
#define MARKOVCHAINLIB_H

#include <default.c> //  Ensures string functions are available

#define MAX_STATES 100

// Declare the Markov Chain transition matrix
int markovChain[MAX_STATES][MAX_STATES];

// Initialize the Markov Chain matrix with zeros manually
void initMarkovChain() {
    int i, j;
    for(i = 0; i < MAX_STATES; i++) {
        for(j = 0; j < MAX_STATES; j++) {
            markovChain[i][j] = 0;
        }
    }
}

//  Fixed: Use `strcmp()` or `==` for comparison
int findIndex(string* array, int size, string target) {
    int i;
    for (i = 0; i < size; i++) {
        if (array[i] == target) {  //  Simplified comparison
            return i; // Found, return index
        }
    }
    return -1; // Not found
}

// Function to get the index of an asset
int assetIdx(string asset) {
    static string assets[28];

    assets[0] = "EUR/USD";  assets[1] = "GBP/USD";  assets[2] = "USD/JPY"; 
    assets[3] = "USD/CHF";  assets[4] = "USD/CAD";  assets[5] = "AUD/USD"; 
    assets[6] = "NZD/USD";  assets[7] = "EUR/GBP";  assets[8] = "EUR/JPY"; 
    assets[9] = "EUR/CHF";  assets[10] = "GBP/JPY"; assets[11] = "GBP/CHF";
    assets[12] = "AUD/JPY"; assets[13] = "AUD/CHF"; assets[14] = "NZD/JPY";
    assets[15] = "NZD/CHF"; assets[16] = "CAD/JPY"; assets[17] = "CAD/CHF";
    assets[18] = "CHF/JPY"; assets[19] = "EUR/AUD"; assets[20] = "EUR/NZD"; 
    assets[21] = "EUR/CAD"; assets[22] = "GBP/AUD"; assets[23] = "GBP/NZD"; 
    assets[24] = "GBP/CAD"; assets[25] = "AUD/NZD"; assets[26] = "AUD/CAD"; 
    assets[27] = "NZD/CAD";

    return findIndex(assets, 28, asset);
}

// Function to get the index of an algorithm
int algoIdx(string algo) {
    static string algos[2]; 
    algos[0] = "rsi";
    algos[1] = "digi";

    return findIndex(algos, 2, algo);
}

// Function to compute the state ID based on asset and algorithm
int getStateID(string asset, string algo) {
    int aIdx = assetIdx(asset);
    int algoIdxValue = algoIdx(algo);
    
    if (aIdx == -1 || algoIdxValue == -1) {
        return -1; // Return invalid state if either index is not found
    }
    
    return aIdx * 2 + algoIdxValue; // Multiply by 2 because we have 2 algorithms
}

// Update the Markov Chain transition count
void updateMarkovChain(int prev, int next) {
    if (prev >= 0 && prev < MAX_STATES && next >= 0 && next < MAX_STATES) {
        markovChain[prev][next]++;
    }
}

#endif // MARKOVCHAINLIB_H




Code
#include "MarkovChainLib.h" // Include the Markov Chain logic

var MLsignals[8];
var Total_Dist = 0;
var Max_Weight = 0.3;
var totalWeight = 0;

#define dist AlgoVar[0]
#define component_weight AlgoVar[1]

void updateDist() {
    vars EquityCurve = series(EquityLong + EquityShort);
    vars EquityFilt = series(LowPass(EquityCurve, 100));
    dist = (EquityCurve[0] - EquityFilt[0]) * PIP;

    vars rsiSeries = series(RSI(series(price()), 14));
    vars atrSeries = series(ATR(100));
    MLsignals[0] = rsiSeries[0];
    MLsignals[1] = atrSeries[0];
    MLsignals[2] = EquityCurve[0];
    MLsignals[3] = EquityFilt[0];
    MLsignals[4] = dist;
    MLsignals[5] = component_weight;

    if (dist > 0) Total_Dist += dist;
}

void componentWeight() {
    if (dist <= 0) {
        component_weight = 0;
    } else {
        component_weight = ifelse(Total_Dist > 0, dist / Total_Dist, 0);
        if (component_weight > Max_Weight) component_weight = Max_Weight;

        var perceptronOutput = adviseLong(PERCEPTRON+RETURNS, 2, MLsignals, 8);
        Margin = 0.025 * component_weight * Capital * (1 + ifelse(perceptronOutput > 0, perceptronOutput / 100, perceptronOutput / 200));
    }
    totalWeight += component_weight;
}

//  Define the `tradeRSI()` function
void tradeRSI() {
    vars RSIs = series(RSI(series(price()), 14));

    if (crossOver(RSIs, 70)) {
        enterShort(); // RSI crosses above 70 -> Sell
    }
    else if (crossUnder(RSIs, 30)) {
        enterLong(); // RSI crosses below 30 -> Buy
    }
}

//  Define the `tradeDigi()` function
void tradeDigi() {
    if (price() > SMA(series(price()), 50)) {
        enterLong();  // If price is above SMA(50), enter a long trade
    } else {
        enterShort(); // Otherwise, enter a short trade
    }
}

void run() {
    set(PARAMETERS | RULES | PLOTNOW | TESTNOW);  
    StartDate = 20231231;
    EndDate = 2025;
    NumWFOCycles = 10;
    BarPeriod = 60;
    LookBack = 150;
    Capital = 1000;

    initMarkovChain();

    int prevState = -1;

    while (asset(loop(
        "EUR/USD", "GBP/USD", "USD/JPY", "USD/CHF", "USD/CAD", "AUD/USD", "NZD/USD",
        "EUR/GBP", "EUR/JPY", "EUR/CHF", "GBP/JPY", "GBP/CHF", "AUD/JPY", "AUD/CHF", "GBP/CHF", "NZD/CAD",
        "NZD/JPY", "NZD/CHF", "CAD/JPY", "CAD/CHF", "CHF/JPY",
        "EUR/AUD", "EUR/NZD", "EUR/CAD", "GBP/AUD", "GBP/NZD", "GBP/CAD", "AUD/NZD")))
    {
        while (algo(loop("rsi","digi"))) {
            updateDist();
            componentWeight();

            int currentStateID = getStateID(Asset, Algo);
            if (prevState != -1) updateMarkovChain(prevState, currentStateID);
            prevState = currentStateID;

            // FIXED: Replace `strxcmp()` with `strcmp()` or `==`
            if (Algo == "rsi") tradeRSI();
            else if (Algo == "digi") tradeDigi();
        }
    }

    // Normalize weights after all pairs and algos are processed
    while (algo(loop("rsi","digi"))) {
        component_weight /= totalWeight;
        plot(strf("Weight_%s_%s", Asset, Algo), component_weight, NEW, RED);
    }

    PlotWidth = 600;
    PlotHeight1 = 400;
}

Last edited by TipmyPip; 03/16/25 12:40.
VWAP Indicator for Zorro [Re: TipmyPip] #488679
03/26/25 23:22
03/26/25 23:22
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
Here is a working VWAP indicator for Zorro.

Code
// === Helper Functions (must be above run) ===

var vwapPrice(int offset, int type)
{
	switch (type) {
		case 0: return priceClose(offset);
		case 1: return priceOpen(offset);
		case 2: return priceHigh(offset);
		case 3: return priceLow(offset);
		case 4: return (priceHigh(offset) + priceLow(offset)) / 2;
		case 5: return (priceHigh(offset) + priceLow(offset) + priceClose(offset)) / 3;
		case 6: return (priceHigh(offset) + priceLow(offset) + 2*priceClose(offset)) / 4;
		default: return priceClose(offset);
	}
}

var stdDevVWAP(int N, int offset, int type, var vwapValue)
{
	var s = 0;
	int i;
	for (i = 0; i < N; i++) {
		var p = vwapPrice(offset + i, type);
		s += pow(p - vwapValue, 2);
	}
	return sqrt(s / N);
}

var VWAP_Z(int N, int offset, int type)
{
	var sumPV = 0, sumVol = 0;
	int i;
	for (i = 0; i < N; i++) {
		var price = vwapPrice(offset + i, type);
		var volume = marketVol(offset + i);
		sumPV += price * volume;
		sumVol += volume;
	}
	return ifelse(sumVol > 0, sumPV / sumVol, 0);
}

// === Main Strategy/Indicator Function ===

function run()
{
	set(PLOTNOW);

	int N = 20;
	int type = 0; // 0 = PRICE_CLOSE
	var dev1 = 1, dev2 = 2, dev3 = 2.5;

	var vwapVal = VWAP_Z(N, 0, type);
	var stdDev = stdDevVWAP(N, 0, type, vwapVal);

	plot("VWAP", vwapVal, LINE, BLACK);

	plot("UpperBand1", vwapVal + dev1 * stdDev, LINE, RED);
	plot("LowerBand1", vwapVal - dev1 * stdDev, LINE, RED);

	plot("UpperBand2", vwapVal + dev2 * stdDev, LINE, GREEN);
	plot("LowerBand2", vwapVal - dev2 * stdDev, LINE, GREEN);

	plot("UpperBand3", vwapVal + dev3 * stdDev, LINE, BLUE);
	plot("LowerBand3", vwapVal - dev3 * stdDev, LINE, BLUE);
}
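A possible band-reversion usage of the functions above, replacing the plotting run(); the 2-sigma entries and VWAP exits are illustrative, not a tested strategy:

Code
// mean-reversion entries at the outer bands, exits at the VWAP itself
function run()
{
	int N = 20;
	var vwapVal = VWAP_Z(N, 0, 0);
	var sd = stdDevVWAP(N, 0, 0, vwapVal);

	if (priceClose(0) < vwapVal - 2*sd) enterLong();  // stretched below fair value
	if (priceClose(0) > vwapVal + 2*sd) enterShort(); // stretched above fair value
	if (NumOpenLong  && priceClose(0) >= vwapVal) exitLong();
	if (NumOpenShort && priceClose(0) <= vwapVal) exitShort();
}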

ZorroGPT [Re: TipmyPip] #488745
05/23/25 09:22
05/23/25 09:22
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
New Version of ZorroGPT 2.66.1

https://bit.ly/3Hlfg8S

Code
function var ComplexIndicator()
{
    vars Price = series(price());
    vars FastMA = series(SMA(Price, 10));
    vars SlowMA = series(SMA(Price, 30));
    
    // Custom weighted oscillator
    var diff = (FastMA[0] - SlowMA[0]);
    var norm = (abs(FastMA[0]) + abs(SlowMA[0]))/2;
    var rawOsc = diff / (norm + 0.0001); // avoid div by 0
    
    // Non-linear scaling and signal enhancement
    var weight = pow(rawOsc, 3) * sin(4 * rawOsc); // enhanced non-linearity
    var smooth = EMA(series(weight), 14); // smooth the output

    return smooth[0];
}

Last edited by TipmyPip; 05/23/25 09:23.
Market Manipulation Index (MMI) for Zorro [Re: TipmyPip] #488816
07/09/25 13:41
07/09/25 13:41
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
Market Manipulation Index (MMI): A Powerful Tool to Detect Market Irregularities
In today's highly automated markets, price movements often don't reflect pure supply and demand. Instead, they can be influenced by large players and algorithmic manipulation. The Market Manipulation Index (MMI) is a technical indicator designed to expose such behavior by quantifying market irregularity.

What Is MMI?

The MMI measures how predictable or structured a market’s behavior is, based on:

Sine wave deviation: Measures how well price movements align with cyclical patterns.

Predictability analysis: Uses linear regression to see how predictable prices are based on past data.

Spectral energy analysis (optional): Assesses noise vs. structure via smoothed price bands.

What Does MMI Tell You?

Low MMI (< 0.3): A clean, trending, or mean-reverting market — easier to trade.

High MMI (> 0.7): A noisy or manipulated market — harder to predict and trade reliably.

How Is MMI Calculated?

At its core, MMI uses:

Rolling volatility (variance)

EMA-smoothened error estimates

Deviation from sine waves and linear predictions

It normalizes results to a 0–1 scale, highlighting when the market departs from natural structures.

How To Use MMI

As a filter: Only take trades when MMI is low (e.g., < 0.3).

As a warning: Avoid entering trades during high MMI spikes.

Combined with VWAP: Use VWAP-based MMI to detect price distortions around fair value.

With other signals: Use MMI to confirm or reject breakout or reversal signals.

Practical Trading Tip

Pair MMI with volume or VWAP:

If price deviates strongly from VWAP and MMI is high, manipulation may be in play.

If price returns to VWAP and MMI drops, the market may stabilize — a good entry zone.

Available in Zorro Trader

This indicator can be implemented in lite-C for Zorro, fully compatible with live and backtesting. You can:

Visualize MMI over time

Trigger signals from MMI zones

Customize sensitivity via adaptive smoothing and windows

Conclusion

MMI is not just an indicator — it’s a market integrity scanner. When combined with volume, VWAP, and structure-aware strategies, it becomes a powerful filter to protect traders from erratic or manipulated conditions.

Use MMI to step away from noise — and trade only the moments that matter.

Code
var clamp(var value, var min, var max)
{
	if (value < min) return min;
	if (value > max) return max;
	return value;
}

var adaptiveWinLength()
{
	var atr_val = ATR(14);
	return max(10, round(50 * (atr_val / priceClose()) * 1.0));
}

var sineWave()
{
	return sin(2 * PI * Bar / 20); // SINE_LEN = 20
}

var spectralEnergy(int baseWin)
{
	static var* lowSeries;
	static var* highSeries;

	var lowBand = EMA(priceClose(), 34) - EMA(priceClose(), 89);
	var highBand = priceClose() - EMA(priceClose(), 8);

	lowSeries = series(lowBand);
	highSeries = series(highBand);

	var energyLow = Variance(lowSeries, baseWin);
	var energyHigh = Variance(highSeries, baseWin);

	var spectralRatio = energyHigh / (energyHigh + energyLow + 0.000001);
	return clamp(spectralRatio, 0, 1);
}

var predictabilityMI(int window)
{
	static var* priceSeries;
	priceSeries = series(priceClose());

	var x1 = priceClose(1);
	var x2 = priceClose(2);
	var y = priceClose();

	var sum_x1 = EMA(x1, window);
	var sum_x2 = EMA(x2, window);
	var sum_y = EMA(y, window);

	var sum_x1x1 = EMA(x1 * x1, window);
	var sum_x2x2 = EMA(x2 * x2, window);
	var sum_x1x2 = EMA(x1 * x2, window);
	var sum_x1y = EMA(x1 * y, window);
	var sum_x2y = EMA(x2 * y, window);

	var denom = sum_x1x1 * sum_x2x2 - sum_x1x2 * sum_x1x2;
	var a = ifelse(denom != 0, (sum_x2x2 * sum_x1y - sum_x1x2 * sum_x2y) / denom, 0);
	var b = ifelse(denom != 0, (sum_x1x1 * sum_x2y - sum_x1x2 * sum_x1y) / denom, 0);

	var y_hat = a * x1 + b * x2;
	var residual = y - y_hat;
	var mse_pred = EMA(pow(residual, 2), window);
	var var_price = Variance(priceSeries, 50);

	return clamp(mse_pred / var_price, 0, 1);
}

var sineMI(int window)
{
	static var* priceSeries;
	priceSeries = series(priceClose());

	var sine = sineWave();
	var price = priceClose();
	var sine_dev = sine - EMA(sine, window);
	var price_dev = price - EMA(price, window);
	var mse_sine = EMA(pow(price_dev - sine_dev, 2), window);
	var var_price = Variance(priceSeries, 50);

	return clamp(mse_sine / var_price, 0, 1);
}

// === Main Indicator Function ===

function run()
{
	set(PLOTNOW);

	if (Bar < 60) return;

	int win = adaptiveWinLength();

	var mi_sine = sineMI(win);
	var mi_pred = predictabilityMI(win);
	var mi_spec = spectralEnergy(50);

	var cmi_raw = (mi_sine + mi_pred + mi_spec) / 3;
	var cmi = EMA(cmi_raw, 5); // SMOOTH = 5

	plot("MMI", cmi, LINE, RED);
	plot("Low", 0.3, LINE, GREEN);
	plot("High", 0.7, LINE, ORANGE);
}
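A minimal usage sketch: gate a simple SMA-cross system with the MMI from the helpers above (replace the plotting run(); the 20/50 periods and the 0.3/0.7 gates are illustrative):

Code
// gate a simple trend system with the MMI computed from the helpers above
function run()
{
	set(PLOTNOW);
	if (Bar < 60) return;

	int win = adaptiveWinLength();
	var cmi = EMA((sineMI(win) + predictabilityMI(win) + spectralEnergy(50))/3, 5);

	vars Price = series(price());
	vars Fast = series(SMA(Price, 20));
	vars Slow = series(SMA(Price, 50));

	if (cmi < 0.3) { // structured market: allow trend entries
		if (crossOver(Fast, Slow)) enterLong();
		if (crossUnder(Fast, Slow)) enterShort();
	} else if (cmi > 0.7) { // noisy/manipulated market: stand aside
		exitLong();
		exitShort();
	}
	plot("MMI", cmi, NEW, RED);
}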

enhMMI indicator [Re: TipmyPip] #488828
07/20/25 06:53
07/20/25 06:53
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
I believe we have arrived at a very significant waypoint in our contribution to the Zorro Project, and we are about to take a massive step forward.

This machine learning–enhanced version of the MMI indicator improves it by making it adaptive, rather than using fixed smoothing and parameters.

Here’s what it adds:

Self-learning behavior

The indicator looks at its last 5 values and uses a small neural network (perceptron) to predict the next change in MMI.

It learns as the market evolves, adapting automatically without manual parameter tuning.

Dynamic responsiveness

If the ML model predicts stable, low-noise conditions, the indicator reduces smoothing so it reacts faster to market changes.

If the model predicts choppy or manipulated conditions, the indicator smooths itself more, filtering out noise.

Better visualization of market regimes

The MMI line now responds differently to quiet vs. volatile markets, making the high and low zones (0.3/0.7) more reliable as signals.

Code
// === Config ===
#define INPUT_SIZE 5       // Number of past MMI points as ML features
#define TRAIN_BARS 500     // Training period for ML model

// === Globals ===
int PeriodMax = 0;

// === Helper Functions ===
var clamp(var value, var minv, var maxv)
{
    if (value < minv) return minv;
    if (value > maxv) return maxv;
    return value;
}

int adaptiveWinLength()
{
    var p = priceClose();
    if (p == 0) return 20; // safety
    var atr_val = ATR(14);
    int w = round(50 * (atr_val / p));
    return max(10, w);
}

var sineWave()
{
    return sin(2*PI*Bar/20.); // SINE_LEN = 20
}

var spectralEnergy(int baseWin)
{
    static var* lowSeries;
    static var* highSeries;

    var lowBand  = EMA(priceClose(),34) - EMA(priceClose(),89);
    var highBand = priceClose() - EMA(priceClose(),8);

    lowSeries  = series(lowBand);
    highSeries = series(highBand);

    var eLow  = Variance(lowSeries, baseWin);
    var eHigh = Variance(highSeries, baseWin);

    var denom = eHigh + eLow + 1e-6;
    var spectralRatio = eHigh / denom;
    return clamp(spectralRatio,0,1);
}

var predictabilityMI(int window)
{
    static var* priceSeries;
    priceSeries = series(priceClose());

    var x1 = priceClose(1);
    var x2 = priceClose(2);
    var y  = priceClose();

    var s_x1   = EMA(x1, window);
    var s_x2   = EMA(x2, window);
    var s_y    = EMA(y,  window);

    var s_x1x1 = EMA(x1*x1, window);
    var s_x2x2 = EMA(x2*x2, window);
    var s_x1x2 = EMA(x1*x2, window);
    var s_x1y  = EMA(x1*y,  window);
    var s_x2y  = EMA(x2*y,  window);

    var denom = s_x1x1*s_x2x2 - s_x1x2*s_x1x2;
    var a = ifelse(denom != 0, (s_x2x2*s_x1y - s_x1x2*s_x2y)/denom, 0);
    var b = ifelse(denom != 0, (s_x1x1*s_x2y - s_x1x2*s_x1y)/denom, 0);

    var y_hat = a*x1 + b*x2;
    var residual  = y - y_hat;
    var mse_pred  = EMA(residual*residual, window);
    var var_price = Variance(priceSeries, 50);

    var ratio = ifelse(var_price > 0, mse_pred/var_price, 0);
    return clamp(ratio,0,1);
}

var sineMI(int window)
{
    static var* priceSeries;
    priceSeries = series(priceClose());

    var s       = sineWave();
    var price   = priceClose();
    var s_dev   = s     - EMA(s,     window);
    var p_dev   = price - EMA(price, window);
    var mse_sin = EMA((p_dev - s_dev)*(p_dev - s_dev), window);
    var var_pr  = Variance(priceSeries, 50);

    var ratio = ifelse(var_pr > 0, mse_sin/var_pr, 0);
    return clamp(ratio,0,1);
}

// === Main Indicator (Adaptive MMI) ===
function run()
{
    set(PLOTNOW);

    // Ensure enough history for all components and ML
    LookBack = max( max(90, 50), INPUT_SIZE + 6 ); // EMA(89) & Variance(50) & features depth
    BarPeriod = 60; // example; set to your actual bar size

    if (Bar < max(LookBack, TRAIN_BARS)) return;

    int win = adaptiveWinLength();

    // --- Base MMI Components ---
    var mi_sine = sineMI(win);
    var mi_pred = predictabilityMI(win);
    var mi_spec = spectralEnergy(50);
    var cmi_raw = (mi_sine + mi_pred + mi_spec)/3;

    // --- Store MMI history for ML ---
    static var* mmiSeries;
    mmiSeries = series(EMA(cmi_raw,5));

    // Make sure series depth is sufficient
    if (mmiSeries[INPUT_SIZE] == 0 && Bar < LookBack + INPUT_SIZE + 2) return;

    // --- Build ML Features (past INPUT_SIZE values) ---
    var features[INPUT_SIZE];
    int i;
    for(i=0; i<INPUT_SIZE; i++)
        features[i] = mmiSeries[i+1]; // strictly past values

    // --- Predict the next change in MMI using ML ---
    // adviseLong's first argument selects the advise method, not a bar count;
    // TRAIN_BARS only gates the warm-up check above.
    var predicted_delta = adviseLong(PERCEPTRON+RETURNS, 0, features, INPUT_SIZE);

    // --- Normalize and control adaptivity ---
    var norm_delta = clamp(predicted_delta, -1, 1);    // keep it bounded
    var adaptFactor = clamp(1 - fabs(norm_delta), 0.4, 0.9);

    // Integer, bounded smoothing period for EMA
    int adaptPeriod = (int)round(5./adaptFactor);
    adaptPeriod = max(2, min(50, adaptPeriod));        // guard rails

    // --- Compute the adaptive MMI (bounded 0-1) ---
    var adaptiveMMI = clamp(EMA(cmi_raw, adaptPeriod), 0, 1);

    // --- Plot the indicator ---
    plot("Adaptive MMI", adaptiveMMI, LINE, RED);
    plot("Predicted ?",  norm_delta, LINE, BLUE);
    plot("Low",  0.3, LINE, GREEN);
    plot("High", 0.7, LINE, ORANGE);
}

Last edited by TipmyPip; 08/08/25 19:28.
Re: enhanced MMI [Re: TipmyPip] #488850
08/06/25 17:15
08/06/25 17:15
Joined: Jan 2017
Posts: 15
Israel
D
dBc Offline
Newbie
dBc  Offline
Newbie
D

Joined: Jan 2017
Posts: 15
Israel
Thank you for sharing this code.

I tried to run it with one of my assets' price series, and the MMI indicator oscillates between 0 and 1.
When I run the previously posted MMI (not adaptive), the MMI value is continuous.

Can you please name one asset you ran the enhanced MMI on?

Many thanks

Attached Files enhMMI_5100151.png
Last edited by dBc; 08/06/25 17:22.
Re: enhanced MMI [Re: dBc] #488853
08/08/25 18:56
08/08/25 18:56
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
You should try the new one. I have improved it a bit since, but you will need to experiment with different assets.

Last edited by TipmyPip; 08/08/25 19:30.
multi-timeframe “Market Mode Index” [Re: TipmyPip] #488854
08/09/25 01:33
08/09/25 01:33
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
A multi-timeframe “Market Mode Index” that blends a fast and a slow version of the same measure, then adapts it dynamically using machine learning.

Let’s break it down step-by-step:

1. Core Idea: Market Mode Index (MMI)
MMI here is a composite metric built from three sub-indicators:

sineMI() – Compares the price’s deviation from a pure sine wave.

Measures cyclicity — whether the market is behaving like a smooth oscillation.

predictabilityMI() – Uses a simple linear regression on lagged prices.

Measures linear predictability — can future price be explained by the past two bars?

spectralEnergy() – Compares “low-band” vs. “high-band” volatility.

Measures energy distribution — is the market dominated by slow or fast components?

These are averaged and normalized between 0 and 1, where:

Low MMI (~0.3 or less) → Market is more trending or predictable.

High MMI (~0.7 or more) → Market is more noisy or mean-reverting.

2. Two Timeframes
You calculate MMI in:

Fast timeframe → Base BarPeriod (e.g., 60 min bars).

Slow timeframe → SLOW_FACTOR * BarPeriod (e.g., 4h bars if SLOW_FACTOR=4).

This produces:

mmiFast = Short-term market mode.

mmiSlow = Longer-term market mode (re-projected to the base TF so they align).
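
For readers new to Zorro’s multi-timeframe handling, here is a minimal sketch of the save/switch/restore pattern the full listing below relies on (computeMMI() and SLOW_FACTOR are taken from that listing; the window length 50 and the LookBack value are illustrative):

Code
// minimal TimeFrame switching sketch; assumes computeMMI() and SLOW_FACTOR
// from the full listing below
function run()
{
    BarPeriod = 60;
    LookBack = 200;

    int tfKeep = TimeFrame;                 // save the current frame
    vars FastMMI = series(computeMMI(50));  // sampled on base BarPeriod bars

    TimeFrame = SLOW_FACTOR;                // bars now span SLOW_FACTOR*BarPeriod
    vars SlowMMI = series(computeMMI(50));  // sampled on the slow frame
    var slowNow = SlowMMI[0];               // latest slow-frame value
    TimeFrame = tfKeep;                     // restore the base frame

    vars SlowOnBase = series(slowNow);      // re-projected onto base bars
    plot("SlowOnBase", SlowOnBase[0], LINE, BLACK);
}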

3. Machine Learning Forecast
The script builds a feature vector from past values of both MMI series:

First 5 values from mmiFast (lagged 1..5 bars)

First 5 values from mmiSlow (lagged 1..5 bars)

That 10-dimensional vector goes into:

adviseLong(TRAIN_BARS, 0, features, MAX_FEATURES)
This uses Zorro’s built-in machine learning (default perceptron) to predict Δ (change) in MMI from this combined history.
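
As a side note on the mechanics, a minimal sketch of how the advise functions behave (the two signals here are purely illustrative, and DTREE is just one of Zorro’s built-in methods): in [Train] mode they collect feature samples, in [Test]/[Trade] mode they return the trained model’s prediction.

Code
// hypothetical 2-signal advise sketch, not part of the indicator
function run()
{
    BarPeriod = 60;
    LookBack = 100;

    var Signals[2];
    Signals[0] = priceClose(1) - priceClose(2); // recent momentum (illustrative)
    Signals[1] = ATR(14);                       // recent volatility (illustrative)

    // collects samples in [Train] mode; returns the prediction
    // (roughly -100..100) in [Test]/[Trade] mode
    var Prediction = adviseLong(DTREE, 0, Signals, 2);
    if(Prediction > 0)
        enterLong();
}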

4. Adaptive Blending
The predicted Δ (direction & magnitude) controls:

How much weight to give mmiFast vs. mmiSlow in the final blend.

How fast the blended MMI is smoothed (adaptive EMA period).

This way:

If the ML thinks fast MMI is stable → more weight on fast component.

If ML thinks change is coming → more weight on slow component, slower smoothing.

5. Outputs
It plots:

Adaptive MMI (red) → The blended, ML-weighted index.

Fast MMI (blue)

Slow MMI (black, projected to base TF)

Pred Δ (purple) → Normalized ML forecast of MMI change.

Low (green line at 0.3) → Often a trend-friendly zone.

High (orange line at 0.7) → Often a range/noise zone.

How to Use It
Traders often interpret MMI like this:

Below low threshold (~0.3) → Favors trend-following systems.

Above high threshold (~0.7) → Favors mean-reversion systems.

Between thresholds → No clear bias.

Here, you have an adaptive, multi-TF version that tries to smooth noise and anticipate regime changes rather than reacting only to raw MMI.
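
Before the full listing, a hedged sketch of that interpretation (the thresholds and the two entry rules are illustrative placeholders, meant to sit inside run() after adaptiveMMI has been computed):

Code
// illustrative regime switch around adaptiveMMI; not part of the indicator
vars Price = series(priceClose());
if(adaptiveMMI < 0.3) {        // trend regime: trade with the long mean
    if(Price[0] > SMA(Price,50)) enterLong();
    else enterShort();
}
else if(adaptiveMMI > 0.7) {   // noise/range regime: fade the short mean
    if(Price[0] < SMA(Price,10)) enterLong();
    else enterShort();
}
// between 0.3 and 0.7: no clear bias, stand aside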

Code
// === Config ===
#define INPUT_SIZE     5       // past points per TF
#define TRAIN_BARS     500
#define SLOW_FACTOR    4       // slow TF = SLOW_FACTOR * BarPeriod
#define MAX_FEATURES   10      // 2 * INPUT_SIZE
#define LONGEST_EMA    89      // longest fixed EMA length in code

// === Safety Helpers ===
int safeWin(int requested)
{
    return min(requested, max(2, Bar - 1)); // clamp to available bars
}

var clamp(var value, var min, var max)
{
    if (value < min) return min;
    if (value > max) return max;
    return value;
}

// === Adaptive Window ===
var adaptiveWinLength()
{
    static var* atrSeries;
    atrSeries = series(ATR(14));
    var atr_val = atrSeries[0];
    return max(10, round(50 * (atr_val / priceClose()) * 1.0));
}

// === Indicators ===
var sineWave()
{
    return sin(2 * PI * Bar / 20); // SINE_LEN = 20
}

var spectralEnergy(int baseWin)
{
    static var* pClose;
    static var* lowBandSeries;
    static var* highBandSeries;

    pClose = series(priceClose());

    var ema34 = EMA(pClose, safeWin(34));
    var ema89 = EMA(pClose, safeWin(LONGEST_EMA));
    var lowBand  = ema34 - ema89;

    var ema8  = EMA(pClose, safeWin(8));
    var highBand = pClose[0] - ema8;

    lowBandSeries  = series(lowBand);
    highBandSeries = series(highBand);

    var energyLow  = Variance(lowBandSeries, safeWin(baseWin));
    var energyHigh = Variance(highBandSeries, safeWin(baseWin));

    var spectralRatio = energyHigh / (energyHigh + energyLow + 0.000001);
    return clamp(spectralRatio, 0, 1);
}

var predictabilityMI(int window)
{
    static var* p;
    static var* p1;
    static var* p2;
    static var* p1_sq;
    static var* p2_sq;
    static var* p1p2;
    static var* p1p;
    static var* p2p;
    static var* res_sq;

    p     = series(priceClose());
    p1    = series(priceClose(1));
    p2    = series(priceClose(2));
    p1_sq = series(p1[0]*p1[0]);
    p2_sq = series(p2[0]*p2[0]);
    p1p2  = series(p1[0]*p2[0]);
    p1p   = series(p1[0]*p[0]);
    p2p   = series(p2[0]*p[0]);

    var sum_x1 = EMA(p1, safeWin(window));
    var sum_x2 = EMA(p2, safeWin(window));
    var sum_y  = EMA(p,  safeWin(window));

    var sum_x1x1 = EMA(p1_sq, safeWin(window));
    var sum_x2x2 = EMA(p2_sq, safeWin(window));
    var sum_x1x2 = EMA(p1p2,  safeWin(window));
    var sum_x1y  = EMA(p1p,   safeWin(window));
    var sum_x2y  = EMA(p2p,   safeWin(window));

    var denom = sum_x1x1 * sum_x2x2 - sum_x1x2 * sum_x1x2;
    var a = ifelse(denom != 0, (sum_x2x2 * sum_x1y - sum_x1x2 * sum_x2y) / denom, 0);
    var b = ifelse(denom != 0, (sum_x1x1 * sum_x2y - sum_x1x2 * sum_x1y) / denom, 0);

    var y_hat    = a * p1[0] + b * p2[0];
    var residual = p[0] - y_hat;

    res_sq = series(pow(residual, 2));

    var mse_pred  = EMA(res_sq, safeWin(window));
    var var_price = Variance(p, safeWin(50));

    return clamp(mse_pred / var_price, 0, 1);
}

var sineMI(int window)
{
    static var* p;
    static var* s;
    static var* sine_dev_sq;

    p = series(priceClose());
    s = series(sineWave());

    var sine_dev  = s[0] - EMA(s, safeWin(window));
    var price_dev = p[0] - EMA(p, safeWin(window));

    sine_dev_sq = series(pow(price_dev - sine_dev, 2));

    var mse_sine  = EMA(sine_dev_sq, safeWin(window));
    var var_price = Variance(p, safeWin(50));

    return clamp(mse_sine / var_price, 0, 1);
}

var computeMMI(int win)
{
    var mi_sine = sineMI(win);
    var mi_pred = predictabilityMI(win);
    var mi_spec = spectralEnergy(50);
    return clamp((mi_sine + mi_pred + mi_spec) / 3, 0, 1);
}

// === Feature readiness ===
int featuresReady(var* fast, var* slow, int size)
{
    int i;
    for(i = 0; i < size; i++)
        if(fast[i+1] == 0 || slow[i+1] == 0)
            return 0; 
    return 1;
}

// === Main ===
function run()
{
    set(PLOTNOW);
    BarPeriod = 60; 
    LookBack  = max(LONGEST_EMA * 2, 2*INPUT_SIZE + 12);

    // Global warm-up guard
    int slowBars = Bar / SLOW_FACTOR;
    int minSlowBars = max(LONGEST_EMA * 2, 2*INPUT_SIZE + 12);
    if (Bar < TRAIN_BARS || slowBars < minSlowBars)
        return;

    // ===== FAST TF =====
    int fastWin = adaptiveWinLength();
    static var* mmiFast;
    mmiFast = series(EMA(series(computeMMI(fastWin)), safeWin(5)));

    // ===== SLOW TF =====
    int tfKeep = TimeFrame;
    TimeFrame  = SLOW_FACTOR;

    int slowWin = max(10, round(fastWin / SLOW_FACTOR));
    static var* mmiSlowTF;
    mmiSlowTF = series(EMA(series(computeMMI(slowWin)), safeWin(5)));

    var mmiSlowNow = mmiSlowTF[0];
    TimeFrame = tfKeep;

    static var* mmiSlowOnBase;
    mmiSlowOnBase = series(mmiSlowNow);

    // ===== Feature guard =====
    if (!featuresReady(mmiFast, mmiSlowOnBase, INPUT_SIZE))
        return;

    // ===== ML Features =====
    var features[MAX_FEATURES];
    int i;
    for(i=0; i<INPUT_SIZE; i++)
        features[i] = mmiFast[i+1];
    for(i=0; i<INPUT_SIZE; i++)
        features[INPUT_SIZE + i] = mmiSlowOnBase[i+1];

    var predicted_delta = adviseLong(TRAIN_BARS, 0, features, MAX_FEATURES);
    var norm_delta = clamp(predicted_delta, -1, 1);

    var adaptFactor  = clamp(1 - fabs(norm_delta), 0.4, 0.9);
    int adaptPeriod  = max(2, min(50, (int)round(5./adaptFactor)));

    var w_fast = clamp(0.5 + 0.5*fabs(norm_delta), 0.4, 0.9);
    var w_slow = 1 - w_fast;
    var cmi_blend = w_fast*mmiFast[0] + w_slow*mmiSlowOnBase[0];

    var adaptiveMMI = clamp(EMA(series(cmi_blend), safeWin(adaptPeriod)), 0, 1);

    // ===== Plots =====
    plot("Adaptive MMI", adaptiveMMI, LINE, RED);
    plot("Fast MMI",     mmiFast[0],  LINE, BLUE);
    plot("Slow MMI",     mmiSlowOnBase[0], LINE, BLACK);
    plot("Pred ?",       norm_delta,  LINE, PURPLE);
    plot("Low",  0.3, LINE, GREEN);
    plot("High", 0.7, LINE, ORANGE);
}

Murrey Math Lines [Re: TipmyPip] #488862
08/21/25 05:43
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
Murrey Math Lines are price-based support and resistance levels derived from a mathematical structure similar to Gann theory. They aim to identify significant price zones for:

Reversals

Breakouts

Support/Resistance

Trend identification

The price range is divided into 8 intervals (called "octaves") and extended to cover 13 Murrey Math Lines from [-2/8] to [+2/8].


How Traders Use MML

Level    Meaning
[+2/8]   Extreme resistance – reversal zone
[+1/8]   Weak resistance
[8/8]    Major resistance – strong reversal
[4/8]    Key Pivot Point – balance level
[0/8]    Major support – strong reversal
[-1/8]   Weak support
[-2/8]   Extreme support – reversal zone


Code
// MurreyMath.c
// Murrey Math Channel for Zorro Lite-C

// --- Determine fractal size ---
var DetermineFractal(var value)
{
    if(value <= 250000 && value > 25000)     return 100000;
    if(value <= 25000  && value > 2500)      return 10000;
    if(value <= 2500   && value > 250)       return 1000;
    if(value <= 250    && value > 25)        return 100;
    if(value <= 25     && value > 12.5)      return 12.5;
    if(value <= 12.5   && value > 6.25)      return 12.5;
    if(value <= 6.25   && value > 3.125)     return 6.25;
    if(value <= 3.125  && value > 1.5625)    return 3.125;
    if(value <= 1.5625 && value > 0.390625)  return 1.5625;
    if(value <= 0.390625 && value > 0)       return 0.1953125;
    return 0;
}

// --- Murrey Math calculation ---
// Fills "levels[13]" with values from [+2/8] to [-2/8]
function MurreyMath(vars PriceHigh, vars PriceLow, int Period, var* levels)
{
    if(Bar < Period + 1) return; // Not enough bars yet

    var min = MinVal(PriceLow, Period);
    var max = MaxVal(PriceHigh, Period);

    var fractal = DetermineFractal(max);
    var range   = max - min;
    var sum     = floor(log(fractal / range) / log(2));
    var octave  = fractal * pow(0.5, sum);
    var mn      = floor(min / octave) * octave;
    var mx      = mn + (2 * octave);
    if((mn + octave) >= max)
        mx = mn + octave;

    // Resistance determination
    var x1 = 0, x2 = 0, x3 = 0, x4 = 0, x5 = 0, x6 = 0;
    if ((min >= (3*(mx-mn)/16 + mn)) && (max <= (9*(mx-mn)/16 + mn))) x2 = mn + (mx - mn)/2;
    if ((min >= (mn - (mx - mn)/8)) && (max <= (5*(mx - mn)/8 + mn)) && (x2 == 0)) x1 = mn + (mx - mn)/2;
    if ((min >= (mn + 7*(mx - mn)/16)) && (max <= (13*(mx - mn)/16 + mn))) x4 = mn + 3*(mx - mn)/4;
    if ((min >= (mn + 3*(mx - mn)/8)) && (max <= (9*(mx - mn)/8 + mn)) && (x4 == 0)) x5 = mx;
    if ((min >= (mn + (mx - mn)/8)) && (max <= (7*(mx - mn)/8 + mn)) && (x1 == 0) && (x2 == 0) && (x4 == 0) && (x5 == 0))
        x3 = mn + 3*(mx - mn)/4;
    if ((x1 + x2 + x3 + x4 + x5) == 0) x6 = mx;
    var resistance = x1 + x2 + x3 + x4 + x5 + x6;

    // Support determination
    var y1 = 0, y2 = 0, y3 = 0, y4 = 0, y5 = 0, y6 = 0;
    if (x1 > 0) y1 = mn;
    if (x2 > 0) y2 = mn + (mx - mn)/4;
    if (x3 > 0) y3 = mn + (mx - mn)/4;
    if (x4 > 0) y4 = mn + (mx - mn)/2;
    if (x5 > 0) y5 = mn + (mx - mn)/2;
    if ((resistance > 0) && ((y1 + y2 + y3 + y4 + y5) == 0)) y6 = mn;
    var support = y1 + y2 + y3 + y4 + y5 + y6;

    var divide = (resistance - support) / 8;

    levels[12] = support - 2*divide;   // [-2/8]
    levels[11] = support - divide;     // [-1/8]
    levels[10] = support;              // [0/8]
    levels[9]  = support + divide;     // [1/8]
    levels[8]  = support + 2*divide;   // [2/8]
    levels[7]  = support + 3*divide;   // [3/8]
    levels[6]  = support + 4*divide;   // [4/8]
    levels[5]  = support + 5*divide;   // [5/8]
    levels[4]  = support + 6*divide;   // [6/8]
    levels[3]  = support + 7*divide;   // [7/8]
    levels[2]  = resistance;           // [8/8]
    levels[1]  = resistance + divide;  // [+1/8]
    levels[0]  = resistance + 2*divide;// [+2/8]
}

// --- Main script ---
function run()
{
	 set(PLOTNOW);
    BarPeriod = 1440; // Daily bars
    LookBack  = 200;  // Required lookback

    // Use correct series() syntax: returns `vars` (pointer array)
    vars PriceHigh = series(priceHigh(), LookBack);
    vars PriceLow  = series(priceLow(), LookBack);

    static var mm[13]; // Buffer for Murrey levels
    MurreyMath(PriceHigh, PriceLow, 64, mm);

    // Plotting the Murrey channels
    plot("MM +2/8", mm[0], LINE, BLUE);
    plot("MM +1/8", mm[1], LINE, BLUE);
    plot("MM 8/8",  mm[2], LINE, BLACK);
    plot("MM 7/8",  mm[3], LINE, RED);
    plot("MM 6/8",  mm[4], LINE, RED);
    plot("MM 5/8",  mm[5], LINE, GREEN);
    plot("MM 4/8",  mm[6], LINE, BLACK);
    plot("MM 3/8",  mm[7], LINE, GREEN);
    plot("MM 2/8",  mm[8], LINE, RED);
    plot("MM 1/8",  mm[9], LINE, RED);
    plot("MM 0/8",  mm[10], LINE, BLUE);
    plot("MM -1/8", mm[11], LINE, BLUE);
    plot("MM -2/8", mm[12], LINE, BLUE);
}
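
A minimal usage sketch (the bias read-out is illustrative, not part of the indicator); it could be appended at the end of run() once MurreyMath() has filled mm[]:

Code
// illustrative read-out of the Murrey levels
var c = priceClose();
if(c > mm[6])   // above the 4/8 pivot: bullish balance
    printf("\nAbove 4/8 pivot %.5f - bullish bias", mm[6]);
else
    printf("\nBelow 4/8 pivot %.5f - bearish bias", mm[6]);
if(c >= mm[2])  printf("\nAt/above 8/8 major resistance %.5f", mm[2]);
if(c <= mm[10]) printf("\nAt/below 0/8 major support %.5f", mm[10]);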

The Strategy of Spiritual Love. [Re: TipmyPip] #488868
09/01/25 17:20
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
In the name of spiritual love, there is a hidden essence to the following code which will enable the creation of really complex strategies, while considering applications vast in their nature... and in their complexity...

Code
#define MAX_BRANCHES 3
#define MAX_DEPTH 4

typedef struct Node {
    var v;      // scalar value
    var r;      // intrinsic rate
    void* c;    // array of child Node* (cast on access)
    int n;      // number of children
    int d;      // depth
} Node;

Node* Root;

Node* createNode(int depth)
{
    Node* u = (Node*)malloc(sizeof(Node));
    u->v = random();
    u->r = 0.01 + 0.02*depth + random()*0.005;
    u->d = depth;

    if(depth > 0) {
        u->n = (int)(random()*MAX_BRANCHES) + 1;
        u->c = malloc(u->n * sizeof(void*));  // allocate array of Node*
        int i;
        for(i = 0; i < u->n; i++)
            ((Node**)u->c)[i] = createNode(depth - 1);
    } else {
        u->n = 0;
        u->c = 0;
    }
    return u;
}

var evaluate(Node* u)
{
    if(!u) return 0;

    var sum = 0;
    int i;
    for(i = 0; i < u->n; i++)
        sum += evaluate(((Node**)u->c)[i]);

    var phase  = sin(u->r * Bar + sum);
    var weight = 1.0 / pow(u->d + 1, 1.25);
    u->v = (1 - weight)*u->v + weight*phase;

    return u->v;
}

int countNodes(Node* u)
{
    if(!u) return 0;
    int count = 1, i;
    for(i = 0; i < u->n; i++)
        count += countNodes(((Node**)u->c)[i]);
    return count;
}

void printTree(Node* u, int indent)
{
    if(!u) return;

    string pad = " ";
    int i;
    for(i = 0; i < indent; i++)
        pad = strf("%s ", pad);

    printf("\n%s[Node] d=%i n=%i v=%.3f", pad, u->d, u->n, u->v);

    for(i = 0; i < u->n; i++)
        printTree(((Node**)u->c)[i], indent + 1);
}

void freeTree(Node* u)
{
    if(!u) return;
    int i;
    for(i = 0; i < u->n; i++)
        freeTree(((Node**)u->c)[i]);
    if(u->c) free(u->c);
    free(u);
}

function run()
{
    static int initialized = 0;
    static var lambda;

    if(is(INITRUN) && !initialized) {
        Root = createNode(MAX_DEPTH);
        initialized = 1;
        printf("\nRoot initialized with %i nodes", countNodes(Root));
    }

    lambda = evaluate(Root);
    printf("\nlambda = %.5f", lambda);

    if(Bar % 100 == 0)
        printTree(Root, 0);

    if(lambda > 0.75)
        enterLong();
}

// Called automatically at end of session/backtest; safe place to free memory.
function cleanup()
{
    if(Root) freeTree(Root);
}

Last edited by TipmyPip; 09/01/25 21:40.
The Breach of Algorithms [Re: TipmyPip] #488869
09/01/25 18:14
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
In a forest of clocks, a root hums to its kin,
each branch answering with a quieter echo.
The night counts in bars; a pale moon lifts a number,
lets it ring the hush, then folds it back into itself.

Windows open on measured breath—
square whispers gathered, their weight made gentle.
Sometimes the canopy speaks its whole shape,
most nights it keeps the lattice veiled.

When an unseen gate brightens, the path inclines;
footsteps lean forward, then vanish like careful hands
untying threads at dawn—no trace, only the hush
remembering how it almost said its name.

[Linked Image]

Code
#define MAX_BRANCHES 3
#define MAX_DEPTH    4
#define NWIN         256   // window length for energy/power estimates

typedef struct Node {
    var  v;   // state
    var  r;   // intrinsic rate
    void* c;  // array of child Node* (cast on access)
    int  n;   // number of children
    int  d;   // depth
} Node;

Node* Root;

// ---- Discrete-time helpers for energy/power -------------------------------

// Sum of squares over the last N samples of a series (Data[0] = most recent)
var sumsq(vars Data, int N)
{
    var s = 0;
    int i;
    for(i = 0; i < N; i++)
        s += Data[i]*Data[i];
    return s;
}

// ---- Tree construction / evaluation ---------------------------------------

Node* createNode(int depth)
{
    Node* u = (Node*)malloc(sizeof(Node));
    u->v = random();
    u->r = 0.01 + 0.02*depth + random()*0.005;
    u->d = depth;

    if(depth > 0) {
        u->n = (int)(random()*MAX_BRANCHES) + 1;
        u->c = malloc(u->n * sizeof(void*));  // array of Node*
        int i;
        for(i = 0; i < u->n; i++)
            ((Node**)u->c)[i] = createNode(depth - 1);
    } else {
        u->n = 0;
        u->c = 0;
    }
    return u;
}

var evaluateNode(Node* u)
{
    if(!u) return 0;

    var sum = 0;
    int i;
    for(i = 0; i < u->n; i++)
        sum += evaluateNode(((Node**)u->c)[i]);

    // depth-attenuated phase response
    var phase  = sin(u->r * Bar + sum);
    var weight = 1.0 / pow(u->d + 1, 1.25);
    u->v = (1 - weight)*u->v + weight*phase;
    return u->v;
}

int countNodes(Node* u)
{
    if(!u) return 0;
    int count = 1, i;
    for(i = 0; i < u->n; i++)
        count += countNodes(((Node**)u->c)[i]);
    return count;
}

void printTree(Node* u, int indent)
{
    if(!u) return;
    string pad = " ";
    int i;
    for(i = 0; i < indent; i++)
        pad = strf("%s ", pad);
    printf("\n%s[Node] d=%i n=%i v=%.3f", pad, u->d, u->n, u->v);
    for(i = 0; i < u->n; i++)
        printTree(((Node**)u->c)[i], indent + 1);
}

void freeTree(Node* u)
{
    if(!u) return;
    int i;
    for(i = 0; i < u->n; i++)
        freeTree(((Node**)u->c)[i]);
    if(u->c) free(u->c);
    free(u);
}

// ---- Main bar loop ---------------------------------------------------------

function run()
{
    static int initialized = 0;
    static var E_cum = 0;          // cumulative energy of lambda
    static var lambda;             // field projection per bar

    if(is(INITRUN) && !initialized) {
        // ensure series buffer supports our window NWIN
        if(LookBack < NWIN) LookBack = NWIN;
        Root = createNode(MAX_DEPTH);
        initialized = 1;
        printf("\nRoot initialized with %i nodes", countNodes(Root));
    }

    // 1) Evaluate harmonic field -> lambda[n]
    lambda = evaluateNode(Root);

    // 2) Build a series of lambda for windowed energy/power
    vars LamSeries = series(lambda);       // LamSeries[0] == current lambda

    // 3) Windowed energy & power (discrete-time)
    var E_win = sumsq(LamSeries, NWIN);    // sum_{k=0..NWIN-1} |lambda|^2
    var P_win = E_win / NWIN;              // average power over the window

    // 4) Cumulative energy (to date)
    E_cum += lambda*lambda;

    // 5) Output / optional plot
    printf("\nlambda=%.6f  E_win(%i)=%.6f  P_win=%.6f  E_cum=%.6f",
        lambda, NWIN, E_win, P_win, E_cum);

    plot("lambda",lambda,LINE,0);
    plot("P_win",P_win,LINE,0);

    if(Bar % 100 == 0)
        printTree(Root, 0);

    // Optional symbolic trigger
    if(lambda > 0.75)
        enterLong();
}

// Called automatically at end of session/backtest; free memory.
function cleanup()
{
    if(Root) freeTree(Root);
}


Last edited by TipmyPip; 09/01/25 21:39.
Proportional Rule-Switching Agents (PRSA) [Re: TipmyPip] #488870
09/01/25 21:35
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
Proportional Rule-Switching Agents (PRSA)

Imagine a living manuscript written by a hundred hands at once. Each hand writes a short line, then pauses, then writes again—never alone, always listening to the murmur of the others. The manuscript isn’t fixed; it’s an evolving chorus of lines that lean toward one another, soften their edges, and find a shared cadence.

At the center is a quiet pulse. It doesn’t command; it suggests. Think of it as a tide moving through a branching shoreline. The shoreline is a tree of small decisions—depths and rates, paths and forks—whose shape guides how the tide spreads. Higher branches respond lightly, closer roots sway more; together they create a rhythmic backdrop the whole chorus can feel.

Each line in the chorus follows a simple grammar: a memory of where it just was, a curiosity for two neighboring lines, a sensitivity to the tide, and an ear for the room’s overall hum. The neighbors are not chosen by proximity on a page, but by a subtle kinship: branches of similar depth, currents of similar speed. That kinship becomes a weight—stronger for close cousins on the tree, lighter for distant relatives. In this way, the manuscript prefers coherence without requiring uniformity.

But there is also a conductor, and it is not a person. It’s a small, rule-making mechanism that learns how the chorus tends to sing. It listens to compact snapshots of the room—the average tone, the collective energy, the pulse—and proposes how each line should bend its next note. These proposals are not arbitrary. They are piecewise: “in rooms like this, with tones like these, bend this way.” Over time, the conductor not only adjusts the lines; it also redesigns the seating chart—who listens to whom—and even assigns proportions, a fair share of influence, so that the ensemble does not tilt toward a single loud voice.

There is a discipline to this play. Every adjustment is bounded; every tendency is balanced by a counter-tendency. Momentum smooths sudden jolts. Proportions are normalized so that attention remains a scarce resource, not a runaway gift. The results are logged, line by line, in separate books—one book per voice—yet each book quotes the others. Open any page and you’ll find a self-contained verse that still points outward, referencing the tide it felt, the neighbors it heard, the seat it was given, and the short rule that shaped its choice.

Sometimes, at appointed moments, the seating, the rules, and the proportions are reconsidered. The chorus does not dissolve; it molts. Kinships are re-weighed; alliances shift; the grammar is rewritten in the margins. This is not chaos—more like seasons. The same tree; new leaves. The same tide; a different shore.

What emerges is not a single melody, but a texture: local phrases that brighten or darken together, clusters that coordinate without collapsing, solos that rise only when the room invites them. The manuscript remains legible because it keeps explaining itself—every verse carries its own recipe—yet it stays surprising because the recipes are learned, not imposed.

In the end, the system is a study in measured togetherness. It suggests how separate lines can become mutually informative without losing their character; how guidance can be learned rather than declared; how proportion can prevent dominance; how memory can soften change. It does not promise an endpoint. It promises a way of moving—iterative, attentive, shaped by a shared structure yet open to revision—so that the whole becomes more than a sum, and the path, though unknowable in advance, feels quietly inevitable as it unfolds.


Code
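// Reading guide (one interpretation, mapping the metaphor above to the code):
//   the "tide"        -> lambda = evaluateNode(Root), the harmonic D-tree pulse
//   the "chorus"      -> G_State[i], the NET_EQNS coupled equations
//   "kinship weights" -> dtreeTerm(i), depth/rate-similarity coupling
//   the "conductor"   -> adviseEq()/advisePair() DTREE calls that synthesize
//                        coefficients, rewire neighbors and assign proportions
//   "fair shares"     -> G_Prop[i], normalized so the proportions sum to 1
//   "separate books"  -> one Log\Alpha01_eq_*.csv log file per equation
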
// ================= PARAMETERS =================
#define MAX_BRANCHES    3
#define MAX_DEPTH       4
#define NWIN            256
#define NET_EQNS        100
#define DEGREE          4
#define KPROJ           16
#define REWIRE_EVERY    127
#define LOG_EVERY       1

// DTREE-driven rewiring candidates per neighbor slot
#define CAND_NEIGH      8

// Feature sizes for DTREE calls
#define ADV_EQ_NF       10   // per-equation features
#define ADV_PAIR_NF     12   // pair features

// ================ HARMONIC D-TREE (structural context) ================
typedef struct Node {
    var  v;     // state
    var  r;     // intrinsic rate
    void* c;    // array of child Node* (cast on access)
    int  n;     // number of children
    int  d;     // depth
} Node;

Node* Root;

// D-tree index
Node** G_TreeIdx;   // [cap]
int    G_TreeN;     // count
int    G_TreeCap;   // capacity
var    G_DTreeExp;  // exponent for evaluateNode() attenuation

// --------- helpers ----------
// Zorro: random(Max) → uniform [0..Max), abs() for absolute value, clamp() builtin.

// uniform integer in [lo..hi]
int randint(int lo, int hi)
{
    return lo + (int)random(hi - lo + 1);
}

// uniform var in [a..b)
var randu(var a, var b)
{
    return a + random(b - a);
}

// return ±1 with 50/50 probability (guaranteed nonzero)
var randsign()
{
    return ifelse(random(1) < 0.5, -1.0, 1.0);
}

// map u in [-1,1] to [lo,hi]
var mapUnit(var u, var lo, var hi)
{
    if(u < -1) u = -1;
    if(u >  1) u =  1;
    var t = 0.5*(u + 1.0);
    return lo + t*(hi - lo);
}

void pushTreeNode(Node* u){ if(G_TreeN < G_TreeCap) G_TreeIdx[G_TreeN++] = u; }
void indexTreeDFS(Node* u){ if(!u) return; pushTreeNode(u); int i; for(i=0;i<u->n;i++) indexTreeDFS(((Node**)u->c)[i]); }

Node* createNode(int depth)
{
    Node* u = (Node*)malloc(sizeof(Node));
    u->v = 2*random(1) - 1;                           // [-1..1)
    u->r = 0.01 + 0.02*depth + random(1)*0.005;       // small positive
    u->d = depth;

    if(depth > 0){
        u->n = randint(1, MAX_BRANCHES);
        u->c = malloc(u->n * sizeof(void*));
        int i; for(i=0;i<u->n;i++) ((Node**)u->c)[i] = createNode(depth - 1);
    } else { u->n = 0; u->c = 0; }
    return u;
}

var evaluateNode(Node* u)
{
    if(!u) return 0;
    var sum=0; int i; for(i=0;i<u->n;i++) sum += evaluateNode(((Node**)u->c)[i]);
    var phase  = sin(u->r * Bar + sum);
    var weight = 1.0 / pow(u->d + 1, G_DTreeExp);
    u->v = (1 - weight)*u->v + weight*phase;
    return u->v;
}

int countNodes(Node* u){ if(!u) return 0; int c=1,i; for(i=0;i<u->n;i++) c += countNodes(((Node**)u->c)[i]); return c; }
void freeTree(Node* u){ if(!u) return; int i; for(i=0;i<u->n;i++) freeTree(((Node**)u->c)[i]); if(u->c) free(u->c); free(u); }

// =========== NETWORK STATE & COEFFICIENTS ===========
int   G_N  = NET_EQNS;
int   G_D  = DEGREE;
int   G_K  = KPROJ;

// states
var*  G_State;    // [G_N]
var*  G_Prev;     // [G_N]
var*  G_Vel;      // [G_N]

// sparse adjacency
int*  G_Adj;      // [G_N*G_D]

// random projection + features
var*  G_RP;       // [G_K*G_N]
var*  G_Z;        // [G_K]

// weights (will be DTREE-synthesized each epoch)
int*  G_Mode;     // 0..3 selects nonlinearity combo
var*  G_WSelf;    // self
var*  G_WN1;      // neighbor 1
var*  G_WN2;      // neighbor 2
var*  G_WGlob1;   // global term 1
var*  G_WGlob2;   // global term 2
var*  G_WMom;     // momentum
var*  G_WTree;    // DTree-coupling weight
var*  G_WAdv;     // built-in DTREE advice weight

// argument coefficients for the two nonlinearities
var*  A1x;  var*  A1lam;  var*  A1mean;  var*  A1E;  var*  A1P;  var*  A1i;  var*  A1c;
var*  A2x;  var*  A2lam;  var*  A2mean;  var*  A2E;  var*  A2P;  var*  A2i;  var*  A2c;

// global-term coeffs
var*  G1mean;   var*  G1E;
var*  G2P;      var*  G2lam;

// DTree (structural) coupling diagnostics & parameters
var*  G_TreeTerm;  // DT(i) numeric
int*  G_TopEq;     // strongest partner index
var*  G_TopW;      // strongest partner normalized weight
int*  G_EqTreeId;  // eq -> tree node id
var*  TAlpha;      // per-eq depth penalty
var*  TBeta;       // per-eq rate  penalty

// predictability and DTREE advice score
var*  G_Pred;       // [0..1]
var*  G_AdvScore;   // [-1..1]

// DTREE-created proportions (sum to 1 across equations)
var*  G_PropRaw;
var*  G_Prop;

// symbolic equation string per equation
string* G_Sym;

// epoch/context & feedback
int    G_Epoch = 0;
int    G_CtxID = 0;
var    G_FB_A = 0.7;
var    G_FB_B = 0.3;

// ---------- predictability from D-tree (0..1) ----------
var nodePredictability(Node* t)
{
    if(!t) return 0.5;
    var disp=0; int n=t->n, i;
    for(i=0;i<n;i++){ Node* c=((Node**)t->c)[i]; disp += abs(c->v - t->v); }  // abs(var)
    if(n>0) disp /= n;
    var depthFac = 1.0/(1+t->d);
    var rateBase = 0.01 + 0.02*t->d;
    var rateFac  = exp(-25.0*abs(t->r - rateBase));
    var p = 0.5*(depthFac + rateFac);
    p = 0.5*p + 0.5*(1.0/(1.0 + disp));
    return clamp(p,0,1);  // built-in clamp
}

// filenames
void buildEqFileName(int idx, char* outName /*>=64*/)
{
    strcpy(outName, "Log\\Alpha01_eq_");
    string idxs = strf("%03i", idx);
    strcat(outName, idxs);
    strcat(outName, ".csv");
}

// --------- allocation ----------
void allocateNet()
{
    int N=G_N, D=G_D, K=G_K;

    G_State=(var*)malloc(N*sizeof(var));  G_Prev=(var*)malloc(N*sizeof(var));  G_Vel=(var*)malloc(N*sizeof(var));
    G_Adj=(int*)malloc(N*D*sizeof(int));
    G_RP=(var*)malloc(K*N*sizeof(var));   G_Z=(var*)malloc(K*sizeof(var));

    G_Mode=(int*)malloc(N*sizeof(int));
    G_WSelf=(var*)malloc(N*sizeof(var));  G_WN1=(var*)malloc(N*sizeof(var));   G_WN2=(var*)malloc(N*sizeof(var));
    G_WGlob1=(var*)malloc(N*sizeof(var)); G_WGlob2=(var*)malloc(N*sizeof(var));
    G_WMom=(var*)malloc(N*sizeof(var));   G_WTree=(var*)malloc(N*sizeof(var)); G_WAdv=(var*)malloc(N*sizeof(var));

    A1x=(var*)malloc(N*sizeof(var)); A1lam=(var*)malloc(N*sizeof(var)); A1mean=(var*)malloc(N*sizeof(var));
    A1E=(var*)malloc(N*sizeof(var)); A1P=(var*)malloc(N*sizeof(var));   A1i=(var*)malloc(N*sizeof(var)); A1c=(var*)malloc(N*sizeof(var));
    A2x=(var*)malloc(N*sizeof(var)); A2lam=(var*)malloc(N*sizeof(var)); A2mean=(var*)malloc(N*sizeof(var));
    A2E=(var*)malloc(N*sizeof(var)); A2P=(var*)malloc(N*sizeof(var));   A2i=(var*)malloc(N*sizeof(var)); A2c=(var*)malloc(N*sizeof(var));

    G1mean=(var*)malloc(N*sizeof(var)); G1E=(var*)malloc(N*sizeof(var));
    G2P=(var*)malloc(N*sizeof(var));    G2lam=(var*)malloc(N*sizeof(var));

    G_TreeTerm=(var*)malloc(N*sizeof(var)); G_TopEq=(int*)malloc(N*sizeof(int)); G_TopW=(var*)malloc(N*sizeof(var));
    TAlpha=(var*)malloc(N*sizeof(var));     TBeta=(var*)malloc(N*sizeof(var));

    G_Pred=(var*)malloc(N*sizeof(var)); G_AdvScore=(var*)malloc(N*sizeof(var));

    G_PropRaw=(var*)malloc(N*sizeof(var));  G_Prop=(var*)malloc(N*sizeof(var));

    G_Sym=(string*)malloc(N*sizeof(string));

    int i;
    for(i=0;i<N;i++){
        G_State[i]=2*random(1)-1; G_Prev[i]=G_State[i]; G_Vel[i]=0;

        // initialize; will be overwritten by DTREE synthesis
        G_Mode[i]=0;
        G_WSelf[i]=0.5; G_WN1[i]=0.2; G_WN2[i]=0.2; G_WGlob1[i]=0.1; G_WGlob2[i]=0.1; G_WMom[i]=0.05; G_WTree[i]=0.15; G_WAdv[i]=0.15;

        A1x[i]=1; A1lam[i]=0.1; A1mean[i]=0; A1E[i]=0; A1P[i]=0; A1i[i]=0; A1c[i]=0;
        A2x[i]=1; A2lam[i]=0.1; A2mean[i]=0; A2E[i]=0; A2P[i]=0; A2i[i]=0; A2c[i]=0;

        G1mean[i]=1.0; G1E[i]=0.001;
        G2P[i]=0.6;    G2lam[i]=0.3;

        TAlpha[i]=0.8; TBeta[i]=25.0;

        G_TreeTerm[i]=0; G_TopEq[i]=-1; G_TopW[i]=0;
        G_Pred[i]=0.5;   G_AdvScore[i]=0;

        G_PropRaw[i]=1;  G_Prop[i]=1.0/G_N;

        G_Sym[i]=(char*)malloc(1024); strcpy(G_Sym[i],"");
    }

    // D-tree index & mapping buffers
    G_TreeCap=512; G_TreeIdx=(Node**)malloc(G_TreeCap*sizeof(Node*)); G_TreeN=0;
    G_EqTreeId=(int*)malloc(N*sizeof(int));
}

void freeNet()
{
    int i;
    if(G_State)free(G_State); if(G_Prev)free(G_Prev); if(G_Vel)free(G_Vel);
    if(G_Adj)free(G_Adj); if(G_RP)free(G_RP); if(G_Z)free(G_Z);
    if(G_Mode)free(G_Mode); if(G_WSelf)free(G_WSelf); if(G_WN1)free(G_WN1); if(G_WN2)free(G_WN2);
    if(G_WGlob1)free(G_WGlob1); if(G_WGlob2)free(G_WGlob2); if(G_WMom)free(G_WMom);
    if(G_WTree)free(G_WTree); if(G_WAdv)free(G_WAdv);

    if(A1x)free(A1x); if(A1lam)free(A1lam); if(A1mean)free(A1mean); if(A1E)free(A1E); if(A1P)free(A1P); if(A1i)free(A1i); if(A1c)free(A1c);
    if(A2x)free(A2x); if(A2lam)free(A2lam); if(A2mean)free(A2mean); if(A2E)free(A2E); if(A2P)free(A2P); if(A2i)free(A2i); if(A2c)free(A2c);

    if(G1mean)free(G1mean); if(G1E)free(G1E); if(G2P)free(G2P); if(G2lam)free(G2lam);

    if(G_TreeTerm)free(G_TreeTerm); if(G_TopEq)free(G_TopEq); if(G_TopW)free(G_TopW);
    if(TAlpha)free(TAlpha); if(TBeta)free(TBeta);

    if(G_Pred)free(G_Pred); if(G_AdvScore)free(G_AdvScore);

    if(G_PropRaw)free(G_PropRaw); if(G_Prop)free(G_Prop);

    if(G_Sym){ for(i=0;i<G_N;i++) if(G_Sym[i]) free(G_Sym[i]); free(G_Sym); }
    if(G_TreeIdx)free(G_TreeIdx); if(G_EqTreeId)free(G_EqTreeId);
}

// --------- random projection ----------
void randomizeRP(){
    int K=G_K,N=G_N,k,j;
    for(k=0;k<K;k++)
        for(j=0;j<N;j++)
            G_RP[k*N+j] = ifelse(random(1) < 0.5, -1.0, 1.0);  // unbiased ±1
}
void computeProjection(){ int K=G_K,N=G_N,k,j; for(k=0;k<K;k++){ var acc=0; for(j=0;j<N;j++) acc+=G_RP[k*N+j]*(G_State[j]*G_State[j]); G_Z[k]=acc; }}

// --------- build features for DTREE ----------
void buildEqFeatures(int i, var lambda, var mean, var energy, var power, var* S /*ADV_EQ_NF*/)
{
    Node* t=G_TreeIdx[G_EqTreeId[i]];
    S[0]=G_State[i];
    S[1]=mean;
    S[2]=power;
    S[3]=energy;
    S[4]=lambda;
    S[5]=G_Pred[i];
    S[6]=t->d;
    S[7]=t->r;
    S[8]=G_TreeTerm[i];
    S[9]=G_Mode[i];
}

void buildPairFeatures(int i,int j, var lambda, var mean, var energy, var power, var* P /*ADV_PAIR_NF*/)
{
    Node* ti=G_TreeIdx[G_EqTreeId[i]];
    Node* tj=G_TreeIdx[G_EqTreeId[j]];
    P[0]=G_State[i]; P[1]=G_State[j];
    P[2]=ti->d; P[3]=tj->d;
    P[4]=ti->r; P[5]=tj->r;
    P[6]=abs(P[2]-P[3]); P[7]=abs(P[4]-P[5]); // abs(var)
    P[8]=G_Pred[i]*G_Pred[j];
    P[9]=lambda; P[10]=mean; P[11]=power;
}

// --------- DTREE advice wrappers ----------
var adviseEq(int i, var lambda, var mean, var energy, var power)
{
    var S[ADV_EQ_NF]; buildEqFeatures(i,lambda,mean,energy,power,S);
    var a = adviseLong(DTREE, 0, S, ADV_EQ_NF); // ~[-100,100]
    return a/100.;
}

var advisePair(int i,int j, var lambda, var mean, var energy, var power)
{
    var P[ADV_PAIR_NF]; buildPairFeatures(i,j,lambda,mean,energy,power,P);
    var a = adviseLong(DTREE, 0, P, ADV_PAIR_NF);
    return a/100.;
}

// --------- DTREE-driven adjacency selection ----------
void rewireAdjacency_DTREE(var lambda, var mean, var energy, var power)
{
    int N=G_N, D=G_D, i, d, c, best, cand;
    for(i=0;i<N;i++){
        for(d=0; d<D; d++){
            var bestScore = -2; best = -1;
            for(c=0;c<CAND_NEIGH;c++){
                cand = randint(0,N-1);
                if(cand==i) continue;
                // avoid duplicates already chosen for this row
                int clash=0, k; for(k=0;k<d;k++) if(G_Adj[i*D+k]==cand){clash=1; break;}
                if(clash) continue;

                var s = advisePair(i,cand,lambda,mean,energy,power); // [-1,1]
                if(s > bestScore){ bestScore=s; best=cand; }
            }
            if(best<0){ // fallback
                do{ best = randint(0,N-1);} while(best==i);
            }
            G_Adj[i*D + d] = best;
        }
    }
}

// --------- DTREE-created coefficients, modes & proportions ----------
void synthesizeEquationFromDTREE(int i, var lambda, var mean, var energy, var power)
{
    // multiple advice calls; each mapped to a coefficient range
    var a_mode = adviseEq(i,lambda,mean,energy,power);
    G_Mode[i] = (int)(abs(a_mode*1000)) & 3;

    var a_wself = adviseEq(i,lambda,mean,energy,power);
    var a_wn1   = adviseEq(i,lambda,mean,energy,power);
    var a_wn2   = adviseEq(i,lambda,mean,energy,power);
    var a_g1    = adviseEq(i,lambda,mean,energy,power);
    var a_g2    = adviseEq(i,lambda,mean,energy,power);
    var a_mom   = adviseEq(i,lambda,mean,energy,power);
    var a_tree  = adviseEq(i,lambda,mean,energy,power);
    var a_adv   = adviseEq(i,lambda,mean,energy,power);

    G_WSelf[i]  = mapUnit(a_wself, 0.15, 0.85);
    G_WN1[i]    = mapUnit(a_wn1,   0.05, 0.35);
    G_WN2[i]    = mapUnit(a_wn2,   0.05, 0.35);
    G_WGlob1[i] = mapUnit(a_g1,    0.05, 0.30);
    G_WGlob2[i] = mapUnit(a_g2,    0.05, 0.30);
    G_WMom[i]   = mapUnit(a_mom,   0.02, 0.15);
    G_WTree[i]  = mapUnit(a_tree,  0.05, 0.35);
    G_WAdv[i]   = mapUnit(a_adv,   0.05, 0.35);

    // argument coefficients (range chosen to be stable)
    var a1 = adviseEq(i,lambda,mean,energy,power);
    var a2 = adviseEq(i,lambda,mean,energy,power);
    var a3 = adviseEq(i,lambda,mean,energy,power);
    var a4 = adviseEq(i,lambda,mean,energy,power);
    var a5 = adviseEq(i,lambda,mean,energy,power);
    var a6 = adviseEq(i,lambda,mean,energy,power);
    var a7 = adviseEq(i,lambda,mean,energy,power);

    A1x[i]   = randsign()*mapUnit(a1, 0.6, 1.2);
    A1lam[i] = randsign()*mapUnit(a2, 0.05,0.35);
    A1mean[i]= mapUnit(a3,-0.30,0.30);
    A1E[i]   = mapUnit(a4,-0.0015,0.0015);
    A1P[i]   = mapUnit(a5,-0.30,0.30);
    A1i[i]   = mapUnit(a6,-0.02,0.02);
    A1c[i]   = mapUnit(a7,-0.20,0.20);

    // second nonlinearity args
    var b1 = adviseEq(i,lambda,mean,energy,power);
    var b2 = adviseEq(i,lambda,mean,energy,power);
    var b3 = adviseEq(i,lambda,mean,energy,power);
    var b4 = adviseEq(i,lambda,mean,energy,power);
    var b5 = adviseEq(i,lambda,mean,energy,power);
    var b6 = adviseEq(i,lambda,mean,energy,power);
    var b7 = adviseEq(i,lambda,mean,energy,power);

    A2x[i]   = randsign()*mapUnit(b1, 0.6, 1.2);
    A2lam[i] = randsign()*mapUnit(b2, 0.05,0.35);
    A2mean[i]= mapUnit(b3,-0.30,0.30);
    A2E[i]   = mapUnit(b4,-0.0015,0.0015);
    A2P[i]   = mapUnit(b5,-0.30,0.30);
    A2i[i]   = mapUnit(b6,-0.02,0.02);
    A2c[i]   = mapUnit(b7,-0.20,0.20);

    // global-term coeffs
    var c1 = adviseEq(i,lambda,mean,energy,power);
    var c2 = adviseEq(i,lambda,mean,energy,power);
    var d1 = adviseEq(i,lambda,mean,energy,power);
    var d2 = adviseEq(i,lambda,mean,energy,power);
    G1mean[i] = mapUnit(c1, 0.4, 1.6);
    G1E[i]    = mapUnit(c2,-0.004,0.004);
    G2P[i]    = mapUnit(d1, 0.1, 1.2);
    G2lam[i]  = mapUnit(d2, 0.05,0.7);

    // per-equation alpha/beta penalties (for structural DTree kernel)
    var e1 = adviseEq(i,lambda,mean,energy,power);
    var e2 = adviseEq(i,lambda,mean,energy,power);
    TAlpha[i] = mapUnit(e1, 0.3, 1.5);
    TBeta[i]  = mapUnit(e2, 6.0, 50.0);

    // DTREE-created raw proportion; normalized later
    var p = adviseEq(i,lambda,mean,energy,power);      // [-1,1]
    G_PropRaw[i] = 0.01 + 0.99 * (0.5*(p+1.0));        // in (0.01..1.0]
}

// normalize proportions so sum_i Prop[i] = 1
void normalizeProportions()
{
    int N=G_N,i; var s=0; for(i=0;i<N;i++) s += G_PropRaw[i];
    if(s<=0) { for(i=0;i<N;i++) G_Prop[i] = 1.0/N; return; }
    for(i=0;i<N;i++) G_Prop[i] = G_PropRaw[i]/s;
}

// --------- DTree proportional coupling: DT(i) with Proportion & Predictability ----------
var dtreeTerm(int i, int* outTopEq, var* outTopW)
{
    int N=G_N,j;
    int tid_i=G_EqTreeId[i]; Node* ti=G_TreeIdx[tid_i]; int di=ti->d; var ri=ti->r;
    var alpha=TAlpha[i], beta=TBeta[i];

    var sumw=0, acc=0, bestW=-1; int bestJ=-1;
    for(j=0;j<N;j++){
        if(j==i) continue;
        int tid_j=G_EqTreeId[j]; Node* tj=G_TreeIdx[tid_j]; int dj=tj->d; var rj=tj->r;

        var w = exp(-alpha*abs(di-dj)) * exp(-beta*abs(ri-rj));
        var predBoost = 0.5 + 0.5*(G_Pred[i]*G_Pred[j]);
        var propBoost = 0.5 + 0.5*( (G_Prop[i] + G_Prop[j]) );  // favors high-proportion participants
        w *= predBoost * propBoost;

        // Optional: DTREE pair advice boost
        var pairAdv = advisePair(i,j,0,0,0,0);  // safe call; if untrained → ~0
        w *= (0.75 + 0.25*(0.5*(pairAdv+1.0))); // 0.75..1.0 range

        sumw += w; acc += w*G_State[j];
        if(w>bestW){bestW=w; bestJ=j;}
    }
    if(outTopEq) *outTopEq = bestJ;
    if(outTopW)  *outTopW  = ifelse(sumw>0, bestW/sumw, 0);
    if(sumw>0) return acc/sumw; return 0;
}

// --------- symbolic expression builder (now includes Prop[i]) ----------
void buildSymbolicExpr(int i, int n1, int n2)
{
    string s = G_Sym[i]; strcpy(s,"");
    string a1 = strf("(%.3f*x[%i] + %.3f*lam + %.3f*mean + %.5f*E + %.3f*P + %.3f*i + %.3f)",
                     A1x[i], n1, A1lam[i], A1mean[i], A1E[i], A1P[i], A1i[i], A1c[i]);
    string a2 = strf("(%.3f*x[%i] + %.3f*lam + %.3f*mean + %.5f*E + %.3f*P + %.3f*i + %.3f)",
                     A2x[i], n2, A2lam[i], A2mean[i], A2E[i], A2P[i], A2i[i], A2c[i]);

    strcat(s, "x[i]_next = ");
    strcat(s, strf("%.3f*x[i] + ", G_WSelf[i]));
    if(G_Mode[i]==0){ strcat(s, strf("%.3f*sin%s + ",  G_WN1[i], a1)); strcat(s, strf("%.3f*cos%s + ",  G_WN2[i], a2)); }
    else if(G_Mode[i]==1){ strcat(s, strf("%.3f*tanh%s + ", G_WN1[i], a1)); strcat(s, strf("%.3f*sin%s + ",  G_WN2[i], a2)); }
    else if(G_Mode[i]==2){ strcat(s, strf("%.3f*cos%s + ",  G_WN1[i], a1)); strcat(s, strf("%.3f*tanh%s + ", G_WN2[i], a2)); }
    else { strcat(s, strf("%.3f*sin%s + ",  G_WN1[i], a1)); strcat(s, strf("%.3f*cos%s + ",  G_WN2[i], a2)); }

    strcat(s, strf("%.3f*tanh(%.3f*mean + %.5f*E) + ", G_WGlob1[i], G1mean[i], G1E[i]));
    strcat(s, strf("%.3f*sin(%.3f*P + %.3f*lam) + ",   G_WGlob2[i], G2P[i],   G2lam[i]));
    strcat(s, strf("%.3f*(x[i]-x_prev[i]) + ",         G_WMom[i]));
    strcat(s, strf("Prop[i]=%.4f; ",                    G_Prop[i]));
    strcat(s, strf("%.3f*DT(i) + ",                    G_WTree[i]));
    strcat(s, strf("%.3f*DTREE(i)",                    G_WAdv[i]  ));
}

// --------- one-time rewire init (build mapping) ----------
void rewireInit()
{
    randomizeRP(); computeProjection();

    // Build D-tree index and eq->tree mapping
    G_TreeN=0; indexTreeDFS(Root);
    int i; for(i=0;i<G_N;i++) G_EqTreeId[i] = i % G_TreeN;
}

// --------- full "rewire epoch": adjacency by DTREE + coefficients by DTREE + proportions ----------
void rewireEpoch(var lambda, var mean, var energy, var power)
{
    // 1) refresh predictability before synthesis
    int i;
    for(i=0;i<G_N;i++){ Node* t=G_TreeIdx[G_EqTreeId[i]]; G_Pred[i]=nodePredictability(t); }

    // 2) topology chosen by DTREE
    rewireAdjacency_DTREE(lambda,mean,energy,power);

    // 3) coefficients/modes/penalties/proportions created by DTREE
    for(i=0;i<G_N;i++) synthesizeEquationFromDTREE(i,lambda,mean,energy,power);

    // 4) normalize proportions across equations
    normalizeProportions();

    // 5) update context id (from adjacency)
    int D=G_D; int h=0; for(i=0;i<G_N*D;i++) h = (h*1315423911) ^ G_Adj[i];
    G_CtxID = (h ^ (G_Epoch<<8)) & 0x7FFFFFFF;

    // 6) rebuild symbolic strings with current neighbors
    for(i=0;i<G_N;i++){ int n1=G_Adj[i*G_D+0], n2=G_Adj[i*G_D+1]; buildSymbolicExpr(i,n1,n2); }
}

// --------- update step (per bar) ----------
var projectNet()
{
    int N=G_N,i; var sum=0,sumsq=0,cross=0;
    for(i=0;i<N;i++){ sum+=G_State[i]; sumsq+=G_State[i]*G_State[i]; if(i+1<N) cross+=G_State[i]*G_State[i+1]; }
    var mean=sum/N, corr=cross/(N-1);
    return 0.6*tanh(mean + 0.001*sumsq) + 0.4*sin(corr);
}

void updateNet(var driver, var* outMean, var* outEnergy, var* outPower, int writeMeta)
{
    int N=G_N, D=G_D, i;

    // aggregates for this bar (before update)
    var sum=0,sumsq=0; for(i=0;i<N;i++){ sum+=G_State[i]; sumsq+=G_State[i]*G_State[i]; }
    var mean=sum/N, energy=sumsq, power=sumsq/N;

    // refresh predictability & (optional) cached DT advice per equation
    for(i=0;i<N;i++){ Node* t=G_TreeIdx[G_EqTreeId[i]]; G_Pred[i]=nodePredictability(t); }

    // update each equation
    for(i=0;i<N;i++){
        int n1=G_Adj[i*D+0], n2=G_Adj[i*D+1];
        var xi=G_State[i], xn1=G_State[n1], xn2=G_State[n2], mom=xi-G_Prev[i];

        // structural consensus first (uses proportions & predictability internally)
        int topEq=-1; var topW=0;
        var dt = dtreeTerm(i,&topEq,&topW);
        G_TreeTerm[i]=dt; G_TopEq[i]=topEq; G_TopW[i]=topW;

        // built-in DTREE advice from current features
        var adv = adviseEq(i, driver, mean, energy, power);
        G_AdvScore[i] = adv;

        // nonlinear arguments (from DTREE-generated coeffs)
        var arg1=A1x[i]*xn1 + A1lam[i]*driver + A1mean[i]*mean + A1E[i]*energy + A1P[i]*power + A1i[i]*i + A1c[i];
        var arg2=A2x[i]*xn2 + A2lam[i]*driver + A2mean[i]*mean + A2E[i]*energy + A2P[i]*power + A2i[i]*i + A2c[i];

        var nl1,nl2;
        if(G_Mode[i]==0){ nl1=sin(arg1); nl2=cos(arg2); }
        else if(G_Mode[i]==1){ nl1=tanh(arg1); nl2=sin(arg2); }
        else if(G_Mode[i]==2){ nl1=cos(arg1);  nl2=tanh(arg2); }
        else { nl1=sin(arg1); nl2=cos(arg2); }

        var glob1=tanh(G1mean[i]*mean + G1E[i]*energy);
        var glob2=sin (G2P[i]*power + G2lam[i]*driver);

        var xNew =
            G_WSelf[i]*xi +
            G_WN1[i]*nl1 +
            G_WN2[i]*nl2 +
            G_WGlob1[i]*glob1 +
            G_WGlob2[i]*glob2 +
            G_WMom[i]*mom +
            G_WTree[i]*dt +
            G_WAdv[i] *adv;

        G_Prev[i]=xi; G_Vel[i]=xNew-xi; G_State[i]=xNew;

        // META on rewire bars
        if(writeMeta){
            char fname[64]; buildEqFileName(i,fname);
            int tid=G_EqTreeId[i]; Node* t=G_TreeIdx[tid];
            int nn1=G_Adj[i*D+0], nn2=G_Adj[i*D+1];
            file_append(fname,
                strf("META,%i,%i,%i,%i,%i,%i,%i,%i,%.6f,Pred=%.4f,Adv=%.4f,Prop=%.6f,Mode=%i,WAdv=%.3f,WTree=%.3f,\"%s\"\n",
                    G_Epoch, G_CtxID, NET_EQNS, i, nn1, nn2, tid, t->d, t->r,
                    G_Pred[i], G_AdvScore[i], G_Prop[i], G_Mode[i], G_WAdv[i], G_WTree[i], G_Sym[i]));
        }
    }

    if(outMean) *outMean=mean; if(outEnergy) *outEnergy=energy; if(outPower) *outPower=power;
}

// ----------------- MAIN -----------------
function run()
{
    static int initialized=0;
    static var lambda;
    if(is(INITRUN) && !initialized){
        if(LookBack < NWIN) LookBack = NWIN;

        Root=createNode(MAX_DEPTH);
        allocateNet();

        G_DTreeExp = randu(1.10,1.60);
        G_FB_A     = randu(0.60,0.85);
        G_FB_B     = 1.0 - G_FB_A;

        randomizeRP(); computeProjection();

        // Build tree index + mapping once
        rewireInit();

        // First epoch synthesis (uses current states as context)
        G_Epoch = 0;
        rewireEpoch(0,0,0,0);

        // Prepare files: header per equation
        char fname[64]; int i;
        for(i=0;i<NET_EQNS;i++){
            buildEqFileName(i,fname);
            file_append(fname,
                "Bar,lambda,gamma,i,State,n1,n2,mean,energy,power,Vel,Mode,WAdv,WSelf,WN1,WN2,WGlob1,WGlob2,WMom,WTree,Pred,Adv,Prop,TreeTerm,TopEq,TopW,TreeId,Depth,Rate\n");
        }

        // Initial META dump (epoch 0)
        for(i=0;i<G_N;i++){
            int n1=G_Adj[i*G_D+0], n2=G_Adj[i*G_D+1]; int tid=G_EqTreeId[i]; Node* t=G_TreeIdx[tid];
            char fname2[64]; buildEqFileName(i,fname2);
            file_append(fname2,
                strf("META,%i,%i,%i,%i,%i,%i,%i,%i,%.6f,Pred=%.4f,Adv=%.4f,Prop=%.6f,Mode=%i,WAdv=%.3f,WTree=%.3f,\"%s\"\n",
                    G_Epoch, G_CtxID, NET_EQNS, i, n1, n2, tid, t->d, t->r,
                    G_Pred[i], G_AdvScore[i], G_Prop[i], G_Mode[i], G_WAdv[i], G_WTree[i], G_Sym[i]));
        }

        initialized=1;
        printf("\nRoot nodes: %i | Net equations: %i (deg=%i, kproj=%i)", countNodes(Root), G_N, G_D, G_K);
    }

    // 1) Tree ? lambda
    lambda = evaluateNode(Root);

    // 2) Rewire epoch?
    int doRewire = ((Bar % REWIRE_EVERY) == 0);
    if(doRewire){
        G_Epoch++;
        // Use current aggregates as context for synthesis
        // quick pre-aggregates for better guidance
        int i; var sum=0; for(i=0;i<G_N;i++) sum += G_State[i];
        var mean = sum/G_N;
        var energy=0; for(i=0;i<G_N;i++) energy += G_State[i]*G_State[i];
        var power = energy/G_N;

        rewireEpoch(lambda,mean,energy,power);
    }

    // 3) Update net this bar (write META only if rewired)
    var meanB, energyB, powerB;
    updateNet(lambda, &meanB, &energyB, &powerB, doRewire);

    // 4) Feedback blend
    var gamma = projectNet();
    lambda = G_FB_A*lambda + G_FB_B*gamma;

    // 5) Plots
    plot("lambda", lambda, LINE, 0);
    plot("gamma",  gamma,  LINE, 0);
    plot("P_win",  powerB, LINE, 0);

    // 6) Numeric logging
    if(Bar % LOG_EVERY == 0){
        char fname[64]; int i;
        for(i=0;i<NET_EQNS;i++){
            int n1=G_Adj[i*G_D+0], n2=G_Adj[i*G_D+1]; int tid=G_EqTreeId[i]; Node* t=G_TreeIdx[tid];
            buildEqFileName(i,fname);
            file_append(fname,
                strf("%i,%.9f,%.9f,%i,%.9f,%i,%i,%.9f,%.9f,%.9f,%.9f,%i,%.6f,%.6f,%.6f,%.6f,%.6f,%.6f,%.6f,%.6f,%.4f,%.4f,%.6f,%.9f,%i,%.6f,%i,%i,%.6f\n",
                    Bar, lambda, gamma, i, G_State[i], n1, n2,
                    meanB, energyB, powerB, G_Vel[i], G_Mode[i],
                    G_WAdv[i], G_WSelf[i], G_WN1[i], G_WN2[i], G_WGlob1[i], G_WGlob2[i], G_WMom[i], G_WTree[i],
                    G_Pred[i], G_AdvScore[i], G_Prop[i], G_TreeTerm[i], G_TopEq[i], G_TopW[i], tid, t->d, t->r));
        }
    }

    if(lambda > 0.9) enterLong();
}

// Clean up memory
function cleanup()
{
    if(Root) freeTree(Root);
    freeNet();
}

Last edited by TipmyPip; 09/04/25 16:58.
Gate-and-Field Adaptive Engine (GFAE) [Re: TipmyPip] #488874
09/04/25 16:56
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
Gate-and-Field Adaptive Engine (GFAE)

A. Finite language of situations
Each moment is mapped to a single symbol from a small alphabet. From experience, the system remembers which symbols tend to follow which, and how concentrated those follow-ups are. Two summaries are read each moment: a lean (which way the next step tilts) and a clarity (how decisive that tilt is). They serve as a gate—sometimes permissive, sometimes not.

B. Continuous field of influences
Alongside the gate runs a smooth field of interacting elements. Each element updates by blending:

a trace of itself, a couple of neighbor signals passed through simple bends, a soft background rhythm spanning slow to fast, coarse crowd summaries, a touch of recent change, and a bias toward kindred elements.

All ingredients are bounded; attention is a scarce resource shared proportionally.

C. Periodic seat-reshaping
At appointed intervals the system revisits who listens to whom, how much weight each path receives, and which bends are in play. Preference goes to regular, well-behaved parts, kindred pairings along the rhythm ladder, and compact formulas. The structure molts rather than resets: same scaffold, refreshed connections.

D. Permission meets timing
Actions arise only when the gate’s lean is convincing and the field’s rhythm agrees. The field then chooses the when and the how-much.
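
A minimal sketch of that gating rule, using the Markov helpers and thresholds from the listing below (the clarity cutoff of 0.2 is an illustrative assumption, and lambda stands for the engine’s field value as in the earlier listings):

Code
// gate sketch: act only when the Markov lean is convincing and the row
// entropy (inverse clarity) is low; the field value picks side and timing
var pBull   = MC_nextBullishProb(MC_Cur);    // lean of the next symbol
var clarity = 1. - MC_rowEntropy01(MC_Cur);  // 1 = decisive, 0 = pure noise
if(pBull > PBULL_LONG_TH && clarity > 0.2 && lambda > 0)
    enterLong();
else if(pBull < PBULL_SHORT_TH && clarity > 0.2 && lambda < 0)
    enterShort();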

E. Self-explanation
As it runs, the system writes short, human-readable snippets: the current symbol, the gate’s lean and clarity, and concise sketches of how elements combined their inputs. The result is a living ledger of conditions and responses.

F. What emerges
Coherence without uniformity: clusters coordinate when the rhythm invites them, solos recede when clarity fades, and adaptability is maintained through small, proportional adjustments spread across the whole.


Code
// ======================================================================
// Markov-augmented Harmonic D-Tree Engine (Candlestick 122-directional)
// ======================================================================

// ================= USER CONFIG =================
#define ASSET_SYMBOL   "EUR/USD"   // symbol to trade
#define ALGO_NAME      "Alpha10b"  // algo tag (keeps models/files separated)

// Markov gating thresholds
#define MC_ACT         0.30        // min |CDL| ([-1..1]) to mark a pattern active
#define PBULL_LONG_TH  0.60        // Markov gate for long entries
#define PBULL_SHORT_TH 0.40        // Markov gate for short entries

// ================= ENGINE PARAMETERS =================
#define MAX_BRANCHES    3
#define MAX_DEPTH       4
#define NWIN            256
#define NET_EQNS        100
#define DEGREE          4
#define KPROJ           16
#define REWIRE_EVERY    127
#define LOG_EVERY       1

// DTREE-driven rewiring candidates per neighbor slot
#define CAND_NEIGH      8

// ---- DTREE feature sizes (extended with Markov features) ----
#define ADV_EQ_NF       12
#define ADV_PAIR_NF     12

// ================= Candles → 122-state Markov =================
#define MC_NPAT    61
#define MC_STATES  (1 + 2*MC_NPAT)  // 0=NONE, 1..122 directional
#define MC_NONE    0
#define MC_LAPLACE 1.0

// ---------- helpers ----------
var clamp01(var x){ if(x<0) return 0; if(x>1) return 1; return x; }

int isInvalid(var x)
{
  if(x != x) return 1;                // NaN
  if(x > 1e100 || x < -1e100) return 1; // ±INF or astronomic values
  return 0;
}

var safeSig(var x){
  if(x != x) return 0;            // NaN -> 0
  if(x >  999.) return  999.;
  if(x < -999.) return -999.;
  return x;
}

// ========== Heuristic 61-candle feature builder ==========
int buildCDL_TA61(var* out, string* names)
{
  int i; for(i=0;i<MC_NPAT;i++){ out[i]=0; if(names) names[i]="UNUSED"; }

  var O = priceOpen();
  var H = priceHigh();
  var L = priceLow();
  var C = priceClose();

  var rng = H - L; if(rng <= 0) rng = 1e-8;
  var body = C - O;
  var dir  = ifelse(body >= 0, 1.0, -1.0);
  var bodyFrac  = clamp(body/rng, -1, 1);                  // [-1..1]
  var upperFrac = clamp01( (H - max(O,C)) / rng );         // [0..1]
  var lowerFrac = clamp01( (min(O,C) - L) / rng );         // [0..1]
  var absBody   = abs(body)/rng;                           // [0..1]

  // 0: body direction & size
  out[0] = bodyFrac; if(names) names[0] = "BODY";

  // 1..2: upper/lower dominance (signed)
  out[1] = clamp( upperFrac - lowerFrac, -1, 1 ); if(names) names[1] = "UPPER_DOM";
  out[2] = clamp( lowerFrac - upperFrac, -1, 1 ); if(names) names[2] = "LOWER_DOM";

  // 3: doji-ish (very small body), signed by direction
  var dojiT = 0, thresh = 0.10;
  if(absBody < thresh) dojiT = 1.0 - absBody/thresh;   // 0..1
  out[3] = dir * dojiT; if(names) names[3] = "DOJI";

  // 4: marubozu-ish (both shadows tiny), signed by direction
  var shadowSum = upperFrac + lowerFrac;
  var maru = 0; if(shadowSum < 0.10) maru = 1.0 - shadowSum/0.10;
  out[4] = dir * clamp01(maru); if(names) names[4] = "MARUBOZU";

  // 5: hammer-ish (long lower, tiny upper)
  var hamm = 0; if(lowerFrac > 0.60 && upperFrac < 0.10) hamm = (lowerFrac - 0.60)/0.40;
  out[5] = dir * clamp01(hamm); if(names) names[5] = "HAMMER";

  // 6: shooting-star-ish (long upper, tiny lower), bearish by shape
  var star = 0; if(upperFrac > 0.60 && lowerFrac < 0.10) star = (upperFrac - 0.60)/0.40;
  out[6] = -clamp01(star); if(names) names[6] = "SHOOTING";

  // 7: long body strength (signed)
  var longB = 0; if(absBody > 0.70) longB = (absBody - 0.70)/0.30;
  out[7] = dir * clamp01(longB); if(names) names[7] = "LONG_BODY";

  // 8: small body (spinning top-ish)
  out[8] = dir * (1.0 - absBody); if(names) names[8] = "SPIN_TOP";

  // 9: momentum-ish scalar from body size (signed)
  out[9] = dir * (2.0*absBody - 1.0); if(names) names[9] = "BODY_MOM";

  // 10..60 left as zero/UNUSED
  return 61;
}

// ===== Markov storage =====
#define MC_IDX(s,t) ((s)*MC_STATES + (t))
static int*  MC_Count;        // size MC_STATES*MC_STATES
static int*  MC_RowSum;       // size MC_STATES
static int   MC_Prev = -1;
static int   MC_Cur  = 0;
static var   MC_PBullNext = 0.5;
static var   MC_Entropy   = 0.0;
static string MC_Names[MC_NPAT];

void MC_alloc()
{
  int i;
  MC_Count  = (int*)malloc(MC_STATES*MC_STATES*sizeof(int));
  MC_RowSum = (int*)malloc(MC_STATES*sizeof(int));
  for(i=0;i<MC_STATES*MC_STATES;i++) MC_Count[i]=0;
  for(i=0;i<MC_STATES;i++) MC_RowSum[i]=0;
  MC_Prev = -1; MC_Cur = 0; MC_PBullNext = 0.5; MC_Entropy = 0.;
}
void MC_free(){ if(MC_Count){free(MC_Count);MC_Count=0;} if(MC_RowSum){free(MC_RowSum);MC_RowSum=0;} }

int MC_stateFromCDL(var* cdl /*len=61*/, var thr)
{
  int i, best=-1; var besta=0;
  for(i=0;i<MC_NPAT;i++){ var a=abs(cdl[i]); if(a>besta){ besta=a; best=i; } }
  if(best<0) return MC_NONE;
  if(besta < thr) return MC_NONE;
  int bull = (cdl[best] > 0);
  return 1 + 2*best + bull;
}
int MC_indexFromState(int s){ if(s<=0) return -1; return (s-1)/2; }
int MC_isBull(int s){ if(s<=0) return 0; return ((s-1)%2)==1; }

void MC_update(int sPrev,int sCur)
{
  if(sPrev<0) return;
  MC_Count[MC_IDX(sPrev,sCur)] += 1;
  MC_RowSum[sPrev]             += 1;
}

var MC_prob(int s,int t)
{
  var num = (var)MC_Count[MC_IDX(s,t)] + MC_LAPLACE;
  var den = (var)MC_RowSum[s] + MC_LAPLACE*MC_STATES;
  if(den<=0) return 1.0/MC_STATES;
  return num/den;
}

var MC_nextBullishProb(int s)
{
  if(s<0) return 0.5;
  int t; var pBull=0, pTot=0;
  for(t=1;t<MC_STATES;t++){ var p=MC_prob(s,t); pTot += p; if(MC_isBull(t)) pBull += p; }
  if(pTot<=0) return 0.5;
  return pBull/pTot;
}

var MC_rowEntropy01(int s)
{
  if(s<0) return 1.0;
  int t; var H=0, Z=0;
  for(t=1;t<MC_STATES;t++){ var p=MC_prob(s,t); Z+=p; }
  if(Z<=0) return 1.0;
  for(t=1;t<MC_STATES;t++){
    var p=MC_prob(s,t)/Z;
    if(p>0) H += -p*log(p);
  }
  var Hmax = log(MC_STATES-1);
  if(Hmax<=0) return 0;
  return H/Hmax;
}

// ================= HARMONIC D-TREE (engine) =================
typedef struct Node { var v; var r; void* c; int n; int d; } Node;
Node* Root;
Node** G_TreeIdx;  int G_TreeN; int G_TreeCap; var G_DTreeExp;

// ------- helpers (built-ins used) -------
int randint(int lo,int hi){ return lo + (int)random(hi - lo + 1); }      // [lo..hi]
var randu(var a,var b){ return a + random(b - a); }                       // [a..b)
var randsign(){ return ifelse(random(1) < 0.5, -1.0, 1.0); }
var mapUnit(var u,var lo,var hi){ u = clamp(u,-1.,1.); var t=0.5*(u+1.0); return lo + t*(hi-lo); }

void pushTreeNode(Node* u){ if(G_TreeN < G_TreeCap) G_TreeIdx[G_TreeN++] = u; }
void indexTreeDFS(Node* u){ if(!u) return; pushTreeNode(u); int i; for(i=0;i<u->n;i++) indexTreeDFS(((Node**)u->c)[i]); }

Node* createNode(int depth)
{
  Node* u = (Node*)malloc(sizeof(Node));
  u->v = 2*random(1)-1;                             // [-1..1)
  u->r = 0.01 + 0.02*depth + random(1)*0.005;       // small positive
  u->d = depth;
  if(depth > 0){
    u->n = randint(1, MAX_BRANCHES);
    u->c = malloc(u->n * sizeof(void*));
    int i; for(i=0;i<u->n;i++) ((Node**)u->c)[i] = createNode(depth - 1);
  } else { u->n = 0; u->c = 0; }
  return u;
}
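// evaluateNode: depth-weighted harmonic relaxation. Children are summed into
// the node's sinusoid phase; the blend weight 1/(d+1)^G_DTreeExp lets nodes
// with small d (the leaves) adapt fastest while the root drifts slowly.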
var evaluateNode(Node* u)
{
  if(!u) return 0;
  var sum=0; int i; for(i=0;i<u->n;i++) sum += evaluateNode(((Node**)u->c)[i]);
  var phase  = sin(u->r * Bar + sum);
  var weight = 1.0 / pow(u->d + 1, G_DTreeExp);
  u->v = (1 - weight)*u->v + weight*phase;
  return u->v;
}
int countNodes(Node* u){ if(!u) return 0; int c=1,i; for(i=0;i<u->n;i++) c += countNodes(((Node**)u->c)[i]); return c; }
void freeTree(Node* u){ if(!u) return; int i; for(i=0;i<u->n;i++) freeTree(((Node**)u->c)[i]); if(u->c) free(u->c); free(u); }

// =========== NETWORK STATE & COEFFICIENTS ===========
int   G_N  = NET_EQNS;
int   G_D  = DEGREE;
int   G_K  = KPROJ;

var*  G_State; var*  G_Prev; var*  G_Vel;
int*  G_Adj;  var*   G_RP;   var*  G_Z;

int*  G_Mode;
var*  G_WSelf; var*  G_WN1; var*  G_WN2; var*  G_WGlob1; var*  G_WGlob2; var*  G_WMom; var*  G_WTree; var*  G_WAdv;

var*  A1x;  var*  A1lam;  var*  A1mean;  var*  A1E;  var*  A1P;  var*  A1i;  var*  A1c;
var*  A2x;  var*  A2lam;  var*  A2mean;  var*  A2E;  var*  A2P;  var*  A2i;  var*  A2c;

var*  G1mean; var*  G1E; var*  G2P; var*  G2lam;

var*  G_TreeTerm; int*  G_TopEq; var*  G_TopW; int*  G_EqTreeId; var*  TAlpha; var*  TBeta;
var*  G_Pred; var*  G_AdvScore;

var*  G_PropRaw; var*  G_Prop;
string* G_Sym;

// Markov features exposed to DTREE
var G_MCF_PBull, G_MCF_Entropy, G_MCF_State;

// epoch/context & feedback
int    G_Epoch = 0;
int    G_CtxID = 0;
var    G_FB_A = 0.7;
var    G_FB_B = 0.3;

// ---------- predictability from D-tree ----------
var nodePredictability(Node* t)
{
  if(!t) return 0.5;
  var disp=0; int n=t->n, i;
  for(i=0;i<n;i++){ Node* c=((Node**)t->c)[i]; disp += abs(c->v - t->v); }
  if(n>0) disp /= n;
  var depthFac = 1.0/(1+t->d);
  var rateBase = 0.01 + 0.02*t->d;
  var rateFac  = exp(-25.0*abs(t->r - rateBase));
  var p = 0.5*(depthFac + rateFac);
  p = 0.5*p + 0.5*(1.0/(1.0 + disp));
  return clamp(p,0,1);
}

// filenames
void buildEqFileName(int idx, char* outName /*>=64*/) { strcpy(outName, strf("Log\\%s_eq_%03i.csv", ALGO_NAME, idx)); }

// --------- allocation ----------
void allocateNet()
{
  int N=G_N, D=G_D, K=G_K;
  G_State=(var*)malloc(N*sizeof(var));  G_Prev=(var*)malloc(N*sizeof(var));  G_Vel=(var*)malloc(N*sizeof(var));
  G_Adj=(int*)malloc(N*D*sizeof(int));
  G_RP=(var*)malloc(K*N*sizeof(var));   G_Z=(var*)malloc(K*sizeof(var));
  G_Mode=(int*)malloc(N*sizeof(int));
  G_WSelf=(var*)malloc(N*sizeof(var));  G_WN1=(var*)malloc(N*sizeof(var));   G_WN2=(var*)malloc(N*sizeof(var));
  G_WGlob1=(var*)malloc(N*sizeof(var)); G_WGlob2=(var*)malloc(N*sizeof(var));
  G_WMom=(var*)malloc(N*sizeof(var));   G_WTree=(var*)malloc(N*sizeof(var)); G_WAdv=(var*)malloc(N*sizeof(var));
  A1x=(var*)malloc(N*sizeof(var)); A1lam=(var*)malloc(N*sizeof(var)); A1mean=(var*)malloc(N*sizeof(var));
  A1E=(var*)malloc(N*sizeof(var)); A1P=(var*)malloc(N*sizeof(var));   A1i=(var*)malloc(N*sizeof(var)); A1c=(var*)malloc(N*sizeof(var));
  A2x=(var*)malloc(N*sizeof(var)); A2lam=(var*)malloc(N*sizeof(var)); A2mean=(var*)malloc(N*sizeof(var));
  A2E=(var*)malloc(N*sizeof(var)); A2P=(var*)malloc(N*sizeof(var));   A2i=(var*)malloc(N*sizeof(var)); A2c=(var*)malloc(N*sizeof(var));
  G1mean=(var*)malloc(N*sizeof(var)); G1E=(var*)malloc(N*sizeof(var));
  G2P=(var*)malloc(N*sizeof(var));    G2lam=(var*)malloc(N*sizeof(var));
  G_TreeTerm=(var*)malloc(N*sizeof(var)); G_TopEq=(int*)malloc(N*sizeof(int)); G_TopW=(var*)malloc(N*sizeof(var));
  TAlpha=(var*)malloc(N*sizeof(var));     TBeta=(var*)malloc(N*sizeof(var));
  G_Pred=(var*)malloc(N*sizeof(var)); G_AdvScore=(var*)malloc(N*sizeof(var));
  G_PropRaw=(var*)malloc(N*sizeof(var));  G_Prop=(var*)malloc(N*sizeof(var));
  G_Sym=(string*)malloc(N*sizeof(string));
  G_TreeCap=512; G_TreeIdx=(Node**)malloc(G_TreeCap*sizeof(Node*)); G_TreeN=0;
  G_EqTreeId=(int*)malloc(N*sizeof(int));

  int i;
  for(i=0;i<N;i++){
    G_State[i]=2*random(1)-1; G_Prev[i]=G_State[i]; G_Vel[i]=0;
    G_Mode[i]=0;
    G_WSelf[i]=0.5; G_WN1[i]=0.2; G_WN2[i]=0.2; G_WGlob1[i]=0.1; G_WGlob2[i]=0.1; G_WMom[i]=0.05; G_WTree[i]=0.15; G_WAdv[i]=0.15;
    A1x[i]=1; A1lam[i]=0.1; A1mean[i]=0; A1E[i]=0; A1P[i]=0; A1i[i]=0; A1c[i]=0;
    A2x[i]=1; A2lam[i]=0.1; A2mean[i]=0; A2E[i]=0; A2P[i]=0; A2i[i]=0; A2c[i]=0;
    G1mean[i]=1.0; G1E[i]=0.001; G2P[i]=0.6; G2lam[i]=0.3;
    TAlpha[i]=0.8; TBeta[i]=25.0;
    G_TreeTerm[i]=0; G_TopEq[i]=-1; G_TopW[i]=0;
    G_Pred[i]=0.5;   G_AdvScore[i]=0;
    G_PropRaw[i]=1;  G_Prop[i]=1.0/G_N;
    G_Sym[i]=(char*)malloc(1024); strcpy(G_Sym[i],"");
  }
}
void freeNet()
{
  int i;
  if(G_State)free(G_State); if(G_Prev)free(G_Prev); if(G_Vel)free(G_Vel);
  if(G_Adj)free(G_Adj); if(G_RP)free(G_RP); if(G_Z)free(G_Z);
  if(G_Mode)free(G_Mode); if(G_WSelf)free(G_WSelf); if(G_WN1)free(G_WN1); if(G_WN2)free(G_WN2);
  if(G_WGlob1)free(G_WGlob1); if(G_WGlob2)free(G_WGlob2); if(G_WMom)free(G_WMom);
  if(G_WTree)free(G_WTree); if(G_WAdv)free(G_WAdv);
  if(A1x)free(A1x); if(A1lam)free(A1lam); if(A1mean)free(A1mean); if(A1E)free(A1E); if(A1P)free(A1P); if(A1i)free(A1i); if(A1c)free(A1c);
  if(A2x)free(A2x); if(A2lam)free(A2lam); if(A2mean)free(A2mean); if(A2E)free(A2E); if(A2P)free(A2P); if(A2i)free(A2i); if(A2c)free(A2c);
  if(G1mean)free(G1mean); if(G1E)free(G1E); if(G2P)free(G2P); if(G2lam)free(G2lam);
  if(G_TreeTerm)free(G_TreeTerm); if(G_TopEq)free(G_TopEq); if(G_TopW)free(G_TopW);
  if(TAlpha)free(TAlpha); if(TBeta)free(TBeta);
  if(G_Pred)free(G_Pred); if(G_AdvScore)free(G_AdvScore);
  if(G_PropRaw)free(G_PropRaw); if(G_Prop)free(G_Prop);
  if(G_Sym){ for(i=0;i<G_N;i++) if(G_Sym[i]) free(G_Sym[i]); free(G_Sym); }
  if(G_TreeIdx)free(G_TreeIdx); if(G_EqTreeId)free(G_EqTreeId);
}

// --------- random projection ----------
void randomizeRP(){ int K=G_K,N=G_N,k,j; for(k=0;k<K;k++) for(j=0;j<N;j++) G_RP[k*N+j]=ifelse(random(1)<0.5,-1.0,1.0); }
void computeProjection(){ int K=G_K,N=G_N,k,j; for(k=0;k<K;k++){ var acc=0; for(j=0;j<N;j++) acc+=G_RP[k*N+j]*(G_State[j]*G_State[j]); G_Z[k]=acc; }}

// --------- build features for DTREE (EXTENDED with Markov) ----------
void buildEqFeatures(int i, var lambda, var mean, var energy, var power, var* S /*ADV_EQ_NF*/)
{
  Node* t=G_TreeIdx[G_EqTreeId[i]];
  S[0]=safeSig(G_State[i]);   S[1]=safeSig(mean);     S[2]=safeSig(power); S[3]=safeSig(energy);
  S[4]=safeSig(lambda);       S[5]=safeSig(G_Pred[i]); S[6]=safeSig(t->d);  S[7]=safeSig(t->r);
  S[8]=safeSig(G_TreeTerm[i]); S[9]=safeSig(G_Mode[i]);
  S[10]=safeSig(G_MCF_PBull);  S[11]=safeSig(G_MCF_Entropy);
}
void buildPairFeatures(int i,int j, var lambda, var mean, var energy, var power, var* P /*ADV_PAIR_NF*/)
{
  Node* ti=G_TreeIdx[G_EqTreeId[i]];
  Node* tj=G_TreeIdx[G_EqTreeId[j]];
  P[0]=safeSig(G_State[i]); P[1]=safeSig(G_State[j]);
  P[2]=safeSig(ti->d);      P[3]=safeSig(tj->d);
  P[4]=safeSig(ti->r);      P[5]=safeSig(tj->r);
  P[6]=safeSig(abs(P[2]-P[3])); P[7]=safeSig(abs(P[4]-P[5]));
  P[8]=safeSig(G_Pred[i]*G_Pred[j]);
  P[9]=safeSig(lambda); P[10]=safeSig(mean); P[11]=safeSig(power);
}

// --------- DTREE advice wrappers ----------
var adviseEq(int i, var lambda, var mean, var energy, var power)
{
  var S[ADV_EQ_NF]; buildEqFeatures(i,lambda,mean,energy,power,S);
  var a = adviseLong(DTREE+RETURNS, 0, S, ADV_EQ_NF); // RETURNS => use next trade return as target in Train
  return a/100.; // advise output is in [-100..100]; scale to [-1..1]
}
var advisePair(int i,int j, var lambda, var mean, var energy, var power)
{
  var P[ADV_PAIR_NF]; buildPairFeatures(i,j,lambda,mean,energy,power,P);
  var a = adviseLong(DTREE+RETURNS, 0, P, ADV_PAIR_NF);
  return a/100.; // advise output is in [-100..100]; scale to [-1..1]
}

// --------- DTREE-driven adjacency selection ----------
void rewireAdjacency_DTREE(var lambda, var mean, var energy, var power)
{
  int N=G_N, D=G_D, i, d, c, best, cand;
  for(i=0;i<N;i++){
    for(d=0; d<D; d++){
      var bestScore = -2; best = -1;
      for(c=0;c<CAND_NEIGH;c++){
        cand = randint(0,N-1);
        if(cand==i) continue;
        int clash=0, k; for(k=0;k<d;k++) if(G_Adj[i*D+k]==cand){clash=1; break;}
        if(clash) continue;
        var s = advisePair(i,cand,lambda,mean,energy,power);
        if(s > bestScore){ bestScore=s; best=cand; }
      }
      if(best<0){ do{ best = randint(0,N-1);} while(best==i); }
      G_Adj[i*D + d] = best;
    }
  }
}

// --------- DTREE-created coefficients, modes & proportions ----------
void synthesizeEquationFromDTREE(int i, var lambda, var mean, var energy, var power)
{
  var a_mode = adviseEq(i,lambda,mean,energy,power);
  G_Mode[i] = (int)(abs(a_mode*1000)) & 3;

  var a_wself = adviseEq(i,lambda,mean,energy,power);
  var a_wn1   = adviseEq(i,lambda,mean,energy,power);
  var a_wn2   = adviseEq(i,lambda,mean,energy,power);
  var a_g1    = adviseEq(i,lambda,mean,energy,power);
  var a_g2    = adviseEq(i,lambda,mean,energy,power);
  var a_mom   = adviseEq(i,lambda,mean,energy,power);
  var a_tree  = adviseEq(i,lambda,mean,energy,power);
  var a_adv   = adviseEq(i,lambda,mean,energy,power);

  G_WSelf[i]  = mapUnit(a_wself, 0.15, 0.85);
  G_WN1[i]    = mapUnit(a_wn1,   0.05, 0.35);
  G_WN2[i]    = mapUnit(a_wn2,   0.05, 0.35);
  G_WGlob1[i] = mapUnit(a_g1,    0.05, 0.30);
  G_WGlob2[i] = mapUnit(a_g2,    0.05, 0.30);
  G_WMom[i]   = mapUnit(a_mom,   0.02, 0.15);
  G_WTree[i]  = mapUnit(a_tree,  0.05, 0.35);
  G_WAdv[i]   = mapUnit(a_adv,   0.05, 0.35);

  var a1=adviseEq(i,lambda,mean,energy,power);
  var a2=adviseEq(i,lambda,mean,energy,power);
  var a3=adviseEq(i,lambda,mean,energy,power);
  var a4=adviseEq(i,lambda,mean,energy,power);
  var a5=adviseEq(i,lambda,mean,energy,power);
  var a6=adviseEq(i,lambda,mean,energy,power);
  var a7=adviseEq(i,lambda,mean,energy,power);

  A1x[i]   = randsign()*mapUnit(a1, 0.6, 1.2);
  A1lam[i] = randsign()*mapUnit(a2, 0.05,0.35);
  A1mean[i]= mapUnit(a3,-0.30,0.30);
  A1E[i]   = mapUnit(a4,-0.0015,0.0015);
  A1P[i]   = mapUnit(a5,-0.30,0.30);
  A1i[i]   = mapUnit(a6,-0.02,0.02);
  A1c[i]   = mapUnit(a7,-0.20,0.20);

  var b1=adviseEq(i,lambda,mean,energy,power);
  var b2=adviseEq(i,lambda,mean,energy,power);
  var b3=adviseEq(i,lambda,mean,energy,power);
  var b4=adviseEq(i,lambda,mean,energy,power);
  var b5=adviseEq(i,lambda,mean,energy,power);
  var b6=adviseEq(i,lambda,mean,energy,power);
  var b7=adviseEq(i,lambda,mean,energy,power);

  A2x[i]   = randsign()*mapUnit(b1, 0.6, 1.2);
  A2lam[i] = randsign()*mapUnit(b2, 0.05,0.35);
  A2mean[i]= mapUnit(b3,-0.30,0.30);
  A2E[i]   = mapUnit(b4,-0.0015,0.0015);
  A2P[i]   = mapUnit(b5,-0.30,0.30);
  A2i[i]   = mapUnit(b6,-0.02,0.02);
  A2c[i]   = mapUnit(b7,-0.20,0.20);

  var c1=adviseEq(i,lambda,mean,energy,power);
  var c2=adviseEq(i,lambda,mean,energy,power);
  var d1=adviseEq(i,lambda,mean,energy,power);
  var d2=adviseEq(i,lambda,mean,energy,power);
  G1mean[i] = mapUnit(c1, 0.4, 1.6);
  G1E[i]    = mapUnit(c2,-0.004,0.004);
  G2P[i]    = mapUnit(d1, 0.1, 1.2);
  G2lam[i]  = mapUnit(d2, 0.05,0.7);

  var e1=adviseEq(i,lambda,mean,energy,power);
  var e2=adviseEq(i,lambda,mean,energy,power);
  TAlpha[i] = mapUnit(e1, 0.3, 1.5);
  TBeta[i]  = mapUnit(e2, 6.0, 50.0);

  var p = adviseEq(i,lambda,mean,energy,power);
  G_PropRaw[i] = 0.01 + 0.99 * (0.5*(p+1.0));
}

void normalizeProportions()
{
  int N=G_N,i; var s=0; for(i=0;i<N;i++) s += G_PropRaw[i];
  if(s<=0) { for(i=0;i<N;i++) G_Prop[i] = 1.0/N; return; }
  for(i=0;i<N;i++) G_Prop[i] = G_PropRaw[i]/s;
}

// --------- DTree proportional coupling ----------
var dtreeTerm(int i, int* outTopEq, var* outTopW)
{
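  // Coupling kernel: w = exp(-TAlpha*|d_i-d_j|) * exp(-TBeta*|r_i-r_j|),
  // boosted by joint predictability and allocated proportion, then nudged
  // by pair advice. Returns the w-weighted mean of the other states and
  // reports the strongest peer via outTopEq/outTopW.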
  int N=G_N,j;
  int tid_i=G_EqTreeId[i]; Node* ti=G_TreeIdx[tid_i]; int di=ti->d; var ri=ti->r;
  var alpha=TAlpha[i], beta=TBeta[i];

  var sumw=0, acc=0, bestW=-1; int bestJ=-1;
  for(j=0;j<N;j++){
    if(j==i) continue;
    int tid_j=G_EqTreeId[j]; Node* tj=G_TreeIdx[tid_j]; int dj=tj->d; var rj=tj->r;

    var w = exp(-alpha*abs(di-dj)) * exp(-beta*abs(ri-rj));
    var predBoost = 0.5 + 0.5*(G_Pred[i]*G_Pred[j]);
    var propBoost = 0.5 + 0.5*( (G_Prop[i] + G_Prop[j]) );
    w *= predBoost * propBoost;

    var pairAdv = advisePair(i,j,0,0,0,0);
    w *= (0.75 + 0.25*(0.5*(pairAdv+1.0)));

    sumw += w; acc += w*G_State[j];
    if(w>bestW){bestW=w; bestJ=j;}
  }
  if(outTopEq) *outTopEq = bestJ;
  if(outTopW)  *outTopW  = ifelse(sumw>0, bestW/sumw, 0);
  if(sumw>0) return acc/sumw; return 0;
}

// --------- symbolic expression builder ----------
void buildSymbolicExpr(int i, int n1, int n2)
{
  string s = G_Sym[i]; strcpy(s,"");
  string a1 = strf("(%.3f*x[%i] + %.3f*lam + %.3f*mean + %.5f*E + %.3f*P + %.3f*i + %.3f)",
                   A1x[i], n1, A1lam[i], A1mean[i], A1E[i], A1P[i], A1i[i], A1c[i]);
  string a2 = strf("(%.3f*x[%i] + %.3f*lam + %.3f*mean + %.5f*E + %.3f*P + %.3f*i + %.3f)",
                   A2x[i], n2, A2lam[i], A2mean[i], A2E[i], A2P[i], A2i[i], A2c[i]);

  strcat(s, "x[i]_next = ");
  strcat(s, strf("%.3f*x[i] + ", G_WSelf[i]));
  if(G_Mode[i]==0){ strcat(s, strf("%.3f*sin%s + ", G_WN1[i], a1)); strcat(s, strf("%.3f*cos%s + ", G_WN2[i], a2)); }
  else if(G_Mode[i]==1){ strcat(s, strf("%.3f*tanh%s + ", G_WN1[i], a1)); strcat(s, strf("%.3f*sin%s + ", G_WN2[i], a2)); }
  else if(G_Mode[i]==2){ strcat(s, strf("%.3f*cos%s + ", G_WN1[i], a1)); strcat(s, strf("%.3f*tanh%s + ", G_WN2[i], a2)); }
  else { strcat(s, strf("%.3f*sin%s + ", G_WN1[i], a1)); strcat(s, strf("%.3f*cos%s + ", G_WN2[i], a2)); }

  strcat(s, strf("%.3f*tanh(%.3f*mean + %.5f*E) + ", G_WGlob1[i], G1mean[i], G1E[i]));
  strcat(s, strf("%.3f*sin(%.3f*P + %.3f*lam) + ",   G_WGlob2[i], G2P[i],   G2lam[i]));
  strcat(s, strf("%.3f*(x[i]-x_prev[i]) + ",         G_WMom[i]));
  strcat(s, strf("Prop[i]=%.4f; ",                    G_Prop[i]));
  strcat(s, strf("%.3f*DT(i) + ",                    G_WTree[i]));
  strcat(s, strf("%.3f*DTREE(i)",                    G_WAdv[i]  ));
}

// --------- one-time rewire init ----------
void rewireInit()
{
  randomizeRP(); computeProjection();
  G_TreeN=0; indexTreeDFS(Root);
  int i; for(i=0;i<G_N;i++) G_EqTreeId[i] = i % G_TreeN;
}

// --------- epoch rewire ----------
void rewireEpoch(var lambda, var mean, var energy, var power)
{
  int i;
  for(i=0;i<G_N;i++){ Node* t=G_TreeIdx[G_EqTreeId[i]]; G_Pred[i]=nodePredictability(t); }
  rewireAdjacency_DTREE(lambda,mean,energy,power);
  for(i=0;i<G_N;i++) synthesizeEquationFromDTREE(i,lambda,mean,energy,power);
  normalizeProportions();
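  // Tag this wiring with a context ID: integer-hash the adjacency list
  // (multiplicative hash with an odd constant) and mix in the epoch.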
  int D=G_D; int h=0; for(i=0;i<G_N*D;i++) h = (h*1315423911) ^ G_Adj[i];
  G_CtxID = (h ^ (G_Epoch<<8)) & 0x7FFFFFFF;
  for(i=0;i<G_N;i++){ int n1=G_Adj[i*G_D+0], n2=G_Adj[i*G_D+1]; buildSymbolicExpr(i,n1,n2); }
}

// --------- compact driver ----------
var projectNet()
{
  int N=G_N,i; var sum=0,sumsq=0,cross=0;
  for(i=0;i<N;i++){ sum+=G_State[i]; sumsq+=G_State[i]*G_State[i]; if(i+1<N) cross+=G_State[i]*G_State[i+1]; }
  var mean=sum/N, corr=cross/(N-1);
  return 0.6*tanh(mean + 0.001*sumsq) + 0.4*sin(corr);
}

// --------- per-bar update ----------
void updateNet(var driver, var* outMean, var* outEnergy, var* outPower, int writeMeta)
{
  int N=G_N, D=G_D, i;

  var sum=0,sumsq=0; for(i=0;i<N;i++){ sum+=G_State[i]; sumsq+=G_State[i]*G_State[i]; }
  var mean=sum/N, energy=sumsq, power=sumsq/N;

  for(i=0;i<N;i++){ Node* t=G_TreeIdx[G_EqTreeId[i]]; G_Pred[i]=nodePredictability(t); }

  for(i=0;i<N;i++){
    int n1=G_Adj[i*D+0], n2=G_Adj[i*D+1];
    var xi=G_State[i], xn1=G_State[n1], xn2=G_State[n2], mom=xi-G_Prev[i];

    int topEq=-1; var topW=0;
    var dt = dtreeTerm(i,&topEq,&topW);
    G_TreeTerm[i]=dt; G_TopEq[i]=topEq; G_TopW[i]=topW;

    var adv = adviseEq(i, driver, mean, energy, power);
    G_AdvScore[i] = adv;

    var arg1=A1x[i]*xn1 + A1lam[i]*driver + A1mean[i]*mean + A1E[i]*energy + A1P[i]*power + A1i[i]*i + A1c[i];
    var arg2=A2x[i]*xn2 + A2lam[i]*driver + A2mean[i]*mean + A2E[i]*energy + A2P[i]*power + A2i[i]*i + A2c[i];

    var nl1,nl2;
    if(G_Mode[i]==0){ nl1=sin(arg1); nl2=cos(arg2); }
    else if(G_Mode[i]==1){ nl1=tanh(arg1); nl2=sin(arg2); }
    else if(G_Mode[i]==2){ nl1=cos(arg1);  nl2=tanh(arg2); }
    else { nl1=sin(arg1); nl2=cos(arg2); }

    var glob1=tanh(G1mean[i]*mean + G1E[i]*energy);
    var glob2=sin (G2P[i]*power + G2lam[i]*driver);

    var xNew =
      G_WSelf[i]*xi +
      G_WN1[i]*nl1 +
      G_WN2[i]*nl2 +
      G_WGlob1[i]*glob1 +
      G_WGlob2[i]*glob2 +
      G_WMom[i]*mom +
      G_WTree[i]*dt +
      G_WAdv[i] *adv;
		
    // prevent runaway values: NaN -> 0, then clamp to +/-1e6
    if(xNew != xNew) xNew = 0;
    else {
      if(xNew >  1e6) xNew =  1e6;
      if(xNew < -1e6) xNew = -1e6;
    }

    G_Prev[i]=xi; G_Vel[i]=xNew-xi; G_State[i]=xNew;

    if(writeMeta){
      char fname[64]; buildEqFileName(i,fname);
      int tid=G_EqTreeId[i]; Node* t=G_TreeIdx[tid];
      int nn1=G_Adj[i*D+0], nn2=G_Adj[i*D+1];
      file_append(fname,
        strf("META,%i,%i,%i,%i,%i,%i,%i,%i,%.6f,Pred=%.4f,Adv=%.4f,Prop=%.6f,Mode=%i,WAdv=%.3f,WTree=%.3f,PBull=%.4f,Ent=%.4f,State=%i,\"%s\"\n",
          G_Epoch, G_CtxID, NET_EQNS, i, nn1, nn2, tid, t->d, t->r,
          G_Pred[i], G_AdvScore[i], G_Prop[i], G_Mode[i], G_WAdv[i], G_WTree[i],
          G_MCF_PBull, G_MCF_Entropy, MC_Cur, G_Sym[i]));
    }
  }
  if(outMean) *outMean=mean; if(outEnergy) *outEnergy=energy; if(outPower) *outPower=power;
}

// ----------------- MAIN -----------------
function run()
{
  // ===== required for ML training / auto-test =====
  NumWFOCycles = 5;                 // WFO is recommended for ML
  set(RULES|TESTNOW|PLOTNOW);       // generate rules; auto-test after Train
  if(Train){                        // RETURNS target = next trade's P/L
    Hedge   = 2;                    // allow simultaneous L/S during training
    LifeTime= 1;                    // 1-bar horizon for return labeling
  } else {
    MaxLong = MaxShort = 1;         // clean behavior in Test/Trade
  }

  // ===== init once =====
  static int initialized=0;
  static var lambda;
  static int fileInit=0;

  if(LookBack < NWIN) LookBack = NWIN;
  asset(ASSET_SYMBOL);
  algo(ALGO_NAME);

  if(is(INITRUN) && !initialized){
    seed(365);  // <<< ensure deterministic Train/Test advise order

    var tmp[MC_NPAT]; buildCDL_TA61(tmp, MC_Names);

    Root=createNode(MAX_DEPTH);
    allocateNet();
    MC_alloc();

    G_DTreeExp = randu(1.10,1.60);
    G_FB_A     = randu(0.60,0.85);
    G_FB_B     = 1.0 - G_FB_A;

    randomizeRP(); computeProjection();
    rewireInit();

    // First epoch synthesis
    G_Epoch = 0;
    rewireEpoch(0,0,0,0);

    // Prepare per-equation CSVs
    char fname[64]; int i;
    for(i=0;i<NET_EQNS;i++){
      buildEqFileName(i,fname);
      file_append(fname,
        "Bar,lambda,gamma,i,State,n1,n2,mean,energy,power,Vel,Mode,WAdv,WSelf,WN1,WN2,WGlob1,WGlob2,WMom,WTree,Pred,Adv,Prop,TreeTerm,TopEq,TopW,TreeId,Depth,Rate,PBull,Entropy,MCState\n");
    }
    if(!fileInit){
      file_append(strf("Log\\%s_markov.csv",ALGO_NAME),"Bar,State,PBullNext,Entropy,RowSum\n");
      fileInit=1;
    }

    // Initial META
    for(i=0;i<G_N;i++){
      int n1=G_Adj[i*G_D+0], n2=G_Adj[i*G_D+1]; int tid=G_EqTreeId[i]; Node* t=G_TreeIdx[tid];
      char fname2[64]; buildEqFileName(i,fname2);
      file_append(fname2,
        strf("META,%i,%i,%i,%i,%i,%i,%i,%i,%.6f,Pred=%.4f,Adv=%.4f,Prop=%.6f,Mode=%i,WAdv=%.3f,WTree=%.3f,PBull=%.4f,Ent=%.4f,State=%i,\"%s\"\n",
          G_Epoch, G_CtxID, NET_EQNS, i, n1, n2, tid, t->d, t->r,
          G_Pred[i], G_AdvScore[i], G_Prop[i], G_Mode[i], G_WAdv[i], G_WTree[i],
          G_MCF_PBull, G_MCF_Entropy, MC_Cur, G_Sym[i]));
    }
    initialized=1;
    printf("\nRoot nodes: %i | Net equations: %i (deg=%i, kproj=%i)", countNodes(Root), G_N, G_D, G_K);
  }

  // ====== Per bar: Candles -> Markov
  static var CDL[MC_NPAT];
  buildCDL_TA61(CDL,0);
  MC_Cur = MC_stateFromCDL(CDL, MC_ACT);
  if(Bar > LookBack) MC_update(MC_Prev, MC_Cur);
  MC_Prev = MC_Cur;

  MC_PBullNext = MC_nextBullishProb(MC_Cur);
  MC_Entropy   = MC_rowEntropy01(MC_Cur);

  // expose global Markov features
  G_MCF_PBull   = MC_PBullNext;
  G_MCF_Entropy = MC_Entropy;
  G_MCF_State   = (var)MC_Cur;

  // ====== Tree driver lambda
  lambda = evaluateNode(Root);

  // Rewire epoch?
  int doRewire = ((Bar % REWIRE_EVERY) == 0);
  if(doRewire){
    G_Epoch++;
    int i; var sum=0; for(i=0;i<G_N;i++) sum += G_State[i];
    var mean = sum/G_N;
    var energy=0; for(i=0;i<G_N;i++) energy += G_State[i]*G_State[i];
    var power = energy/G_N;
    rewireEpoch(lambda,mean,energy,power);
  }

  // Update net this bar (write META only if rewired)
  var meanB, energyB, powerB;
  updateNet(lambda, &meanB, &energyB, &powerB, doRewire);

  // Feedback blend
  var gamma = projectNet();
  lambda = G_FB_A*lambda + G_FB_B*gamma;

  // --- safe plotting (after LookBack only) ---
  if(!is(LOOKBACK)){
    var lam = safeSig(lambda);
    var gam = safeSig(gamma);
    var pw  = safeSig(powerB);
    var pb  = clamp01(MC_PBullNext);
    var ent = clamp01(MC_Entropy);

    plot("lambda",     lam, LINE, 0);
    plot("gamma",      gam, LINE, 0);
    plot("P_win",      pw,  LINE, 0);
    plot("PBullNext",  pb,  LINE, 0);
    plot("MC_Entropy", ent, LINE, 0);
  }

  // Markov CSV log
  if(Bar % LOG_EVERY == 0){
    file_append(strf("Log\\%s_markov.csv",ALGO_NAME),
      strf("%i,%i,%.6f,%.6f,%i\n", Bar, MC_Cur, MC_PBullNext, MC_Entropy, MC_RowSum[MC_Cur]));
  }

  // ====== Entries ======
  if(Train){
    // Ensure samples for RETURNS training (hedged & 1-bar life set above)
    if(NumOpenLong  == 0) enterLong();
    if(NumOpenShort == 0) enterShort();
  } else {
    // Markov-gated live logic
    if( MC_PBullNext > PBULL_LONG_TH  && lambda >  0.7 ) enterLong();
    if( MC_PBullNext < PBULL_SHORT_TH && lambda < -0.7 ) enterShort();
  }
}

// Clean up memory
function cleanup()
{
  if(Root) freeTree(Root);
  MC_free();
  freeNet();
}

Last edited by TipmyPip; 09/04/25 18:18.
Gate-and-Flow Adaptive Navigator [Re: TipmyPip] #488876
09/06/25 00:26
09/06/25 00:26
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
Gate-and-Flow Adaptive Navigator

A. A small market lexicon
Market moments are compressed into a compact set of archetypes. Think of it as a modest alphabet describing what the tape “looks like” right now. From the stream of past archetypes, the system develops expectations about what tends to follow what, and how tightly those follow-ups cluster. Each moment it reads two quiet dials: a lean (directional tilt for the next step) and a clarity (how decisive that tilt appears). This pair forms a permission gate; sometimes it opens wide, sometimes it holds.
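
A minimal sketch of that gate, using the MC_nextBullishProb / MC_rowEntropy01 helpers from the engine code further down (the 0.60 cut echoes PBULL_LONG_TH below; the clarity cut is illustrative, not a tuned value):

Code
// lean = directional tilt of the next step; clarity = 1 - normalized entropy.
// Both dials must agree before permission opens on the long side.
int gateOpenLong(int state)
{
    var lean    = MC_nextBullishProb(state);   // 0..1
    var clarity = 1. - MC_rowEntropy01(state); // 0..1, higher = more decisive
    return lean > 0.60 && clarity > 0.50;
}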

B. A soft landscape of influences
Running beneath the gate is a smooth, continuously evolving field of interacting influences. Many small components co-exist; each carries a trace of its own past, listens to a couple of peers, feels market pulse across multiple horizons, and heeds coarse crowd summaries (central tendency, dispersion, co-movement). A hint of recent change is included so the field can tip faster when the tape turns. All signals are bounded. Attention is scarce—capital and focus are rationed proportionally, so louder agreement earns more weight while lone, weak voices fade.
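
In the engine below this landscape is the per-equation update; schematically:

Code
// x[i]' = wSelf*x[i]                                        // trace of its own past
//       + wN1*f1(x[n1]) + wN2*f2(x[n2])                     // two peers, bent by sin/cos/tanh
//       + wG1*tanh(c1*mean + c2*E) + wG2*sin(c3*P + c4*lam) // crowd summaries
//       + wMom*(x[i] - x_prev[i])                           // hint of recent change
//       + wTree*DT(i) + wAdv*DTREE(i)                       // harmonic tree & advisor
// every term is bounded, and the Prop[] weights ration attention across i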

C. Occasional reshaping of who listens to whom
At planned intervals, the “seating chart” is refreshed: which components attend to which others, how much weight each pathway receives, and which simple bends or transforms are active. Selection favors regular, well-behaved contributors, kindred pairings along a rhythm ladder (slow with slow, fast with fast when it helps), and compact combinations that don’t overfit. The structure molts rather than resets—the scaffold persists while connections and strengths are updated, preserving continuity.

D. Capacity that breathes, with guardrails
Model size is flexible. When resources are tight or added detail stops paying, the form trims the deepest, least helpful nuance. When there’s headroom and clear benefit, it adds a thin layer. Each change is tentative and reversible: small growth trials are evaluated after a delay; harmful expansions are rolled back. Utility balances two things: quality of alignment (signal usefulness) and a mild cost for resource consumption. The system seeks useful complexity, not maximal complexity.
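
Condensed from the edc_runtime() controller in the code below (the 0.01 epsilon mirrors it; utility is EW accuracy minus a memory penalty):

Code
if(!G_TunePending){
    G_UtilBefore = util_now();            // snapshot utility before the trial
    if(apply_grow_step()){                // add one frontier layer if headroom allows
        G_TunePending = 1; G_TuneStartBar = Bar;
    }
} else if(Bar - G_TuneStartBar >= TUNE_DELAY_BARS){
    if(util_now() + 0.01 < G_UtilBefore)  // the growth hurt more than it helped
        revert_last_grow();               // reversible: drop the new layer
    G_TunePending = 0;
}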

E. Permission meets timing and size
Trades happen only when permission (lean & clarity) is convincing and the soft landscape hums in the same key. The landscape then suggests when to act and how much to commit. Strength of agreement raises size; disagreement or ambiguity pushes toward patience. “No trade” is treated as a first-class decision, not a failure mode.
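
An illustrative sizing rule, purely hypothetical (the engine below enters fixed single positions):

Code
var lean   = MC_PBullNext - 0.5;            // gate tilt, -0.5..0.5
var accord = lean*lambda;                   // positive when gate & landscape agree
if(accord > 0.10){                          // hypothetical conviction threshold
    Lots = 1 + (int)(4*clamp(accord,0,1));  // stronger agreement -> larger size
    if(lean > 0) enterLong(); else enterShort();
}                                           // otherwise: no trade, by design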

F. A brief, human-readable diary
The engine keeps a compact ledger for audit and research: the current archetype, the two dials of the gate, and terse sketches of how influences combined to justify recent posture. The emphasis is on explainability without revealing recipes—clear enough for oversight, lean enough for speed.

G. What tends to emerge
Coherence without rigidity. When rhythms align, groups of components move as a unit; when clarity fades, solos recede. Adaptation is maintained through small, distributed adjustments rather than dramatic overhauls. The result is regime-following behavior that can stand down gracefully between regimes.

H. Risk doctrine as a controlling atmosphere
Exposure is capped at both gross and net levels; incremental size responds to confidence and recent variability. When the environment becomes noisy or resources get tight, the system de-emphasizes fine detail, concentrates on the few strongest agreements, and allows activity to fall toward neutral rather than force action. This keeps drawdown sensitivity and operational risk in check.

I. Memory, recency, and drift
Assessments use decaying memory so recent tape action matters more while older evidence fades. Because the gate and the landscape both learn from the stream, their alignment naturally drifts with the market: when relationships change, the system doesn’t snap—it glides toward the new posture at a controlled pace.
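
Concretely, every running estimate is one exponential update, the same form acc_update() uses in the code below:

Code
// m holds the estimate; a sets the horizon (mean memory ~1/a bars,
// half-life ~0.69/a bars). Old evidence fades geometrically.
m = (1-a)*m + a*x;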

J. Separation of roles

The lexicon gives a compact, discrete view of market context and provides the permission signal.

The landscape offers a continuous, multi-horizon view that shapes timing and size.

The reshaper keeps connections healthy and simple.

The capacity governor ensures the form remains useful under constraints.
Together they reduce overreaction to noise while still allowing timely response to structural change.

K. Practical trading implications

Expect fewer, stronger actions when the tape is muddled; more decisive engagement when the lexicon and landscape agree.

Expect the sizing to track consensus strength, not single-indicator extremes.

Expect the structure to age well: it refreshes itself without discarding accumulated context, aiming for stable behavior across regime shifts.

L. Philosophy in one line
Trade when the story is both clear and corroborated; keep the model light, the adjustments small, and the diary open.

Code
// ======================================================================
// Alpha12 - Markov-augmented Harmonic D-Tree Engine (Candlestick 122-dir)
// (with runtime memory shaping, selective depth pruning, and elastic accuracy-aware depth growth)
// ======================================================================

// ================= USER CONFIG =================
#define ASSET_SYMBOL   "EUR/USD"
#define BAR_PERIOD     60
#define MC_ACT         0.30       // threshold on |CDL| in [-1..1] to accept a pattern
#define PBULL_LONG_TH  0.60       // Markov gate for long
#define PBULL_SHORT_TH 0.40       // Markov gate for short

// ===== Debug toggles (Fix #1 - chart/watch growth off by default) =====
#define ENABLE_PLOTS   0    // 0 = no plot buffers; 1 = enable plot() calls
#define ENABLE_WATCH   0    // 0 = disable watch() probes; 1 = enable

// ================= ENGINE PARAMETERS =================
#define MAX_BRANCHES    3
#define MAX_DEPTH       4
#define NWIN            256
#define NET_EQNS        100
#define DEGREE          4
#define KPROJ           16
#define REWIRE_EVERY    127
#define CAND_NEIGH      8

// ===== LOGGING CONTROLS (memory management) =====
#define LOG_EQ_TO_ONE_FILE   1    // 1: single consolidated EQ CSV; 0: per-eq files
#define LOG_EXPR_TEXT        0    // 0: omit full expression (store signature only); 1: include text
#define META_EVERY           4    // write META every N rewires
#define LOG_EQ_SAMPLE        NET_EQNS // limit number of equations logged
#define EXPR_MAXLEN          512  // cap expression string

// decimate Markov log cadence
#define LOG_EVERY            16

// ---- DTREE feature sizes (extended for Markov features) ----
#define ADV_EQ_NF       12   // per-equation features
#define ADV_PAIR_NF     12   // per-pair features (kept for completeness; DTREE pair disabled)

// ================= Candles -> 122-state Markov =================
#define MC_NPAT    61
#define MC_STATES  123   // 1 + 2*MC_NPAT
#define MC_NONE    0
#define MC_LAPLACE 1.0

// ================= Runtime Memory / Accuracy Manager =================
#define MEM_BUDGET_MB        50
#define MEM_HEADROOM_MB       5
#define DEPTH_STEP_BARS      16
#define KEEP_CHILDREN_HI      2
#define KEEP_CHILDREN_LO      1
#define RUNTIME_MIN_DEPTH     2

int  G_ShedStage        = 0;        // 0..2
int  G_LastDepthActBar  = -999999;
int  G_ChartsOff        = 0;        // gates plot()
int  G_LogsOff          = 0;        // gates file_append cadence
int  G_SymFreed         = 0;        // expression buffers freed
int  G_RT_TreeMaxDepth  = MAX_DEPTH;

// ---- Accuracy sentinel (EW correlation of lambda vs gamma) ----
var  ACC_mx=0, ACC_my=0, ACC_mx2=0, ACC_my2=0, ACC_mxy=0;
var  G_AccCorr = 0;      // [-1..1]
var  G_AccBase = 0;      // first seen sentinel
int  G_HaveBase = 0;

// ---- Elastic depth tuner (small growth trials with rollback) ----
#define DEPTH_TUNE_BARS   64   // start a growth “trial” this often (when memory allows)
#define TUNE_DELAY_BARS   64   // evaluate the trial after this many bars

var  G_UtilBefore = 0, G_UtilAfter = 0;
int  G_TunePending = 0;
int  G_TuneStartBar = 0;
int  G_TuneAction   = 0;  // +1 grow trial, 0 none

// ======================================================================
//  (FIX) Move the type and globals used by mem_bytes_est() up here
// ======================================================================

// HARMONIC D-TREE type (we define it early so globals below compile fine)
typedef struct Node { var v; var r; void* c; int n; int d; } Node;

// Minimal globals needed before mem_bytes_est()
Node*  Root = 0;
Node** G_TreeIdx = 0; 
int    G_TreeN = 0; 
int    G_TreeCap = 0; 
var    G_DTreeExp = 0;

Node   G_DummyNode;   // defined early so treeAt() can return &G_DummyNode

// Network sizing globals (used by mem_bytes_est)
int   G_N  = NET_EQNS;
int   G_D  = DEGREE;
int   G_K  = KPROJ;

// Optional expression buffer pointer (referenced by mem_bytes_est)
string* G_Sym = 0;

// Forward decls that reference Node
var  nodeImportance(Node* u); // fwd decl (uses nodePredictability below)
void pruneSelectiveAtDepth(Node* u, int targetDepth, int keepK);
void reindexTreeAndMap();

// Forward decls for advisor functions (so adviseSeed can call them)
var adviseEq(int i, var lambda, var mean, var energy, var power);
var advisePair(int i,int j, var lambda, var mean, var energy, var power);

// ---- Advise budget/rotation (Fix #2) ----
#define ADVISE_MAX_EQ   16   // how many equations may use DTREE per bar
#define ADVISE_ROTATE    1   // 1 = rotate which equations get DTREE each bar

int allowAdvise(int i)
{
    if(ADVISE_ROTATE){
        int groups = NET_EQNS / ADVISE_MAX_EQ;
        if(groups < 1) groups = 1;
        return ((i / ADVISE_MAX_EQ) % groups) == (Bar % groups);
    } else {
        return (i < ADVISE_MAX_EQ);
    }
}
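// E.g. with NET_EQNS=100 and ADVISE_MAX_EQ=16 there are 6 rotation groups:
// each equation gets a DTREE call about once every 6 bars (by the modulo,
// equations 96..99 share group 0).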


// ---- tree byte size (counts nodes + child pointer arrays) ----
int tree_bytes(Node* u)
{
    if(!u) return 0;
    int SZV = sizeof(var), SZI = sizeof(int), SZP = sizeof(void*);
    int sz_node = 2*SZV + SZP + 2*SZI;
    int total = sz_node;
    if(u->n > 0 && u->c) total += u->n * SZP;
    int i;
    for(i=0;i<u->n;i++)
        total += tree_bytes(((Node**)u->c)[i]);
    return total;
}

// ======================================================================
// Conservative in-script memory estimator (arrays + pointers)
// ======================================================================
int mem_bytes_est()
{
    int N = G_N, D = G_D, K = G_K;
    int SZV = sizeof(var), SZI = sizeof(int), SZP = sizeof(void*);
    int b = 0;

    b += N*SZV*(3 + 8 + 7 + 7 + 4 + 2 + 2 + 2 + 2); // State/Prev/Vel; 8 W* weights; A1*; A2*; G1/G2; TreeTerm+TopW; TAlpha+TBeta; Pred+AdvScore; PropRaw+Prop
    b += N*SZI*(3);                    // G_Mode, G_TopEq, G_EqTreeId
    b += N*D*SZI;                      // G_Adj
    b += K*N*SZV;                      // G_RP
    b += K*SZV;                        // G_Z
    b += G_TreeCap*SZP;                // G_TreeIdx pointer vector
    if(G_Sym && !G_SymFreed) b += N*EXPR_MAXLEN; // optional expression buffers
    b += MC_STATES*MC_STATES*SZI + MC_STATES*SZI; // Markov
    b += tree_bytes(Root);                            // include D-Tree
    return b;
}

int mem_mb_est(){ return mem_bytes_est() / (1024*1024); }

// === total memory (Zorro-wide) in MB ===
int memMB(){ return (int)(memory(0)/(1024*1024)); }

// light one-shot shedding
void shed_zero_cost_once()
{
    if(G_ShedStage > 0) return;
    reset(PLOTNOW); G_ChartsOff = 1;    // clear the flag to stop chart buffers
    G_LogsOff = 1;                      // decimate logs (gated later)
    G_ShedStage = 1;
}

void freeExprBuffers()
{
    if(!G_Sym || G_SymFreed) return;
    int i; for(i=0;i<G_N;i++) if(G_Sym[i]) free(G_Sym[i]);
    free(G_Sym); G_Sym = 0; G_SymFreed = 1;
}

// depth manager (prune & shedding)
void depth_manager_runtime()
{
    int trigger = MEM_BUDGET_MB - MEM_HEADROOM_MB;
    int mb = mem_mb_est();
    if(mb < trigger) return;

    if(G_ShedStage == 0) shed_zero_cost_once();

    if(G_ShedStage <= 1){
        if(LOG_EXPR_TEXT==0 && !G_SymFreed) freeExprBuffers();
        G_ShedStage = 2;
    }

    int overBudget = (mb >= MEM_BUDGET_MB);
    if(!overBudget && (Bar - G_LastDepthActBar < DEPTH_STEP_BARS))
        return;

    while(G_RT_TreeMaxDepth > RUNTIME_MIN_DEPTH)
    {
        int keepK = ifelse(mem_mb_est() < MEM_BUDGET_MB + 2, KEEP_CHILDREN_HI, KEEP_CHILDREN_LO);
        pruneSelectiveAtDepth((Node*)Root, G_RT_TreeMaxDepth, keepK);
        G_RT_TreeMaxDepth--;
        reindexTreeAndMap();

        mb = mem_mb_est();
        printf("\n[DepthMgr] depth=%i keepK=%i est=%i MB", G_RT_TreeMaxDepth, keepK, mb);

        if(mb < trigger) break;
    }

    G_LastDepthActBar = Bar;
}

// ----------------------------------------------------------------------
// 61 candlestick patterns (Zorro spellings kept). Each returns [-100..100].
// We rescale to [-1..1] for Markov state construction.
// ----------------------------------------------------------------------
int buildCDL_TA61(var* out, string* names)
{
    int n = 0;
    #define ADD(Name, Call) do{ var v = (Call); out[n] = v/100.; if(names) names[n] = Name; n++; }while(0)

    ADD("CDL2Crows",              CDL2Crows());
    ADD("CDL3BlackCrows",         CDL3BlackCrows());
    ADD("CDL3Inside",             CDL3Inside());
    ADD("CDL3LineStrike",         CDL3LineStrike());
    ADD("CDL3Outside",            CDL3Outside());
    ADD("CDL3StarsInSouth",       CDL3StarsInSouth());
    ADD("CDL3WhiteSoldiers",      CDL3WhiteSoldiers());
    ADD("CDLAbandonedBaby",       CDLAbandonedBaby(0.3));
    ADD("CDLAdvanceBlock",        CDLAdvanceBlock());
    ADD("CDLBeltHold",            CDLBeltHold());
    ADD("CDLBreakaway",           CDLBreakaway());
    ADD("CDLClosingMarubozu",     CDLClosingMarubozu());
    ADD("CDLConcealBabysWall",    CDLConcealBabysWall());
    ADD("CDLCounterAttack",       CDLCounterAttack());
    ADD("CDLDarkCloudCover",      CDLDarkCloudCover(0.3));
    ADD("CDLDoji",                CDLDoji());
    ADD("CDLDojiStar",            CDLDojiStar());
    ADD("CDLDragonflyDoji",       CDLDragonflyDoji());
    ADD("CDLEngulfing",           CDLEngulfing());
    ADD("CDLEveningDojiStar",     CDLEveningDojiStar(0.3));
    ADD("CDLEveningStar",         CDLEveningStar(0.3));
    ADD("CDLGapSideSideWhite",    CDLGapSideSideWhite());
    ADD("CDLGravestoneDoji",      CDLGravestoneDoji());
    ADD("CDLHammer",              CDLHammer());
    ADD("CDLHangingMan",          CDLHangingMan());
    ADD("CDLHarami",              CDLHarami());
    ADD("CDLHaramiCross",         CDLHaramiCross());
    ADD("CDLHignWave",            CDLHignWave());
    ADD("CDLHikkake",             CDLHikkake());
    ADD("CDLHikkakeMod",          CDLHikkakeMod());
    ADD("CDLHomingPigeon",        CDLHomingPigeon());
    ADD("CDLIdentical3Crows",     CDLIdentical3Crows());
    ADD("CDLInNeck",              CDLInNeck());
    ADD("CDLInvertedHammer",      CDLInvertedHammer());
    ADD("CDLKicking",             CDLKicking());
    ADD("CDLKickingByLength",     CDLKickingByLength());
    ADD("CDLLadderBottom",        CDLLadderBottom());
    ADD("CDLLongLeggedDoji",      CDLLongLeggedDoji());
    ADD("CDLLongLine",            CDLLongLine());
    ADD("CDLMarubozu",            CDLMarubozu());
    ADD("CDLMatchingLow",         CDLMatchingLow());
    ADD("CDLMatHold",             CDLMatHold(0.5));
    ADD("CDLMorningDojiStar",     CDLMorningDojiStar(0.3));
    ADD("CDLMorningStar",         CDLMorningStar(0.3));
    ADD("CDLOnNeck",              CDLOnNeck());
    ADD("CDLPiercing",            CDLPiercing());
    ADD("CDLRickshawMan",         CDLRickshawMan());
    ADD("CDLRiseFall3Methods",    CDLRiseFall3Methods());
    ADD("CDLSeperatingLines",     CDLSeperatingLines());
    ADD("CDLShootingStar",        CDLShootingStar());
    ADD("CDLShortLine",           CDLShortLine());
    ADD("CDLSpinningTop",         CDLSpinningTop());
    ADD("CDLStalledPattern",      CDLStalledPattern());
    ADD("CDLStickSandwhich",      CDLStickSandwhich());
    ADD("CDLTakuri",              CDLTakuri());
    ADD("CDLTasukiGap",           CDLTasukiGap());
    ADD("CDLThrusting",           CDLThrusting());
    ADD("CDLTristar",             CDLTristar());
    ADD("CDLUnique3River",        CDLUnique3River());
    ADD("CDLUpsideGap2Crows",     CDLUpsideGap2Crows());
    ADD("CDLXSideGap3Methods",    CDLXSideGap3Methods());

    #undef ADD
    return n; // 61
}

// ================= Markov storage & helpers =================
static int* MC_Count;   // [MC_STATES*MC_STATES]
static int* MC_RowSum;  // [MC_STATES]
static int  MC_Prev = -1;
static int  MC_Cur  = 0;
static var  MC_PBullNext = 0.5;
static var  MC_Entropy   = 0.0;
static string MC_Names[MC_NPAT];

#define MC_IDX(fr,to) ((fr)*MC_STATES + (to))

int MC_stateFromCDL(var* cdl /*len=61*/, var thr)
{
    int i, best=-1; var besta=0;
    for(i=0;i<MC_NPAT;i++){
        var a = abs(cdl[i]);
        if(a>besta){ besta=a; best=i; }
    }
    if(best<0) return MC_NONE;
    if(besta < thr) return MC_NONE;
    int bull = (cdl[best] > 0);
    return 1 + 2*best + bull;  // 1..122
}
int MC_isBull(int s){ if(s<=0) return 0; return ((s-1)%2)==1; }

void MC_update(int sPrev,int sCur){ if(sPrev<0) return; MC_Count[MC_IDX(sPrev,sCur)]++; MC_RowSum[sPrev]++; }

var MC_prob(int s,int t){
    var num = (var)MC_Count[MC_IDX(s,t)] + MC_LAPLACE;
    var den = (var)MC_RowSum[s] + MC_LAPLACE*MC_STATES;
    if(den<=0) return 1.0/MC_STATES;
    return num/den;
}
var MC_nextBullishProb(int s){
    if(s<0) return 0.5;
    int t; var pBull=0, pTot=0;
    for(t=1;t<MC_STATES;t++){ var p=MC_prob(s,t); pTot+=p; if(MC_isBull(t)) pBull+=p; }
    if(pTot<=0) return 0.5;
    return pBull/pTot;
}
var MC_rowEntropy01(int s){
    if(s<0) return 1.0;
    int t; var H=0, Z=0;
    for(t=1;t<MC_STATES;t++){ var p=MC_prob(s,t); Z+=p; }
    if(Z<=0) return 1.0;
    for(t=1;t<MC_STATES;t++){ var p=MC_prob(s,t)/Z; if(p>0) H += -p*log(p); }
    var Hmax = log(MC_STATES-1);
    if(Hmax<=0) return 0;
    return H/Hmax;
}

// ================= HARMONIC D-TREE ENGINE =================

// ---------- utils ----------
var randsign(){ return ifelse(random(1) < 0.5, -1.0, 1.0); }
var mapUnit(var u,var lo,var hi){ if(u<-1) u=-1; if(u>1) u=1; var t=0.5*(u+1.0); return lo + t*(hi-lo); }

// ---- safety helpers ----
var safeNum(var x){ if(x!=x) return 0; if(x > 1e100) return 1e100; if(x < -1e100) return -1e100; return x; }
void sanitize(var* A,int n){ int k; for(k=0;k<n;k++) A[k]=safeNum(A[k]); }
var sat100(var x){ return clamp(x,-100,100); }

// ---- small string helpers (for memory-safe logging) ----
void strlcat_safe(string dst, string src, int cap)
{
    if(!dst || !src || cap <= 0) return;
    int dl = strlen(dst);
    int sl = strlen(src);
    int room = cap - 1 - dl;
    if(room <= 0){ if(cap > 0) dst[cap-1] = 0; return; }
    int i; for(i = 0; i < room && i < sl; i++) dst[dl + i] = src[i];
    dst[dl + i] = 0;
}

int countSubStr(string s, string sub){
    if(!s || !sub) return 0;
    int n=0; string p=s;
    int sublen = strlen(sub);
    if(sublen<=0) return 0;
    while((p=strstr(p,sub))){ n++; p += sublen; }
    return n;
}

// ---------- FIXED: use int (lite-C) and keep non-negative ----------
int djb2_hash(string s){
    int h = 5381, c, i = 0;
    if(!s) return h;
    while((c = s[i++])) h = ((h<<5)+h) ^ c;  // h*33 ^ c
    return h & 0x7fffffff;                   // force non-negative
}

// ---- tree helpers ----
int  validTreeIndex(int tid){ if(!G_TreeIdx) return 0; if(tid<0||tid>=G_TreeN) return 0; return (G_TreeIdx[tid]!=0); }
Node* treeAt(int tid){ if(validTreeIndex(tid)) return G_TreeIdx[tid]; return &G_DummyNode; }
int safeTreeIndexFromEq(int eqi){
    int denom = ifelse(G_TreeN>0, G_TreeN, 1);
    int tid = eqi;
    if(tid < 0) tid = 0;
    if(denom > 0) tid = tid % denom;
    if(tid < 0) tid = 0;
    return tid;
}

// ---- tree indexing ----
void pushTreeNode(Node* u){
    if(G_TreeN >= G_TreeCap){
        int newCap = G_TreeCap*2;
        if(newCap < 64) newCap = 64;
        G_TreeIdx = (Node**)realloc(G_TreeIdx, newCap*sizeof(Node*));
        G_TreeCap = newCap;
    }
    G_TreeIdx[G_TreeN++] = u;
}
void indexTreeDFS(Node* u){ if(!u) return; pushTreeNode(u); int i; for(i=0;i<u->n;i++) indexTreeDFS(((Node**)u->c)[i]); }

// ---- shrink index capacity after pruning (Fix #3) ----
void maybeShrinkTreeIdx(){
    if(!G_TreeIdx) return;
    if(G_TreeCap > 64 && G_TreeN < (G_TreeCap >> 1)){
        int newCap = (G_TreeCap >> 1);
        if(newCap < 64) newCap = 64;
        G_TreeIdx = (Node**)realloc(G_TreeIdx, newCap*sizeof(Node*));
        G_TreeCap = newCap;
    }
}

// ---- tree create/eval ----
Node* createNode(int depth)
{
    Node* u = (Node*)malloc(sizeof(Node));
    u->v = random();
    u->r = 0.01 + 0.02*depth + random(0.005);
    u->d = depth;
    if(depth > 0){
        u->n = 1 + (int)random(MAX_BRANCHES);
        u->c = malloc(u->n * sizeof(void*));
        int i; for(i=0;i<u->n;i++) ((Node**)u->c)[i] = createNode(depth - 1);
    } else { u->n = 0; u->c = 0; }
    return u;
}
var evaluateNode(Node* u)
{
    if(!u) return 0;
    var sum=0; int i; for(i=0;i<u->n;i++) sum += evaluateNode(((Node**)u->c)[i]);
    var phase  = sin(u->r * Bar + sum);
    var weight = 1.0 / pow(u->d + 1, G_DTreeExp);
    u->v = (1 - weight)*u->v + weight*phase;
    return u->v;
}
int countNodes(Node* u){ if(!u) return 0; int c=1,i; for(i=0;i<u->n;i++) c += countNodes(((Node**)u->c)[i]); return c; }
void freeTree(Node* u){ if(!u) return; int i; for(i=0;i<u->n;i++) freeTree(((Node**)u->c)[i]); if(u->c) free(u->c); free(u); }

// =========== NETWORK STATE & COEFFICIENTS ===========
var*  G_State; var*  G_Prev; var*  G_Vel;
int*  G_Adj;
var*  G_RP; var*  G_Z;
int*  G_Mode;
var*  G_WSelf; var*  G_WN1; var*  G_WN2; var*  G_WGlob1; var*  G_WGlob2; var*  G_WMom; var*  G_WTree; var*  G_WAdv;
var*  A1x; var*  A1lam; var*  A1mean; var*  A1E; var*  A1P; var*  A1i; var*  A1c;
var*  A2x; var*  A2lam; var*  A2mean; var*  A2E; var*  A2P; var*  A2i; var*  A2c;
var*  G1mean; var*  G1E; var*  G2P; var*  G2lam;
var*  G_TreeTerm; int*  G_TopEq; var*  G_TopW; int*  G_EqTreeId; var*  TAlpha; var*  TBeta;
var*  G_Pred; var*  G_AdvScore;
var*  G_PropRaw; var*  G_Prop;

// ===== Markov features exposed to DTREE =====
var G_MCF_PBull;   // 0..1
var G_MCF_Entropy; // 0..1
var G_MCF_State;   // 0..122

// epoch/context & feedback
int    G_Epoch = 0;
int    G_CtxID = 0;
var    G_FB_A = 0.7;
var    G_FB_B = 0.3;

// ---------- predictability ----------
var nodePredictability(Node* t)
{
    if(!t) return 0.5;
    var disp=0; int n=t->n, i;
    for(i=0;i<n;i++){ Node* c=((Node**)t->c)[i]; disp += abs(c->v - t->v); }
    if(n>0) disp /= n;
    var depthFac = 1.0/(1+t->d);
    var rateBase = 0.01 + 0.02*t->d;
    var rateFac  = exp(-25.0*abs(t->r - rateBase));
    var p = 0.5*(depthFac + rateFac);
    p = 0.5*p + 0.5*(1.0 - disp);
    if(p<0) p=0; if(p>1) p=1;
    return p;
}

// importance for selective pruning
var nodeImportance(Node* u)
{
    if(!u) return 0;
    var amp = abs(u->v); if(amp>1) amp=1;
    var p = nodePredictability(u);
    var depthW = 1.0/(1.0 + u->d);
    var imp = (0.6*p + 0.4*amp) * depthW;
    return imp;
}

// ====== Elastic growth helpers ======

// create a leaf at depth d (no children)
Node* createLeafDepth(int d){
    Node* u = (Node*)malloc(sizeof(Node));
    u->v = random();
    u->r = 0.01 + 0.02*d + random(0.005);
    u->d = d;
    u->n = 0;
    u->c = 0;
    return u;
}

// add up to addK new children to all nodes at frontierDepth
void growSelectiveAtDepth(Node* u, int frontierDepth, int addK)
{
    if(!u) return;
    if(u->d == frontierDepth){
        int want = addK;
        if(want <= 0) return;
        int oldN = u->n;
        int newN = oldN + want;
        Node** Cnew = (Node**)malloc(newN * sizeof(void*));
        int i;
        for(i=0;i<oldN;i++) Cnew[i] = ((Node**)u->c)[i];
        for(i=oldN;i<newN;i++) Cnew[i] = createLeafDepth(frontierDepth-1);
        if(u->c) free(u->c);
        u->c = Cnew; u->n = newN;
        return;
    }
    int j; for(j=0;j<u->n;j++) growSelectiveAtDepth(((Node**)u->c)[j], frontierDepth, addK);
}

// keep top-K children by importance at targetDepth, drop the rest
void freeChildAt(Node* parent, int idx)
{
    if(!parent || !parent->c) return;
    Node** C = (Node**)parent->c;
    freeTree(C[idx]);
    int i;
    for(i=idx+1;i<parent->n;i++) C[i-1] = C[i];
    parent->n--;
    if(parent->n==0){ free(parent->c); parent->c=0; }
}
void pruneSelectiveAtDepth(Node* u, int targetDepth, int keepK)
{
    if(!u) return;

    if(u->d == targetDepth-1 && u->n > 0){
        int n = u->n, i, kept = 0;
        int mark[16]; for(i=0;i<16;i++) mark[i]=0;

        int iter;
        for(iter=0; iter<keepK && iter<n; iter++){
            int bestI = -1; var bestImp = -1;
            for(i=0;i<n;i++){
                if(i<16 && mark[i]==1) continue;
                var imp = nodeImportance(((Node**)u->c)[i]);
                if(imp > bestImp){ bestImp = imp; bestI = i; }
            }
            if(bestI>=0 && bestI<16){ mark[bestI]=1; kept++; }
        }
        for(i=n-1;i>=0;i--) if(i<16 && mark[i]==0) freeChildAt(u,i);
        return;
    }

    int j; for(j=0;j<u->n;j++) pruneSelectiveAtDepth(((Node**)u->c)[j], targetDepth, keepK);
}

void reindexTreeAndMap()
{
    G_TreeN = 0;
    indexTreeDFS(Root);
    if(G_TreeN<=0){ G_TreeN=1; if(G_TreeIdx) G_TreeIdx[0]=Root; }
    int i; for(i=0;i<G_N;i++) G_EqTreeId[i] = i % G_TreeN;
    maybeShrinkTreeIdx(); // Fix #3
}

// ====== Accuracy sentinel & elastic-depth controller ======

void acc_update(var x /*lambda*/, var y /*gamma*/)
{
    var a = 0.01; // EW smoothing: mean memory ~1/a = 100 bars (half-life ~69 bars)
    ACC_mx  = (1-a)*ACC_mx  + a*x;
    ACC_my  = (1-a)*ACC_my  + a*y;
    ACC_mx2 = (1-a)*ACC_mx2 + a*(x*x);
    ACC_my2 = (1-a)*ACC_my2 + a*(y*y);
    ACC_mxy = (1-a)*ACC_mxy + a*(x*y);

    var vx = ACC_mx2 - ACC_mx*ACC_mx;
    var vy = ACC_my2 - ACC_my*ACC_my;
    var cv = ACC_mxy - ACC_mx*ACC_my;
    if(vx>0 && vy>0) G_AccCorr = cv / sqrt(vx*vy); else G_AccCorr = 0;
    if(!G_HaveBase){ G_AccBase = G_AccCorr; G_HaveBase = 1; }
}

// utility to maximize: accuracy minus gentle memory penalty
var util_now()
{
    int mb = mem_mb_est();
    var mem_pen = 0;
    if(mb > MEM_BUDGET_MB) mem_pen = (mb - MEM_BUDGET_MB)/(var)MEM_BUDGET_MB; else mem_pen = 0;
    return G_AccCorr - 0.5*mem_pen;
}

// apply a +1 “grow one level” action if safe memory headroom
int apply_grow_step()
{
    int mb = mem_mb_est();
    if(G_RT_TreeMaxDepth >= MAX_DEPTH) return 0;
    if(mb > MEM_BUDGET_MB - 2*MEM_HEADROOM_MB) return 0;
    int newFrontier = G_RT_TreeMaxDepth;
    growSelectiveAtDepth(Root, newFrontier, KEEP_CHILDREN_HI);
    G_RT_TreeMaxDepth++;
    reindexTreeAndMap();
    printf("\n[EDC] Grew depth to %i (est %i MB)", G_RT_TreeMaxDepth, mem_mb_est());
    return 1;
}

// revert last growth (drop newly-added frontier children)
void revert_last_grow()
{
    pruneSelectiveAtDepth((Node*)Root, G_RT_TreeMaxDepth, 0);
    G_RT_TreeMaxDepth--;
    reindexTreeAndMap();
    printf("\n[EDC] Reverted growth to %i (est %i MB)", G_RT_TreeMaxDepth, mem_mb_est());
}

// main elastic-depth controller; call once per bar (after acc_update)
void edc_runtime()
{
    int mb = mem_mb_est();

    if(G_TunePending){
        if(Bar - G_TuneStartBar >= TUNE_DELAY_BARS){
            G_UtilAfter = util_now();
            var eps = 0.01;
            if(G_UtilAfter + eps < G_UtilBefore){
                revert_last_grow();
            } else {
                printf("\n[EDC] Growth kept (U: %.4f -> %.4f)", G_UtilBefore, G_UtilAfter);
            }
            G_TunePending = 0; G_TuneAction = 0;
        }
        return;
    }

    if( (Bar % DEPTH_TUNE_BARS)==0 && mb <= MEM_BUDGET_MB - 2*MEM_HEADROOM_MB && G_RT_TreeMaxDepth < MAX_DEPTH ){
        G_UtilBefore = util_now();
        if(apply_grow_step()){
            G_TunePending = 1; G_TuneAction = 1; G_TuneStartBar = Bar;
        }
    }
}

// filenames (legacy; still used if LOG_EQ_TO_ONE_FILE==0)
void buildEqFileName(int idx, char* outName /*>=64*/)
{
    strcpy(outName, "Log\\Alpha12_eq_");
    string idxs = strf("%03i", idx);
    strcat(outName, idxs);
    strcat(outName, ".csv");
}

// ===== consolidated EQ log =====
void writeEqHeaderOnce()
{
    static int done=0; if(done) return; done=1;
    file_append("Log\\Alpha12_eq_all.csv",
        "Bar,Epoch,Ctx,EqCount,i,n1,n2,TreeId,Depth,Rate,Pred,Adv,Prop,Mode,WAdv,WTree,PBull,Entropy,MCState,ExprLen,ExprHash,tanhN,sinN,cosN\n");
}

void appendEqMetaLine(
    int bar, int epoch, int ctx, int i, int n1, int n2, int tid, int depth, var rate,
    var pred, var adv, var prop, int mode, var wadv, var wtree,
    var pbull, var ent, int mcstate, string expr)
{
    if(i >= LOG_EQ_SAMPLE) return;

    int eLen  = (int)ifelse(expr != 0, strlen(expr), 0);
    int eHash = (int)ifelse(expr != 0, djb2_hash(expr), djb2_hash(""));
    int cT    = (int)ifelse(expr != 0, countSubStr(expr,"tanh("), 0);
    int cS    = (int)ifelse(expr != 0, countSubStr(expr,"sin("),  0);
    int cC    = (int)ifelse(expr != 0, countSubStr(expr,"cos("),  0);

    file_append("Log\\Alpha12_eq_all.csv",
    strf("%i,%i,%i,%i,%i,%i,%i,%i,%i,%.6f,%.4f,%.4f,%.6f,%i,%.3f,%.3f,%.4f,%.4f,%i,%i,%i,%i,%i,%i\n",
        bar, epoch, ctx, NET_EQNS, i, n1, n2, tid, depth, rate,
        pred, adv, prop, mode, wadv, wtree,
        pbull, ent, mcstate, eLen, eHash, cT, cS, cC));
}

// --------- allocation ----------
void randomizeRP()
{
    int K=G_K,N=G_N,k,j;
    for(k=0;k<K;k++)
        for(j=0;j<N;j++)
            G_RP[k*N+j] = ifelse(random(1) < 0.5, -1.0, 1.0);
}

void computeProjection(){ int K=G_K,N=G_N,k,j; for(k=0;k<K;k++){ var acc=0; for(j=0;j<N;j++) acc+=G_RP[k*N+j]*(G_State[j]*G_State[j]); G_Z[k]=acc; }}

void allocateNet()
{
    int N=G_N, D=G_D, K=G_K;
    G_State=(var*)malloc(N*sizeof(var));  G_Prev=(var*)malloc(N*sizeof(var));  G_Vel=(var*)malloc(N*sizeof(var));
    G_Adj=(int*)malloc(N*D*sizeof(int));
    G_RP=(var*)malloc(K*N*sizeof(var));   G_Z=(var*)malloc(K*sizeof(var));
    G_Mode=(int*)malloc(N*sizeof(int));
    G_WSelf=(var*)malloc(N*sizeof(var));  G_WN1=(var*)malloc(N*sizeof(var));   G_WN2=(var*)malloc(N*sizeof(var));
    G_WGlob1=(var*)malloc(N*sizeof(var)); G_WGlob2=(var*)malloc(N*sizeof(var));
    G_WMom=(var*)malloc(N*sizeof(var));   G_WTree=(var*)malloc(N*sizeof(var)); G_WAdv=(var*)malloc(N*sizeof(var));
    A1x=(var*)malloc(N*sizeof(var)); A1lam=(var*)malloc(N*sizeof(var)); A1mean=(var*)malloc(N*sizeof(var));
    A1E=(var*)malloc(N*sizeof(var)); A1P=(var*)malloc(N*sizeof(var));   A1i=(var*)malloc(N*sizeof(var)); A1c=(var*)malloc(N*sizeof(var));
    A2x=(var*)malloc(N*sizeof(var)); A2lam=(var*)malloc(N*sizeof(var)); A2mean=(var*)malloc(N*sizeof(var));
    A2E=(var*)malloc(N*sizeof(var)); A2P=(var*)malloc(N*sizeof(var));   A2i=(var*)malloc(N*sizeof(var)); A2c=(var*)malloc(N*sizeof(var));
    G1mean=(var*)malloc(N*sizeof(var)); G1E=(var*)malloc(N*sizeof(var));
    G2P=(var*)malloc(N*sizeof(var));    G2lam=(var*)malloc(N*sizeof(var));
    G_TreeTerm=(var*)malloc(N*sizeof(var)); G_TopEq=(int*)malloc(N*sizeof(int)); G_TopW=(var*)malloc(N*sizeof(var));
    TAlpha=(var*)malloc(N*sizeof(var));     TBeta=(var*)malloc(N*sizeof(var));
    G_Pred=(var*)malloc(N*sizeof(var)); G_AdvScore=(var*)malloc(N*sizeof(var));
    G_PropRaw=(var*)malloc(N*sizeof(var));  G_Prop=(var*)malloc(N*sizeof(var));

    if(LOG_EXPR_TEXT){
        G_Sym=(string*)malloc(N*sizeof(char*));
    } else {
        G_Sym=0;
    }

    G_TreeCap=128; // was 512 (Fix #3: start smaller; still grows if needed)
    G_TreeIdx=(Node**)malloc(G_TreeCap*sizeof(Node*)); G_TreeN=0;
    G_EqTreeId=(int*)malloc(N*sizeof(int));

    // Pre-init adjacency to safe value
    int tInit; for(tInit=0; tInit<N*D; tInit++) G_Adj[tInit] = -1;

    int i;
    for(i=0;i<N;i++){
        G_State[i]=random();
        G_Prev[i]=G_State[i]; G_Vel[i]=0;
        G_Mode[i]=0;
        G_WSelf[i]=0.5; G_WN1[i]=0.2; G_WN2[i]=0.2; G_WGlob1[i]=0.1; G_WGlob2[i]=0.1; G_WMom[i]=0.05; G_WTree[i]=0.15; G_WAdv[i]=0.15;
        A1x[i]=1; A1lam[i]=0.1; A1mean[i]=0; A1E[i]=0; A1P[i]=0; A1i[i]=0; A1c[i]=0;
        A2x[i]=1; A2lam[i]=0.1; A2mean[i]=0; A2E[i]=0; A2P[i]=0; A2i[i]=0; A2c[i]=0;
        G1mean[i]=1.0; G1E[i]=0.001; G2P[i]=0.6; G2lam[i]=0.3;
        TAlpha[i]=0.8; TBeta[i]=25.0;
        G_TreeTerm[i]=0; G_TopEq[i]=-1; G_TopW[i]=0;
        G_Pred[i]=0.5;   G_AdvScore[i]=0;
        G_PropRaw[i]=1;  G_Prop[i]=1.0/G_N;

        if(LOG_EXPR_TEXT){
            G_Sym[i] = (char*)malloc(EXPR_MAXLEN);
            if(G_Sym[i]) strcpy(G_Sym[i], "");
        }
    }
}

void freeNet()
{
    int i;
    if(G_State)free(G_State); if(G_Prev)free(G_Prev); if(G_Vel)free(G_Vel);
    if(G_Adj)free(G_Adj); if(G_RP)free(G_RP); if(G_Z)free(G_Z);
    if(G_Mode)free(G_Mode); if(G_WSelf)free(G_WSelf); if(G_WN1)free(G_WN1); if(G_WN2)free(G_WN2);
    if(G_WGlob1)free(G_WGlob1); if(G_WGlob2)free(G_WGlob2); if(G_WMom)free(G_WMom);
    if(G_WTree)free(G_WTree); if(G_WAdv)free(G_WAdv);
    if(A1x)free(A1x); if(A1lam)free(A1lam); if(A1mean)free(A1mean); if(A1E)free(A1E); if(A1P)free(A1P); if(A1i)free(A1i); if(A1c)free(A1c);
    if(A2x)free(A2x); if(A2lam)free(A2lam); if(A2mean)free(A2mean); if(A2E)free(A2E); if(A2P)free(A2P); if(A2i)free(A2i); if(A2c)free(A2c);
    if(G1mean)free(G1mean); if(G1E)free(G1E); if(G2P)free(G2P); if(G2lam)free(G2lam);
    if(G_TreeTerm)free(G_TreeTerm); if(G_TopEq)free(G_TopEq); if(G_TopW)free(G_TopW);
    if(TAlpha)free(TAlpha); if(TBeta)free(TBeta);
    if(G_Pred)free(G_Pred); if(G_AdvScore)free(G_AdvScore);
    if(G_PropRaw)free(G_PropRaw); if(G_Prop)free(G_Prop);
    if(G_Sym){ for(i=0;i<G_N;i++) if(G_Sym[i]) free(G_Sym[i]); free(G_Sym); }
    if(G_TreeIdx)free(G_TreeIdx); if(G_EqTreeId)free(G_EqTreeId);
}

// --------- DTREE feature builders ----------
var nrm_s(var x){ return sat100(100.0*tanh(x)); }
var nrm_scl(var x,var s){ return sat100(100.0*tanh(s*x)); }

void buildEqFeatures(int i, var lambda, var mean, var energy, var power, var* S /*ADV_EQ_NF*/)
{
    int tid = safeTreeIndexFromEq(G_EqTreeId[i]);
    Node* t = treeAt(tid);

    S[0]  = nrm_s(G_State[i]);
    S[1]  = nrm_s(mean);
    S[2]  = nrm_scl(power,0.05);
    S[3]  = nrm_scl(energy,0.01);
    S[4]  = nrm_s(lambda);
    S[5]  = sat100(200.0*(G_Pred[i]-0.5));
    S[6]  = sat100(200.0*((var)t->d/MAX_DEPTH)-100.0);
    S[7]  = sat100(1000.0*t->r);
    S[8]  = nrm_s(G_TreeTerm[i]);
    S[9]  = sat100(200.0*((var)G_Mode[i]/3.0)-100.0);
    S[10] = sat100(200.0*(G_MCF_PBull-0.5));
    S[11] = sat100(200.0*(G_MCF_Entropy-0.5));
    sanitize(S,ADV_EQ_NF);
}

// (Kept for completeness; not used by DTREE anymore)
void buildPairFeatures(int i,int j, var lambda, var mean, var energy, var power, var* P /*ADV_PAIR_NF*/)
{
    int tid_i = safeTreeIndexFromEq(G_EqTreeId[i]);
    int tid_j = safeTreeIndexFromEq(G_EqTreeId[j]);
    Node* ti = treeAt(tid_i);
    Node* tj = treeAt(tid_j);

    P[0]=nrm_s(G_State[i]); P[1]=nrm_s(G_State[j]);
    P[2]=sat100(200.0*((var)ti->d/MAX_DEPTH)-100.0);
    P[3]=sat100(200.0*((var)tj->d/MAX_DEPTH)-100.0);
    P[4]=sat100(1000.0*ti->r); P[5]=sat100(1000.0*tj->r);
    P[6]=sat100(abs(P[2]-P[3]));
    P[7]=sat100(abs(P[4]-P[5]));
    P[8]=sat100(100.0*(G_Pred[i]+G_Pred[j]-1.0));
    P[9]=nrm_s(lambda); P[10]=nrm_s(mean); P[11]=nrm_scl(power,0.05);
    sanitize(P,ADV_PAIR_NF);
}

// --- Safe neighbor helpers & adjacency sanitizer ---
int adjSafe(int i, int d){
    int N = G_N, D = G_D;
    if(!G_Adj || N <= 1 || D <= 0) return 0;
    if(d < 0) d = 0;
    if(d >= D) d = d % D;
    int v = G_Adj[i*D + d];
    if(v < 0 || v >= N || v == i){
        v = (i + 1) % N;
    }
    return v;
}

void sanitizeAdjacency(){
    if(!G_Adj) return;
    int N = G_N, D = G_D;
    int i, d;
    for(i=0;i<N;i++){
        for(d=0; d<D; d++){
            int *p = &G_Adj[i*D + d];
            if(*p < 0 || *p >= N || *p == i){
                int r = (int)random(N);
                if(r == i) r = (r+1) % N;
                *p = r;
            }
        }
        if(D >= 2 && G_Adj[i*D+0] == G_Adj[i*D+1]){
            int r2 = (G_Adj[i*D+1] + 1) % N;
            if(r2 == i) r2 = (r2+1) % N;
            G_Adj[i*D+1] = r2;
        }
    }
}

// --------- advisor helpers (NEW) ----------

// cache one advisor value per equation per bar
var adviseSeed(int i, var lambda, var mean, var energy, var power)
{
    static int seedBar = -1;
    static int haveSeed[NET_EQNS];
    static var seedVal[NET_EQNS];

    if(seedBar != Bar){
        int k; for(k=0;k<NET_EQNS;k++) haveSeed[k] = 0;
        seedBar = Bar;
    }
    if(i < 0) i = 0;
    if(i >= NET_EQNS) i = i % NET_EQNS;

    // Fix #2: obey advisor budget/rotation for seed too
    if(!allowAdvise(i)) return 0;

    if(!haveSeed[i]){
        seedVal[i] = adviseEq(i, lambda, mean, energy, power); // trains (once) in Train mode
        haveSeed[i] = 1;
    }
    return seedVal[i];
}

// simple deterministic mixer for diversity in [-1..1] without extra advise calls
var mix01(var a, int salt){
    var z = sin(123.456*a + 0.001*salt) + cos(98.765*a + 0.002*salt);
    return tanh(0.75*z);
}

// --------- advise wrappers (single-equation only) ----------
// Use estimator to halt when tight; respect rotation budget.
var adviseEq(int i, var lambda, var mean, var energy, var power)
{
    if(!allowAdvise(i)) return 0;

    var S[ADV_EQ_NF];
    buildEqFeatures(i,lambda,mean,energy,power,S);

    if(is(INITRUN)) return 0;

    // Fix #2: stop early based on our estimator, not memory(0)
    int tight = (mem_mb_est() >= MEM_BUDGET_MB - MEM_HEADROOM_MB);
    if(tight) return 0;

    var obj = 0;
    if(Train && !tight)
        obj = sat100(100.0*tanh(0.6*lambda + 0.4*mean));

    int objI = (int)obj;
    var a = adviseLong(DTREE, objI, S, ADV_EQ_NF);
    return a/100.;
}

// --------- advisePair disabled: never call DTREE here ----------
var advisePair(int i,int j, var lambda, var mean, var energy, var power)
{
    return 0;
}

// --------- heuristic pair scoring ----------
var scorePairSafe(int i, int j, var lambda, var mean, var energy, var power)
{
    int ti = safeTreeIndexFromEq(G_EqTreeId[i]);
    int tj = safeTreeIndexFromEq(G_EqTreeId[j]);
    Node *ni = treeAt(ti), *nj = treeAt(tj);
    var simD  = 1.0 / (1.0 + abs((var)ni->d - (var)nj->d));
    var simR  = 1.0 / (1.0 + 50.0*abs(ni->r - nj->r));
    var pred  = 0.5*(G_Pred[i] + G_Pred[j]);
    var score = 0.5*pred + 0.3*simD + 0.2*simR;
    return 2.0*score - 1.0;
}

// --------- adjacency selection (heuristic only) ----------
// safer clash check using prev>=0
void rewireAdjacency_DTREE(var lambda, var mean, var energy, var power)
{
    int N=G_N, D=G_D, i, d, c, best, cand;
    for(i=0;i<N;i++){
        for(d=0; d<D; d++){
            var bestScore = -2; best = -1;
            for(c=0;c<CAND_NEIGH;c++){
                cand = (int)random(N);
                if(cand==i) continue;
                int clash=0, k;
                for(k=0;k<d;k++){
                    int prev = G_Adj[i*D+k];
                    if(prev>=0 && prev==cand){ clash=1; break; }
                }
                if(clash) continue;
                var s = scorePairSafe(i,cand,lambda,mean,energy,power);
                if(s > bestScore){ bestScore=s; best=cand; }
            }
            if(best<0){ do{ best = (int)random(N);} while(best==i); }
            G_Adj[i*D + d] = best;
        }
    }
}

// --------- DTREE-created coefficients, modes & proportions ----------
var mapA(var a,var lo,var hi){ return mapUnit(a,lo,hi); }

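// One DTREE seed per equation: all weights and coefficients below are
// derived deterministically from that single seed via mix01 with distinct
// salts, so a full parameter set costs exactly one advisor call.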
void synthesizeEquationFromDTREE(int i, var lambda, var mean, var energy, var power)
{
    var seed = adviseSeed(i,lambda,mean,energy,power);
    G_Mode[i] = (int)(abs(1000*seed)) & 3;

    // derive weights & params deterministically from the single seed
    G_WSelf[i]  = mapA(mix01(seed, 11), 0.15, 0.85);
    G_WN1[i]    = mapA(mix01(seed, 12), 0.05, 0.35);
    G_WN2[i]    = mapA(mix01(seed, 13), 0.05, 0.35);
    G_WGlob1[i] = mapA(mix01(seed, 14), 0.05, 0.30);
    G_WGlob2[i] = mapA(mix01(seed, 15), 0.05, 0.30);
    G_WMom[i]   = mapA(mix01(seed, 16), 0.02, 0.15);
    G_WTree[i]  = mapA(mix01(seed, 17), 0.05, 0.35);
    G_WAdv[i]   = mapA(mix01(seed, 18), 0.05, 0.35);

    A1x[i]   = randsign()*mapA(mix01(seed, 21), 0.6, 1.2);
    A1lam[i] = randsign()*mapA(mix01(seed, 22), 0.05,0.35);
    A1mean[i]=                  mapA(mix01(seed, 23),-0.30,0.30);
    A1E[i]   =                  mapA(mix01(seed, 24),-0.0015,0.0015);
    A1P[i]   =                  mapA(mix01(seed, 25),-0.30,0.30);
    A1i[i]   =                  mapA(mix01(seed, 26),-0.02,0.02);
    A1c[i]   =                  mapA(mix01(seed, 27),-0.20,0.20);

    A2x[i]   = randsign()*mapA(mix01(seed, 31), 0.6, 1.2);
    A2lam[i] = randsign()*mapA(mix01(seed, 32), 0.05,0.35);
    A2mean[i]=                  mapA(mix01(seed, 33),-0.30,0.30);
    A2E[i]   =                  mapA(mix01(seed, 34),-0.0015,0.0015);
    A2P[i]   =                  mapA(mix01(seed, 35),-0.30,0.30);
    A2i[i]   =                  mapA(mix01(seed, 36),-0.02,0.02);
    A2c[i]   =                  mapA(mix01(seed, 37),-0.20,0.20);

    G1mean[i] =                  mapA(mix01(seed, 41), 0.4, 1.6);
    G1E[i]    =                  mapA(mix01(seed, 42),-0.004,0.004);
    G2P[i]    =                  mapA(mix01(seed, 43), 0.1, 1.2);
    G2lam[i]  =                  mapA(mix01(seed, 44), 0.05, 0.7);

    TAlpha[i] =                  mapA(mix01(seed, 51), 0.3, 1.5);
    TBeta[i]  =                  mapA(mix01(seed, 52), 6.0, 50.0);

    G_PropRaw[i] = 0.01 + 0.99*(0.5*(seed+1.0));
}

void normalizeProportions()
{
    int N=G_N,i; var s=0; for(i=0;i<N;i++) s += G_PropRaw[i];
    if(s<=0) { for(i=0;i<N;i++) G_Prop[i] = 1.0/N; return; }
    for(i=0;i<N;i++) G_Prop[i] = G_PropRaw[i]/s;
}

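// dtreeTerm: consensus of the other equations' states under a similarity
// kernel w = exp(-alpha*|d_i - d_j|) * exp(-beta*|r_i - r_j|), further
// boosted by joint predictability, the proportions, and the heuristic
// pair score; returns the weighted mean and reports the dominant neighbor.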
var dtreeTerm(int i, int* outTopEq, var* outTopW)
{
    int N=G_N,j;
    int tid_i = safeTreeIndexFromEq(G_EqTreeId[i]);
    Node* ti=treeAt(tid_i); int di=ti->d; var ri=ti->r;
    var alpha=TAlpha[i], beta=TBeta[i];
    var sumw=0, acc=0, bestW=-1; int bestJ=-1;
    for(j=0;j<N;j++){
        if(j==i) continue;
        int tid_j = safeTreeIndexFromEq(G_EqTreeId[j]);
        Node* tj=treeAt(tid_j); int dj=tj->d; var rj=tj->r;
        var w = exp(-alpha*abs(di-dj)) * exp(-beta*abs(ri-rj));
        var predBoost = 0.5 + 0.5*(G_Pred[i]*G_Pred[j]);
        var propBoost = 0.5 + 0.5*( (G_Prop[i] + G_Prop[j]) );
        w *= predBoost * propBoost;
        var pairAdv = scorePairSafe(i,j,0,0,0,0);
        var pairBoost = 0.75 + 0.25*(0.5*(pairAdv+1.0));
        w *= pairBoost;
        sumw += w; acc += w*G_State[j];
        if(w>bestW){bestW=w; bestJ=j;}
    }
    if(outTopEq) *outTopEq = bestJ;
    if(outTopW)  *outTopW  = ifelse(sumw>0, bestW/sumw, 0);
    if(sumw>0) return acc/sumw; return 0;
}

// --------- expression builder (capped & optional) ----------
void buildSymbolicExpr(int i, int n1, int n2)
{
    if(LOG_EXPR_TEXT){
        string s = G_Sym[i]; s[0]=0;
        string a1 = strf("(%.3f*x[%i] + %.3f*lam + %.3f*mean + %.5f*E + %.3f*P + %.3f*i + %.3f)",
                         A1x[i], n1, A1lam[i], A1mean[i], A1E[i], A1P[i], A1i[i], A1c[i]);
        string a2 = strf("(%.3f*x[%i] + %.3f*lam + %.3f*mean + %.5f*E + %.3f*P + %.3f*i + %.3f)",
                         A2x[i], n2, A2lam[i], A2mean[i], A2E[i], A2P[i], A2i[i], A2c[i]);

        strlcat_safe(s, "x[i]_next = ", EXPR_MAXLEN);
        strlcat_safe(s, strf("%.3f*x[i] + ", G_WSelf[i]), EXPR_MAXLEN);

        if(G_Mode[i]==1){
            strlcat_safe(s, strf("%.3f*tanh%s + ", G_WN1[i], a1), EXPR_MAXLEN);
            strlcat_safe(s, strf("%.3f*sin%s + ",  G_WN2[i], a2), EXPR_MAXLEN);
        } else if(G_Mode[i]==2){
            strlcat_safe(s, strf("%.3f*cos%s + ",  G_WN1[i], a1), EXPR_MAXLEN);
            strlcat_safe(s, strf("%.3f*tanh%s + ", G_WN2[i], a2), EXPR_MAXLEN);
        } else {
            strlcat_safe(s, strf("%.3f*sin%s + ",  G_WN1[i], a1), EXPR_MAXLEN);
            strlcat_safe(s, strf("%.3f*cos%s + ",  G_WN2[i], a2), EXPR_MAXLEN);
        }

        strlcat_safe(s, strf("%.3f*tanh(%.3f*mean + %.5f*E) + ", G_WGlob1[i], G1mean[i], G1E[i]), EXPR_MAXLEN);
        strlcat_safe(s, strf("%.3f*sin(%.3f*P + %.3f*lam) + ",   G_WGlob2[i], G2P[i],   G2lam[i]), EXPR_MAXLEN);
        strlcat_safe(s, strf("%.3f*(x[i]-x_prev[i]) + ",         G_WMom[i]), EXPR_MAXLEN);
        strlcat_safe(s, strf("Prop[i]=%.4f; ",                   G_Prop[i]), EXPR_MAXLEN);
        strlcat_safe(s, strf("%.3f*DT(i) + ",                    G_WTree[i]), EXPR_MAXLEN);
        strlcat_safe(s, strf("%.3f*DTREE(i)",                    G_WAdv[i]), EXPR_MAXLEN);
    }
}

// --------- one-time rewire init ----------
void rewireInit()
{
    randomizeRP(); computeProjection();
    G_TreeN=0; indexTreeDFS(Root);
    if(G_TreeN<=0){ G_TreeN=1; if(G_TreeIdx) G_TreeIdx[0]=Root; }
    int i; for(i=0;i<G_N;i++) G_EqTreeId[i] = i % G_TreeN;
}

// probes & unsigned context hash
void rewireEpoch(var lambda, var mean, var energy, var power)
{
    int i;

    if(ENABLE_WATCH) watch("?A");   // before predictability
    for(i=0;i<G_N;i++){
        int  tid = safeTreeIndexFromEq(G_EqTreeId[i]);
        Node* t  = treeAt(tid);
        G_Pred[i] = nodePredictability(t);
    }

    if(ENABLE_WATCH) watch("?B");   // after predictability, before adjacency
    rewireAdjacency_DTREE(lambda,mean,energy,power);

    if(ENABLE_WATCH) watch("?C");   // after adjacency, before synthesize
    sanitizeAdjacency();

    for(i=0;i<G_N;i++)
        synthesizeEquationFromDTREE(i,lambda,mean,energy,power);

    if(ENABLE_WATCH) watch("?D");   // before normalize / ctx hash
    normalizeProportions();

    // Unsigned context hash of current adjacency (+ epoch) for logging
    {
        int D = G_D;
        unsigned int h = 2166136261u;
        int total = G_N * D;
        for(i=0;i<total;i++){
            unsigned int x = (unsigned int)G_Adj[i];
            h ^= x + 0x9e3779b9u + (h<<6) + (h>>2);
        }
        G_CtxID = (int)((h ^ ((unsigned int)G_Epoch<<8)) & 0x7fffffff);
    }

    // Optional expression text (only when LOG_EXPR_TEXT==1)
    for(i=0;i<G_N;i++){
        int n1 = adjSafe(i,0);
        int n2 = (int)ifelse(G_D >= 2, adjSafe(i,1), n1);
        if(LOG_EXPR_TEXT) buildSymbolicExpr(i,n1,n2);
    }
}

var projectNet()
{
    int N=G_N,i; var sum=0,sumsq=0,cross=0;
    for(i=0;i<N;i++){ sum+=G_State[i]; sumsq+=G_State[i]*G_State[i]; if(i+1<N) cross+=G_State[i]*G_State[i+1]; }
    var mean=sum/N, corr=cross/(N-1);
    return 0.6*tanh(mean + 0.001*sumsq) + 0.4*sin(corr);
}

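// updateNet: one synchronous pass over all N equations. Each new state is
// a weighted sum of self, two neighbor nonlinearities (mode-dependent
// sin/cos/tanh), two global terms, momentum, the tree consensus term, and
// the (budget-rotated) advisor score; the result is clamped to [-10,10].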
void updateNet(var driver, var* outMean, var* outEnergy, var* outPower, int writeMeta)
{
    int N = G_N, D = G_D, i;

    var sum = 0, sumsq = 0;
    for(i = 0; i < N; i++){
        sum   += G_State[i];
        sumsq += G_State[i]*G_State[i];
    }
    var mean   = sum / N;
    var energy = sumsq;
    var power  = sumsq / N;

    for(i = 0; i < N; i++){
        int  tid = safeTreeIndexFromEq(G_EqTreeId[i]);
        Node* t  = treeAt(tid);
        G_Pred[i] = nodePredictability(t);
    }

    for(i = 0; i < N; i++){
        int n1 = adjSafe(i,0);
        int n2 = (int)ifelse(D >= 2, adjSafe(i,1), n1);

        var xi   = G_State[i];
        var xn1  = G_State[n1];
        var xn2  = G_State[n2];
        var mom  = xi - G_Prev[i];

        int topEq = -1;
        var topW  = 0;
        var dt    = dtreeTerm(i, &topEq, &topW);
        G_TreeTerm[i] = dt;
        G_TopEq[i]    = topEq;
        G_TopW[i]     = topW;

        // Fix #2: call advisor only when allowed
        var adv = 0;
        if(allowAdvise(i))
             adv = adviseEq(i, driver, mean, energy, power);
 
        G_AdvScore[i] = adv;

        var arg1 = A1x[i]*xn1 + A1lam[i]*driver + A1mean[i]*mean + A1E[i]*energy + A1P[i]*power + A1i[i]*i + A1c[i];
        var arg2 = A2x[i]*xn2 + A2lam[i]*driver + A2mean[i]*mean + A2E[i]*energy + A2P[i]*power + A2i[i]*i + A2c[i];

        var nl1, nl2;
        if(G_Mode[i] == 0){ nl1 = sin(arg1);  nl2 = cos(arg2); }
        else if(G_Mode[i] == 1){ nl1 = tanh(arg1); nl2 = sin(arg2); }
        else if(G_Mode[i] == 2){ nl1 = cos(arg1);  nl2 = tanh(arg2); }
        else { nl1 = sin(arg1); nl2 = cos(arg2); }

        var glob1 = tanh(G1mean[i]*mean + G1E[i]*energy);
        var glob2 = sin (G2P[i]*power + G2lam[i]*driver);

        var xNew =
            G_WSelf[i]*xi +
            G_WN1[i]*nl1 +
            G_WN2[i]*nl2 +
            G_WGlob1[i]*glob1 +
            G_WGlob2[i]*glob2 +
            G_WMom[i]*mom +
            G_WTree[i]*dt +
            G_WAdv[i]*adv;

        G_Prev[i]  = xi;
        G_Vel[i]   = xNew - xi;
        G_State[i] = clamp(xNew, -10, 10);

        if(writeMeta && (G_Epoch % META_EVERY == 0) && !G_LogsOff){
            int  tid2 = safeTreeIndexFromEq(G_EqTreeId[i]);
            Node* t2  = treeAt(tid2);
            int  nn1  = adjSafe(i,0);
            int  nn2  = (int)ifelse(G_D >= 2, adjSafe(i,1), nn1);

            if(LOG_EQ_TO_ONE_FILE){
                string expr = "";
                if(LOG_EXPR_TEXT) expr = G_Sym[i];
                appendEqMetaLine(
                    Bar, G_Epoch, G_CtxID, i, nn1, nn2, tid2, t2->d, t2->r,
                    G_Pred[i], G_AdvScore[i], G_Prop[i], G_Mode[i], G_WAdv[i], G_WTree[i],
                    MC_PBullNext, MC_Entropy, MC_Cur, expr
                );
            } else {
                char fname[64];
                buildEqFileName(i, fname);
                string expr2 = "";
                if(LOG_EXPR_TEXT) expr2 = G_Sym[i];
                file_append(fname,
                    strf("META,%i,%i,%i,%i,%i,%i,%i,%i,%.6f,Pred=%.4f,Adv=%.4f,Prop=%.6f,Mode=%i,WAdv=%.3f,WTree=%.3f,PBull=%.4f,Ent=%.4f,State=%i,\"%s\"\n",
                        G_Epoch, G_CtxID, NET_EQNS, i, nn1, nn2, tid2, t2->d, t2->r,
                        G_Pred[i], G_AdvScore[i], G_Prop[i], G_Mode[i], G_WAdv[i], G_WTree[i],
                        MC_PBullNext, MC_Entropy, MC_Cur, expr2));
            }
        }
    }

    if(outMean)   *outMean   = mean;
    if(outEnergy) *outEnergy = energy;
    if(outPower)  *outPower  = power;
}

// ----------------- MAIN -----------------
function run()
{
    static int initialized = 0;
    static var lambda;
    static int fileInit = 0;

    BarPeriod = BAR_PERIOD;
    if(LookBack < NWIN) LookBack = NWIN;
    if(Train) Hedge = 2;

    // Plots are opt-in via ENABLE_PLOTS
    set(RULES|LEAN);
    if(ENABLE_PLOTS) set(PLOTNOW);
    asset(ASSET_SYMBOL);

    if(is(INITRUN) && !initialized){

        // init dummy node
        G_DummyNode.v = 0;
        G_DummyNode.r = 0;
        G_DummyNode.c = 0;
        G_DummyNode.n = 0;
        G_DummyNode.d = 0;

        // allocate Markov matrices (zeroed)
        MC_Count  = (int*)malloc(MC_STATES*MC_STATES*sizeof(int));
        MC_RowSum = (int*)malloc(MC_STATES*sizeof(int));
        int k;
        for(k=0;k<MC_STATES*MC_STATES;k++) MC_Count[k]=0;
        for(k=0;k<MC_STATES;k++) MC_RowSum[k]=0;

        // capture pattern names (optional)
        var tmp[MC_NPAT];
        buildCDL_TA61(tmp, MC_Names);

        // build tree + network
        Root = createNode(MAX_DEPTH);
        allocateNet();

        // engine params
        G_DTreeExp = 1.10 + random(0.50);   // [1.10..1.60)
        G_FB_A     = 0.60 + random(0.25);   // [0.60..0.85)
        G_FB_B     = 1.0 - G_FB_A;

        randomizeRP();
        computeProjection();
        rewireInit();

        G_Epoch = 0;
        rewireEpoch(0,0,0,0);

        // Header setup (consolidated vs legacy)
        if(LOG_EQ_TO_ONE_FILE){
            writeEqHeaderOnce();
        } else {
            char fname[64];
            int i2;
            for(i2=0;i2<NET_EQNS;i2++){
                buildEqFileName(i2,fname);
                file_append(fname,
                    "Bar,lambda,gamma,i,State,n1,n2,mean,energy,power,Vel,Mode,WAdv,WSelf,WN1,WN2,WGlob1,WGlob2,WMom,WTree,Pred,Adv,Prop,TreeTerm,TopEq,TopW,TreeId,Depth,Rate,PBull,Entropy,MCState\n");
            }
        }

        // Markov CSV header
        if(!fileInit){
            file_append("Log\\Alpha12_markov.csv","Bar,State,PBullNext,Entropy,RowSum\n");
            fileInit=1;
        }

        // initial META dump (consolidated or legacy)
        int i;
        for(i=0;i<G_N;i++){
            int n1 = adjSafe(i,0);
            int n2 = (int)ifelse(G_D >= 2, adjSafe(i,1), n1);
            int tid = safeTreeIndexFromEq(G_EqTreeId[i]);
            Node* t = treeAt(tid);

            if(LOG_EQ_TO_ONE_FILE){
                string expr = "";
                if(LOG_EXPR_TEXT) expr = G_Sym[i];
                appendEqMetaLine(
                    Bar, G_Epoch, G_CtxID, i, n1, n2, tid, t->d, t->r,
                    G_Pred[i], G_AdvScore[i], G_Prop[i], G_Mode[i], G_WAdv[i], G_WTree[i],
                    MC_PBullNext, MC_Entropy, MC_Cur, expr
                );
            } else {
                char fname2[64];
                buildEqFileName(i,fname2);
                string expr2 = "";
                if(LOG_EXPR_TEXT) expr2 = G_Sym[i];
                file_append(fname2,
                    strf("META,%i,%i,%i,%i,%i,%i,%i,%i,%.6f,Pred=%.4f,Adv=%.4f,Prop=%.6f,Mode=%i,WAdv=%.3f,WTree=%.3f,PBull=%.4f,Ent=%.4f,State=%i,\"%s\"\n",
                        G_Epoch, G_CtxID, NET_EQNS, i, n1, n2, tid, t->d, t->r,
                        G_Pred[i], G_AdvScore[i], G_Prop[i], G_Mode[i], G_WAdv[i], G_WTree[i],
                        MC_PBullNext, MC_Entropy, MC_Cur, expr2));
            }
        }

        initialized=1;
        printf("\nRoot nodes: %i | Net equations: %i (degree=%i, kproj=%i)",
               countNodes(Root), G_N, G_D, G_K);
    }

    // Fix #4: earlier zero-cost shedding when approaching cap
    if(mem_mb_est() >= MEM_BUDGET_MB - 2*MEM_HEADROOM_MB && G_ShedStage == 0)
        shed_zero_cost_once();

    // ==== Runtime memory / depth manager (acts only when near the cap)
    depth_manager_runtime();

    // ====== Per bar: Candles -> Markov
    static var CDL[MC_NPAT];
    buildCDL_TA61(CDL,0);
    MC_Cur = MC_stateFromCDL(CDL, MC_ACT);
    if(Bar > LookBack) MC_update(MC_Prev, MC_Cur);
    MC_Prev = MC_Cur;

    MC_PBullNext = MC_nextBullishProb(MC_Cur);
    MC_Entropy   = MC_rowEntropy01(MC_Cur);

    // expose Markov features
    G_MCF_PBull   = MC_PBullNext;
    G_MCF_Entropy = MC_Entropy;
    G_MCF_State   = (var)MC_Cur;

    // ====== Tree driver lambda
    lambda = evaluateNode(Root);

    // Rewire epoch?
    {
        int doRewire = ((Bar % REWIRE_EVERY) == 0);
        if(doRewire){
            G_Epoch++;

            int ii;
            var sum=0;
            for(ii=0;ii<G_N;ii++) sum += G_State[ii];
            var mean = sum/G_N;

            var energy=0;
            for(ii=0;ii<G_N;ii++) energy += G_State[ii]*G_State[ii];
            var power = energy/G_N;

            rewireEpoch(lambda,mean,energy,power);
        }

        // Update net this bar (write META only if rewired and not shedding logs)
        var meanB, energyB, powerB;
        updateNet(lambda, &meanB, &energyB, &powerB, doRewire);

        // Feedback blend
        var gamma = projectNet();
        lambda = G_FB_A*lambda + G_FB_B*gamma;

        // --- Accuracy sentinel update & elastic depth controller ---
        acc_update(lambda, gamma);
        edc_runtime();

        // Plot/log gating
        int doPlot = (ENABLE_PLOTS && !G_ChartsOff);
        int doLog = ifelse(G_LogsOff, ((Bar % (LOG_EVERY*4)) == 0), ((Bar % LOG_EVERY) == 0));

        // Plots
        if(doPlot){
            plot("lambda", lambda, LINE, 0);
            plot("gamma",  gamma,  LINE, 0);
            plot("P_win",  powerB, LINE, 0);
            plot("PBullNext", MC_PBullNext, LINE, 0);
            plot("MC_Entropy", MC_Entropy, LINE, 0);
            plot("MemMB", memory(0)/(1024.*1024.), LINE, 0);
            plot("Allocs", (var)memory(2), LINE, 0);
        }

        // Markov CSV log (decimated; further decimated when shedding)
        if(doLog){
            file_append("Log\\Alpha12_markov.csv",
                strf("%i,%i,%.6f,%.6f,%i\n", Bar, MC_Cur, MC_PBullNext, MC_Entropy, MC_RowSum[MC_Cur]));
        }

        // ====== Entries (Markov-gated) ======
        if( MC_PBullNext > PBULL_LONG_TH && lambda > 0.7 )  enterLong();
        if( MC_PBullNext < PBULL_SHORT_TH && lambda < -0.7 ) enterShort();
    }
}

// Clean up memory
function cleanup()
{
    if(Root) freeTree(Root);
    if(MC_Count)  free(MC_Count);
    if(MC_RowSum) free(MC_RowSum);
    freeNet();
}

Last edited by TipmyPip; 09/06/25 01:08.
Regime-Responsive Graph Rewiring of Influences [Re: TipmyPip] #488877
09/06/25 19:35
09/06/25 19:35
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
Regime-Responsive Graph Rewiring of Influences

A. A small market lexicon
The pattern alphabet now self-calibrates. It keeps a steady share of “active” moments even as volatility changes, and it lets early uncertainty be smoothed more generously while sharpening as evidence accumulates. The two dials—lean and clarity (in the listing below, the Markov bullish probability and the row-entropy measure)—stay comparable across regimes, so permission means the same thing in quiet and in storm.
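
What that calibration can look like in code, as a minimal sketch rather than the exact rule used below (the target share and step sizes here are assumptions; G_MC_ACT and G_AccRate are the knobs the listing actually declares):

Code
// Nudge the acceptance threshold so the share of non-neutral states
// stays near a target, regardless of the volatility regime.
void adaptAcceptance(int state)
{
    var a = 0.02;       // EW smoothing factor
    var target = 0.35;  // assumed target share of "active" bars
    G_AccRate = (1.0-a)*G_AccRate + a*ifelse(state != MC_NONE, 1., 0.);
    if(G_AccRate > target) G_MC_ACT = clamp(G_MC_ACT + 0.001, 0.05, 0.90);
    else G_MC_ACT = clamp(G_MC_ACT - 0.001, 0.05, 0.90);
}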

B. A soft landscape of influences
The continuous field underneath reallocates attention as conditions evolve. Its effective dimensionality breathes: higher when structure is clean, lower when noise rises. Multiple drivers are blended with a bias toward whichever one is currently more predictive. Effort is budgeted—more attention when order emerges, less when the tape is muddled or resources are tight. Signals remain bounded; only the emphasis moves.
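
The blend itself is one line; a sketch under the assumption that the adaptation logic has already written the current informativeness weight into G_FB_W (declared in the listing below):

Code
// Emphasis follows whichever driver is currently more predictive.
var blendDrivers(var lambda, var gamma)
{
    return G_FB_W*lambda + (1.0-G_FB_W)*gamma;
}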

C. Occasional reshaping of who listens to whom
Connectivity refresh widens its search when the environment is organized and narrows it when it’s chaotic. The refresh can also trigger early after a utility dip, helping the structure realign quickly after shocks without constant churn.
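
A hypothetical mapping from clarity to search width; the listing exposes the breadth as G_CandNeigh, but this particular entropy formula is an illustration only:

Code
// Organized tape (low entropy) widens the candidate search,
// chaotic tape narrows it.
void adaptBreadth(var entropy01)
{
    G_CandNeigh = (int)clamp(CAND_NEIGH*(1.5 - entropy01), 2, 2*CAND_NEIGH);
}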

D. Capacity that breathes, with guardrails
Form adjusts to usefulness. When added detail stops paying or resources tighten, it trims deepest, least helpful nuance; when there’s headroom and benefit, it adds a thin layer. Changes are small, tested, and reversible. Depth emphasis also adapts, shifting weight between shallow and deep context as regimes change.

E. Permission meets timing and size
Action still requires both a clear lean and sufficient clarity, plus harmony from the broader landscape. Because the dictionary stays rate-stable and sharpens with evidence, and because drivers are blended by current informativeness, timing improves around transitions. Position size tracks agreement strength; ambiguity defaults to patience.

F. A brief, human-readable diary
The ledger stays compact and comparable across regimes: current archetype, the two dials, and a terse sketch of how influences combined. It aims for oversight-grade clarity without sacrificing speed.

G. What tends to emerge
In ordered tape: broader search, richer projection, more active guidance, and a tilt toward deeper context. In choppy tape: narrower search, leaner projection, tighter guidance, and a tilt toward shallower, more robust cues. The posture glides between modes via small, distributed adjustments.

H. Risk doctrine as a controlling atmosphere
Exposure respects caps at all times. When noise rises or resources get tight, the system automatically de-emphasizes fine detail, focuses on the strongest agreements, and lets activity drift toward neutral rather than forcing trades—keeping drawdown sensitivity in check.

I. Memory, recency, and drift
Assessments use decaying memory so recent tape matters more while stale evidence fades. Permission and landscape both learn continuously, producing controlled drift—no whiplash, no stickiness.
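
Concretely, “decaying memory” here is the standard exponentially weighted update, the same form acc_update uses in the listing below; a sample's weight fades geometrically with age:

Code
// m <- (1-a)*m + a*x; with a = 0.01 the time constant is ~100 bars
// (half-life about 69 bars).
var ewUpdate(var m, var x, var a) { return (1.0-a)*m + a*x; }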

J. Separation of roles

The lexicon offers a compact, discrete view of context and stable permission.

The landscape provides a continuous, multi-horizon view that shapes timing and size.

The reshaper keeps connections healthy, widening or narrowing search as needed.

The capacity governor ensures useful complexity under constraints.
Together they reduce overreaction to noise while preserving responsiveness to structural change.

K. Practical trading implications

Expect fewer, stronger actions when clarity is low; more decisive engagement when agreement is broad.

Expect size to follow consensus strength, not single-indicator extremes.

Expect quicker realignment after shocks, but without perpetual reshuffling.

Expect behavior to remain stable across regime shifts, with measured changes rather than leaps.

L. Philosophy in one line
Trade when the story is both clear and corroborated; keep the model light, the adjustments small, and the diary open.



Code
// ======================================================================
// Alpha12 - Markov-augmented Harmonic D-Tree Engine (Candlestick 122-dir)
// with runtime memory shaping, selective depth pruning, 
// and elastic accuracy-aware depth growth + 8 adaptive improvements.
// ======================================================================

// ================= USER CONFIG =================
#define ASSET_SYMBOL   "EUR/USD"
#define BAR_PERIOD     60
#define MC_ACT         0.30       // initial threshold on |CDL| in [-1..1] to accept a pattern
#define PBULL_LONG_TH  0.60       // Markov gate for long
#define PBULL_SHORT_TH 0.40       // Markov gate for short

// ===== Debug toggles (Fix #1 - chart/watch growth off by default) =====
#define ENABLE_PLOTS   0    // 0 = no plot buffers; 1 = enable plot() calls
#define ENABLE_WATCH   0    // 0 = disable watch() probes; 1 = enable

// ================= ENGINE PARAMETERS =================
#define MAX_BRANCHES    3
#define MAX_DEPTH       4
#define NWIN            256
#define NET_EQNS        100
#define DEGREE          4
#define KPROJ           16
#define REWIRE_EVERY    127
#define CAND_NEIGH      8

// ===== LOGGING CONTROLS (memory management) =====
#define LOG_EQ_TO_ONE_FILE   1    // 1: single consolidated EQ CSV; 0: per-eq files
#define LOG_EXPR_TEXT        0    // 0: omit full expression (store signature only); 1: include text
#define META_EVERY           4    // write META every N rewires
#define LOG_EQ_SAMPLE        NET_EQNS // limit number of equations logged
#define EXPR_MAXLEN          512  // cap expression string

// decimate Markov log cadence
#define LOG_EVERY            16

// ---- DTREE feature sizes (extended for Markov features) ----
#define ADV_EQ_NF       12   // per-equation features
#define ADV_PAIR_NF     12   // per-pair features (kept for completeness; DTREE pair disabled)

// ================= Candles -> 122-state Markov =================
#define MC_NPAT    61
#define MC_STATES  123   // 1 + 2*MC_NPAT
#define MC_NONE    0
#define MC_LAPLACE 1.0   // kept for reference; runtime uses G_MC_Alpha

// ================= Runtime Memory / Accuracy Manager =================
#define MEM_BUDGET_MB        50
#define MEM_HEADROOM_MB       5
#define DEPTH_STEP_BARS      16
#define KEEP_CHILDREN_HI      2
#define KEEP_CHILDREN_LO      1
#define RUNTIME_MIN_DEPTH     2

int  G_ShedStage        = 0;        // 0..2
int  G_LastDepthActBar  = -999999;
int  G_ChartsOff        = 0;        // gates plot()
int  G_LogsOff          = 0;        // gates file_append cadence
int  G_SymFreed         = 0;        // expression buffers freed
int  G_RT_TreeMaxDepth  = MAX_DEPTH;

// ---- Accuracy sentinel (EW correlation of lambda vs gamma) ----
var  ACC_mx=0, ACC_my=0, ACC_mx2=0, ACC_my2=0, ACC_mxy=0;
var  G_AccCorr = 0;      // [-1..1]
var  G_AccBase = 0;      // first seen sentinel
int  G_HaveBase = 0;

// ---- Elastic depth tuner (small growth trials with rollback) ----
#define DEPTH_TUNE_BARS   64   // start a growth “trial” this often (when memory allows)
#define TUNE_DELAY_BARS   64   // evaluate the trial after this many bars

var  G_UtilBefore = 0, G_UtilAfter = 0;
int  G_TunePending = 0;
int  G_TuneStartBar = 0;
int  G_TuneAction   = 0;  // +1 grow trial, 0 none

// ======================================================================
//  (FIX) Move the type and globals used by mem_bytes_est() up here
// ======================================================================

// HARMONIC D-TREE type (we define it early so globals below compile fine)
typedef struct Node { var v; var r; void* c; int n; int d; } Node;

// Minimal globals needed before mem_bytes_est()
Node*  Root = 0;
Node** G_TreeIdx = 0; 
int    G_TreeN = 0; 
int    G_TreeCap = 0; 
var    G_DTreeExp = 0;

Node   G_DummyNode;   // defined early so treeAt() can return &G_DummyNode

// Network sizing globals (used by mem_bytes_est)
int   G_N  = NET_EQNS;
int   G_D  = DEGREE;
int   G_K  = KPROJ;

// Optional expression buffer pointer (referenced by mem_bytes_est)
string* G_Sym = 0;

// Forward decls that reference Node
var  nodeImportance(Node* u); // fwd decl (uses nodePredictability below)
void pruneSelectiveAtDepth(Node* u, int targetDepth, int keepK);
void reindexTreeAndMap();

// Forward decls for advisor functions (so adviseSeed can call them)
var adviseEq(int i, var lambda, var mean, var energy, var power);
var advisePair(int i,int j, var lambda, var mean, var energy, var power);

// ----------------------------------------------------------------------
// === Adaptive knobs & sentinels (NEW) ===
// ----------------------------------------------------------------------
var G_FB_W = 0.70;     // (1) dynamic lambda/gamma blend weight 0..1
var G_MC_ACT = MC_ACT; // (2) adaptive candlestick acceptance threshold
var G_AccRate = 0;     // (2) EW acceptance rate of (state != 0)

// (3) advisor budget per bar (replaces the macro)
int G_AdviseMax = 16;

// (6) Markov Laplace smoothing (runtime ?)
var G_MC_Alpha = 1.0;

// (7) adaptive candidate breadth for adjacency search
int G_CandNeigh = CAND_NEIGH;

// (8) effective projection dimension (<= KPROJ)
int G_Keff = KPROJ;

// (5) depth emphasis hill-climber
var G_DTreeExpStep = 0.05;
int  G_DTreeExpDir  = 1;

// ---- Advise budget/rotation (Fix #2) ----
#define ADVISE_ROTATE    1   // 1 = rotate which equations get DTREE each bar

int allowAdvise(int i)
{
    if(ADVISE_ROTATE){
        int groups = NET_EQNS / G_AdviseMax; 
        if(groups < 1) groups = 1;
        return ((i / G_AdviseMax) % groups) == (Bar % groups);
    } else {
        return (i < G_AdviseMax);
    }
}
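// Example: with NET_EQNS=100 and G_AdviseMax=16 there are 6 rotation
// groups, so each equation is served by DTREE about once every 6 bars
// instead of all 100 equations calling the advisor on every bar.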

// ---- tree byte size (counts nodes + child pointer arrays) ----
int tree_bytes(Node* u)
{
    if(!u) return 0;
    int SZV = sizeof(var), SZI = sizeof(int), SZP = sizeof(void*);
    int sz_node = 2*SZV + SZP + 2*SZI;
    int total = sz_node;
    if(u->n > 0 && u->c) total += u->n * SZP;
    int i;
    for(i=0;i<u->n;i++)
        total += tree_bytes(((Node**)u->c)[i]);
    return total;
}

// ======================================================================
// Conservative in-script memory estimator (arrays + pointers)
// ======================================================================
int mem_bytes_est()
{
    int N = G_N, D = G_D, K = G_K;
    int SZV = sizeof(var), SZI = sizeof(int), SZP = sizeof(void*);
    int b = 0;

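    // 37 var arrays: state/prev/vel(3) + weights(8) + A1(7) + A2(7)
    // + G1/G2 globals(4) + tree term & topW(2) + TAlpha/TBeta(2)
    // + pred/adv(2) + prop raw/norm(2)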
    b += N*SZV*(3 + 8 + 7 + 7 + 4 + 2 + 2 + 2 + 2);
    b += N*SZI*(3);                    // G_Mode, G_TopEq, G_EqTreeId
    b += N*D*SZI;                      // G_Adj
    b += K*N*SZV;                      // G_RP
    b += K*SZV;                        // G_Z
    b += G_TreeCap*SZP;                // G_TreeIdx pointer vector
    if(G_Sym && !G_SymFreed) b += N*EXPR_MAXLEN; // optional expression buffers
    b += MC_STATES*MC_STATES*SZI + MC_STATES*SZI; // Markov
    b += tree_bytes(Root);                            // include D-Tree
    return b;
}

int mem_mb_est(){ return mem_bytes_est() / (1024*1024); }

// === total memory (Zorro-wide) in MB ===
int memMB(){ return (int)(memory(0)/(1024*1024)); }

// light one-shot shedding
void shed_zero_cost_once()
{
    if(G_ShedStage > 0) return;
    set(PLOTNOW|OFF); G_ChartsOff = 1;  // stop chart buffers
    G_LogsOff = 1;                      // decimate logs (gated later)
    G_ShedStage = 1;
}
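// Shedding order: stage 1 turns off charts and decimates logs (zero cost),
// stage 2 frees the optional expression buffers, and only after that does
// the depth manager below start pruning tree levels.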

void freeExprBuffers()
{
    if(!G_Sym || G_SymFreed) return;
    int i; for(i=0;i<G_N;i++) if(G_Sym[i]) free(G_Sym[i]);
    free(G_Sym); G_Sym = 0; G_SymFreed = 1;
}

// depth manager (prune & shedding)
void depth_manager_runtime()
{
    int trigger = MEM_BUDGET_MB - MEM_HEADROOM_MB;
    int mb = mem_mb_est();
    if(mb < trigger) return;

    if(G_ShedStage == 0) shed_zero_cost_once();

    if(G_ShedStage <= 1){
        if(LOG_EXPR_TEXT==0 && !G_SymFreed) freeExprBuffers();
        G_ShedStage = 2;
    }

    int overBudget = (mb >= MEM_BUDGET_MB);
    if(!overBudget && (Bar - G_LastDepthActBar < DEPTH_STEP_BARS))
        return;

    while(G_RT_TreeMaxDepth > RUNTIME_MIN_DEPTH)
    {
        int keepK = ifelse(mem_mb_est() < MEM_BUDGET_MB + 2, KEEP_CHILDREN_HI, KEEP_CHILDREN_LO);
        pruneSelectiveAtDepth((Node*)Root, G_RT_TreeMaxDepth, keepK);
        G_RT_TreeMaxDepth--;
        reindexTreeAndMap();

        mb = mem_mb_est();
        printf("\n[DepthMgr] depth=%i keepK=%i est=%i MB", G_RT_TreeMaxDepth, keepK, mb);

        if(mb < trigger) break;
    }

    G_LastDepthActBar = Bar;
}

// ----------------------------------------------------------------------
// 61 candlestick patterns (Zorro spellings kept). Each returns [-100..100].
// We rescale to [-1..1] for Markov state construction.
// ----------------------------------------------------------------------
int buildCDL_TA61(var* out, string* names)
{
    int n = 0;
    #define ADD(Name, Call) do{ var v = (Call); out[n] = v/100.; if(names) names[n] = Name; n++; }while(0)

    ADD("CDL2Crows",              CDL2Crows());
    ADD("CDL3BlackCrows",         CDL3BlackCrows());
    ADD("CDL3Inside",             CDL3Inside());
    ADD("CDL3LineStrike",         CDL3LineStrike());
    ADD("CDL3Outside",            CDL3Outside());
    ADD("CDL3StarsInSouth",       CDL3StarsInSouth());
    ADD("CDL3WhiteSoldiers",      CDL3WhiteSoldiers());
    ADD("CDLAbandonedBaby",       CDLAbandonedBaby(0.3));
    ADD("CDLAdvanceBlock",        CDLAdvanceBlock());
    ADD("CDLBeltHold",            CDLBeltHold());
    ADD("CDLBreakaway",           CDLBreakaway());
    ADD("CDLClosingMarubozu",     CDLClosingMarubozu());
    ADD("CDLConcealBabysWall",    CDLConcealBabysWall());
    ADD("CDLCounterAttack",       CDLCounterAttack());
    ADD("CDLDarkCloudCover",      CDLDarkCloudCover(0.3));
    ADD("CDLDoji",                CDLDoji());
    ADD("CDLDojiStar",            CDLDojiStar());
    ADD("CDLDragonflyDoji",       CDLDragonflyDoji());
    ADD("CDLEngulfing",           CDLEngulfing());
    ADD("CDLEveningDojiStar",     CDLEveningDojiStar(0.3));
    ADD("CDLEveningStar",         CDLEveningStar(0.3));
    ADD("CDLGapSideSideWhite",    CDLGapSideSideWhite());
    ADD("CDLGravestoneDoji",      CDLGravestoneDoji());
    ADD("CDLHammer",              CDLHammer());
    ADD("CDLHangingMan",          CDLHangingMan());
    ADD("CDLHarami",              CDLHarami());
    ADD("CDLHaramiCross",         CDLHaramiCross());
    ADD("CDLHignWave",            CDLHignWave());
    ADD("CDLHikkake",             CDLHikkake());
    ADD("CDLHikkakeMod",          CDLHikkakeMod());
    ADD("CDLHomingPigeon",        CDLHomingPigeon());
    ADD("CDLIdentical3Crows",     CDLIdentical3Crows());
    ADD("CDLInNeck",              CDLInNeck());
    ADD("CDLInvertedHammer",      CDLInvertedHammer());
    ADD("CDLKicking",             CDLKicking());
    ADD("CDLKickingByLength",     CDLKickingByLength());
    ADD("CDLLadderBottom",        CDLLadderBottom());
    ADD("CDLLongLeggedDoji",      CDLLongLeggedDoji());
    ADD("CDLLongLine",            CDLLongLine());
    ADD("CDLMarubozu",            CDLMarubozu());
    ADD("CDLMatchingLow",         CDLMatchingLow());
    ADD("CDLMatHold",             CDLMatHold(0.5));
    ADD("CDLMorningDojiStar",     CDLMorningDojiStar(0.3));
    ADD("CDLMorningStar",         CDLMorningStar(0.3));
    ADD("CDLOnNeck",              CDLOnNeck());
    ADD("CDLPiercing",            CDLPiercing());
    ADD("CDLRickshawMan",         CDLRickshawMan());
    ADD("CDLRiseFall3Methods",    CDLRiseFall3Methods());
    ADD("CDLSeperatingLines",     CDLSeperatingLines());
    ADD("CDLShootingStar",        CDLShootingStar());
    ADD("CDLShortLine",           CDLShortLine());
    ADD("CDLSpinningTop",         CDLSpinningTop());
    ADD("CDLStalledPattern",      CDLStalledPattern());
    ADD("CDLStickSandwhich",      CDLStickSandwhich());
    ADD("CDLTakuri",              CDLTakuri());
    ADD("CDLTasukiGap",           CDLTasukiGap());
    ADD("CDLThrusting",           CDLThrusting());
    ADD("CDLTristar",             CDLTristar());
    ADD("CDLUnique3River",        CDLUnique3River());
    ADD("CDLUpsideGap2Crows",     CDLUpsideGap2Crows());
    ADD("CDLXSideGap3Methods",    CDLXSideGap3Methods());

    #undef ADD
    return n; // 61
}

// ================= Markov storage & helpers =================
static int* MC_Count;   // [MC_STATES*MC_STATES]
static int* MC_RowSum;  // [MC_STATES]
static int  MC_Prev = -1;
static int  MC_Cur  = 0;
static var  MC_PBullNext = 0.5;
static var  MC_Entropy   = 0.0;
static string MC_Names[MC_NPAT];

#define MC_IDX(fr,to) ((fr)*MC_STATES + (to))

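// Map the 61 pattern strengths to one of 123 states: 0 = no pattern;
// otherwise 1 + 2*patternIndex + bullFlag for the strongest |value|
// that clears the acceptance threshold.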
int MC_stateFromCDL(var* cdl /*len=61*/, var thr)
{
    int i, best=-1; var besta=0;
    for(i=0;i<MC_NPAT;i++){
        var a = abs(cdl[i]);
        if(a>besta){ besta=a; best=i; }
    }
    if(best<0) return MC_NONE;
    if(besta < thr) return MC_NONE;
    int bull = (cdl[best] > 0);
    return 1 + 2*best + bull;  // 1..122
}
int MC_isBull(int s){ if(s<=0) return 0; return ((s-1)%2)==1; }

void MC_update(int sPrev,int sCur){ if(sPrev<0) return; MC_Count[MC_IDX(sPrev,sCur)]++; MC_RowSum[sPrev]++; }

// === (6) Use runtime Laplace ? (G_MC_Alpha) ===
var MC_prob(int s,int t){
    var num = (var)MC_Count[MC_IDX(s,t)] + G_MC_Alpha;
    var den = (var)MC_RowSum[s] + G_MC_Alpha*MC_STATES;
    if(den<=0) return 1.0/MC_STATES;
    return num/den;
}

var MC_nextBullishProb(int s){
    if(s<0) return 0.5;
    int t; var pBull=0, pTot=0;
    for(t=1;t<MC_STATES;t++){ var p=MC_prob(s,t); pTot+=p; if(MC_isBull(t)) pBull+=p; }
    if(pTot<=0) return 0.5;
    return pBull/pTot;
}
var MC_rowEntropy01(int s){
    if(s<0) return 1.0;
    int t; var H=0, Z=0;
    for(t=1;t<MC_STATES;t++){ var p=MC_prob(s,t); Z+=p; }
    if(Z<=0) return 1.0;
    for(t=1;t<MC_STATES;t++){ var p=MC_prob(s,t)/Z; if(p>0) H += -p*log(p); }
    var Hmax = log(MC_STATES-1);
    if(Hmax<=0) return 0;
    return H/Hmax;
}

// ================= HARMONIC D-TREE ENGINE =================

// ---------- utils ----------
var randsign(){ return ifelse(random(1) < 0.5, -1.0, 1.0); }
var mapUnit(var u,var lo,var hi){ if(u<-1) u=-1; if(u>1) u=1; var t=0.5*(u+1.0); return lo + t*(hi-lo); }

// ---- safety helpers ----
var safeNum(var x){ if(x!=x) return 0; if(x > 1e100) return 1e100; if(x < -1e100) return -1e100; return x; }
void sanitize(var* A,int n){ int k; for(k=0;k<n;k++) A[k]=safeNum(A[k]); }
var sat100(var x){ return clamp(x,-100,100); }

// ---- small string helpers (for memory-safe logging) ----
void strlcat_safe(string dst, string src, int cap)
{
    if(!dst || !src || cap <= 0) return;
    int dl = strlen(dst);
    int sl = strlen(src);
    int room = cap - 1 - dl;
    if(room <= 0){ if(cap > 0) dst[cap-1] = 0; return; }
    int i; for(i = 0; i < room && i < sl; i++) dst[dl + i] = src[i];
    dst[dl + i] = 0;
}

int countSubStr(string s, string sub){
    if(!s || !sub) return 0;
    int n=0; string p=s;
    int sublen = strlen(sub);
    if(sublen<=0) return 0;
    while((p=strstr(p,sub))){ n++; p += sublen; }
    return n;
}

// ---------- FIXED: use int (lite-C) and keep non-negative ----------
int djb2_hash(string s){
    int h = 5381, c, i = 0;
    if(!s) return h;
    while((c = s[i++])) h = ((h<<5)+h) ^ c;  // h*33 ^ c
    return h & 0x7fffffff;                   // force non-negative
}

// ---- tree helpers ----
int  validTreeIndex(int tid){ if(!G_TreeIdx) return 0; if(tid<0||tid>=G_TreeN) return 0; return (G_TreeIdx[tid]!=0); }
Node* treeAt(int tid){ if(validTreeIndex(tid)) return G_TreeIdx[tid]; return &G_DummyNode; }
int safeTreeIndexFromEq(int eqi){
    int denom = ifelse(G_TreeN>0, G_TreeN, 1);
    int tid = eqi;
    if(tid < 0) tid = 0;
    if(denom > 0) tid = tid % denom;
    if(tid < 0) tid = 0;
    return tid;
}

// ---- tree indexing ----
void pushTreeNode(Node* u){
    if(G_TreeN >= G_TreeCap){
        int newCap = G_TreeCap*2;
        if(newCap < 64) newCap = 64;
        G_TreeIdx = (Node**)realloc(G_TreeIdx, newCap*sizeof(Node*));
        G_TreeCap = newCap;
    }
    G_TreeIdx[G_TreeN++] = u;
}
void indexTreeDFS(Node* u){ if(!u) return; pushTreeNode(u); int i; for(i=0;i<u->n;i++) indexTreeDFS(((Node**)u->c)[i]); }

// ---- shrink index capacity after pruning (Fix #3) ----
void maybeShrinkTreeIdx(){
    if(!G_TreeIdx) return;
    if(G_TreeCap > 64 && G_TreeN < (G_TreeCap >> 1)){
        int newCap = (G_TreeCap >> 1);
        if(newCap < 64) newCap = 64;
        G_TreeIdx = (Node**)realloc(G_TreeIdx, newCap*sizeof(Node*));
        G_TreeCap = newCap;
    }
}

// ---- tree create/eval ----
Node* createNode(int depth)
{
    Node* u = (Node*)malloc(sizeof(Node));
    u->v = random();
    u->r = 0.01 + 0.02*depth + random(0.005);
    u->d = depth;
    if(depth > 0){
        u->n = 1 + (int)random(MAX_BRANCHES);
        u->c = malloc(u->n * sizeof(void*));
        int i; for(i=0;i<u->n;i++) ((Node**)u->c)[i] = createNode(depth - 1);
    } else { u->n = 0; u->c = 0; }
    return u;
}
var evaluateNode(Node* u)
{
    if(!u) return 0;
    var sum=0; int i; for(i=0;i<u->n;i++) sum += evaluateNode(((Node**)u->c)[i]);
    var phase  = sin(u->r * Bar + sum);
    var weight = 1.0 / pow(u->d + 1, G_DTreeExp);
    u->v = (1 - weight)*u->v + weight*phase;
    return u->v;
}
int countNodes(Node* u){ if(!u) return 0; int c=1,i; for(i=0;i<u->n;i++) c += countNodes(((Node**)u->c)[i]); return c; }
void freeTree(Node* u){ if(!u) return; int i; for(i=0;i<u->n;i++) freeTree(((Node**)u->c)[i]); if(u->c) free(u->c); free(u); }

// =========== NETWORK STATE & COEFFICIENTS ===========
var*  G_State; var*  G_Prev; var*  G_Vel;
int*  G_Adj;
var*  G_RP; var*  G_Z;
int*  G_Mode;
var*  G_WSelf; var*  G_WN1; var*  G_WN2; var*  G_WGlob1; var*  G_WGlob2; var*  G_WMom; var*  G_WTree; var*  G_WAdv;
var*  A1x; var*  A1lam; var*  A1mean; var*  A1E; var*  A1P; var*  A1i; var*  A1c;
var*  A2x; var*  A2lam; var*  A2mean; var*  A2E; var*  A2P; var*  A2i; var*  A2c;
var*  G1mean; var*  G1E; var*  G2P; var*  G2lam;
var*  G_TreeTerm; int*  G_TopEq; var*  G_TopW; int*  G_EqTreeId; var*  TAlpha; var*  TBeta;
var*  G_Pred; var*  G_AdvScore;
var*  G_PropRaw; var*  G_Prop;

// ===== Markov features exposed to DTREE =====
var G_MCF_PBull;   // 0..1
var G_MCF_Entropy; // 0..1
var G_MCF_State;   // 0..122

// epoch/context & feedback
int    G_Epoch = 0;
int    G_CtxID = 0;
var    G_FB_A = 0.7;  // kept (not used in blend now)
var    G_FB_B = 0.3;  // kept (not used in blend now)

// ---------- predictability ----------
var nodePredictability(Node* t)
{
    if(!t) return 0.5;
    var disp=0; int n=t->n, i;
    for(i=0;i<n;i++){ Node* c=((Node**)t->c)[i]; disp += abs(c->v - t->v); }
    if(n>0) disp /= n;
    var depthFac = 1.0/(1+t->d);
    var rateBase = 0.01 + 0.02*t->d;
    var rateFac  = exp(-25.0*abs(t->r - rateBase));
    var p = 0.5*(depthFac + rateFac);
    p = 0.5*p + 0.5*(1.0 - disp);
    if(p<0) p=0; if(p>1) p=1;
    return p;
}

// importance for selective pruning
var nodeImportance(Node* u)
{
    if(!u) return 0;
    var amp = abs(u->v); if(amp>1) amp=1;
    var p = nodePredictability(u);
    var depthW = 1.0/(1.0 + u->d);
    var imp = (0.6*p + 0.4*amp) * depthW;
    return imp;
}

// ====== Elastic growth helpers ======

// create a leaf at depth d (no children)
Node* createLeafDepth(int d){
    Node* u = (Node*)malloc(sizeof(Node));
    u->v = random();
    u->r = 0.01 + 0.02*d + random(0.005);
    u->d = d;
    u->n = 0;
    u->c = 0;
    return u;
}

// add up to addK new children to all nodes at frontierDepth
void growSelectiveAtDepth(Node* u, int frontierDepth, int addK)
{
    if(!u) return;
    if(u->d == frontierDepth){
        int want = addK;
        if(want <= 0) return;
        int oldN = u->n;
        int newN = oldN + want;
        Node** Cnew = (Node**)malloc(newN * sizeof(void*));
        int i;
        for(i=0;i<oldN;i++) Cnew[i] = ((Node**)u->c)[i];
        for(i=oldN;i<newN;i++) Cnew[i] = createLeafDepth(frontierDepth-1);
        if(u->c) free(u->c);
        u->c = Cnew; u->n = newN;
        return;
    }
    int j; for(j=0;j<u->n;j++) growSelectiveAtDepth(((Node**)u->c)[j], frontierDepth, addK);
}

// keep top-K children by importance at targetDepth, drop the rest
void freeChildAt(Node* parent, int idx)
{
    if(!parent || !parent->c) return;
    Node** C = (Node**)parent->c;
    freeTree(C[idx]);
    int i;
    for(i=idx+1;i<parent->n;i++) C[i-1] = C[i];
    parent->n--;
    if(parent->n==0){ free(parent->c); parent->c=0; }
}
void pruneSelectiveAtDepth(Node* u, int targetDepth, int keepK)
{
    if(!u) return;

    if(u->d == targetDepth-1 && u->n > 0){
        int n = u->n, i, kept = 0;
        int mark[16]; for(i=0;i<16;i++) mark[i]=0;

        int iter;
        for(iter=0; iter<keepK && iter<n; iter++){
            int bestI = -1; var bestImp = -1;
            for(i=0;i<n;i++){
                if(i<16 && mark[i]==1) continue;
                var imp = nodeImportance(((Node**)u->c)[i]);
                if(imp > bestImp){ bestImp = imp; bestI = i; }
            }
            if(bestI>=0 && bestI<16){ mark[bestI]=1; kept++; }
        }
        for(i=n-1;i>=0;i--) if(i<16 && mark[i]==0) freeChildAt(u,i);
        return;
    }

    int j; for(j=0;j<u->n;j++) pruneSelectiveAtDepth(((Node**)u->c)[j], targetDepth, keepK);
}

void reindexTreeAndMap()
{
    G_TreeN = 0;
    indexTreeDFS(Root);
    if(G_TreeN<=0){ G_TreeN=1; if(G_TreeIdx) G_TreeIdx[0]=Root; }
    int i; for(i=0;i<G_N;i++) G_EqTreeId[i] = i % G_TreeN;
    maybeShrinkTreeIdx(); // Fix #3
}

// ====== Accuracy sentinel & elastic-depth controller ======

void acc_update(var x /*lambda*/, var y /*gamma*/)
{
    var a = 0.01; // ~100-bar time constant (half-life about 69 bars)
    ACC_mx  = (1-a)*ACC_mx  + a*x;
    ACC_my  = (1-a)*ACC_my  + a*y;
    ACC_mx2 = (1-a)*ACC_mx2 + a*(x*x);
    ACC_my2 = (1-a)*ACC_my2 + a*(y*y);
    ACC_mxy = (1-a)*ACC_mxy + a*(x*y);

    var vx = ACC_mx2 - ACC_mx*ACC_mx;
    var vy = ACC_my2 - ACC_my*ACC_my;
    var cv = ACC_mxy - ACC_mx*ACC_my;
    if(vx>0 && vy>0) G_AccCorr = cv / sqrt(vx*vy); else G_AccCorr = 0;
    if(!G_HaveBase){ G_AccBase = G_AccCorr; G_HaveBase = 1; }
}

// utility to maximize: accuracy minus gentle memory penalty
var util_now()
{
    int mb = mem_mb_est();
    var mem_pen = 0;
    if(mb > MEM_BUDGET_MB) mem_pen = (mb - MEM_BUDGET_MB)/(var)MEM_BUDGET_MB; else mem_pen = 0;
    return G_AccCorr - 0.5*mem_pen;
}

// apply a +1 “grow one level” action if safe memory headroom
int apply_grow_step()
{
    int mb = mem_mb_est();
    if(G_RT_TreeMaxDepth >= MAX_DEPTH) return 0;
    if(mb > MEM_BUDGET_MB - 2*MEM_HEADROOM_MB) return 0;
    int newFrontier = G_RT_TreeMaxDepth;
    growSelectiveAtDepth(Root, newFrontier, KEEP_CHILDREN_HI);
    G_RT_TreeMaxDepth++;
    reindexTreeAndMap();
    printf("\n[EDC] Grew depth to %i (est %i MB)", G_RT_TreeMaxDepth, mem_mb_est());
    return 1;
}

// revert last growth (drop newly-added frontier children)
void revert_last_grow()
{
    pruneSelectiveAtDepth((Node*)Root, G_RT_TreeMaxDepth, 0);
    G_RT_TreeMaxDepth--;
    reindexTreeAndMap();
    printf("\n[EDC] Reverted growth to %i (est %i MB)", G_RT_TreeMaxDepth, mem_mb_est());
}

// main elastic-depth controller; call once per bar (after acc_update)
void edc_runtime()
{
    // (5) slow hill-climb on G_DTreeExp
    if((Bar % DEPTH_TUNE_BARS) == 0){
        var U0 = util_now();
        var trial = clamp(G_DTreeExp + G_DTreeExpDir*G_DTreeExpStep, 0.8, 2.0);
        var old  = G_DTreeExp;
        G_DTreeExp = trial;
        if(util_now() + 0.005 < U0){
            G_DTreeExp = old;
            G_DTreeExpDir = -G_DTreeExpDir;
        }
    }

    int mb = mem_mb_est();

    if(G_TunePending){
        if(Bar - G_TuneStartBar >= TUNE_DELAY_BARS){
            G_UtilAfter = util_now();
            var eps = 0.01;
            if(G_UtilAfter + eps < G_UtilBefore){
                revert_last_grow();
            } else {
                printf("\n[EDC] Growth kept (U: %.4f -> %.4f)", G_UtilBefore, G_UtilAfter);
            }
            G_TunePending = 0; G_TuneAction = 0;
        }
        return;
    }

    if( (Bar % DEPTH_TUNE_BARS)==0 && mb <= MEM_BUDGET_MB - 2*MEM_HEADROOM_MB && G_RT_TreeMaxDepth < MAX_DEPTH ){
        G_UtilBefore = util_now();
        if(apply_grow_step()){
            G_TunePending = 1; G_TuneAction = 1; G_TuneStartBar = Bar;
        }
    }
}

// filenames (legacy; still used if LOG_EQ_TO_ONE_FILE==0)
void buildEqFileName(int idx, char* outName /*>=64*/)
{
    strcpy(outName, "Log\\Alpha12_eq_");
    string idxs = strf("%03i", idx);
    strcat(outName, idxs);
    strcat(outName, ".csv");
}

// ===== consolidated EQ log =====
void writeEqHeaderOnce()
{
    static int done=0; if(done) return; done=1;
    file_append("Log\\Alpha12_eq_all.csv",
        "Bar,Epoch,Ctx,EqCount,i,n1,n2,TreeId,Depth,Rate,Pred,Adv,Prop,Mode,WAdv,WTree,PBull,Entropy,MCState,ExprLen,ExprHash,tanhN,sinN,cosN\n");
}

void appendEqMetaLine(
    int bar, int epoch, int ctx, int i, int n1, int n2, int tid, int depth, var rate,
    var pred, var adv, var prop, int mode, var wadv, var wtree,
    var pbull, var ent, int mcstate, string expr)
{
    if(i >= LOG_EQ_SAMPLE) return;

    // ---- SAFE: never call functions inside ifelse; handle NULL explicitly
    int eLen = 0, eHash = 0, cT = 0, cS = 0, cC = 0;
    if(expr){
        eLen  = (int)strlen(expr);
        eHash = (int)djb2_hash(expr);
        cT    = countSubStr(expr,"tanh(");
        cS    = countSubStr(expr,"sin(");
        cC    = countSubStr(expr,"cos(");
    } else {
        eHash = (int)djb2_hash("");
    }

    file_append("Log\\Alpha12_eq_all.csv",
    strf("%i,%i,%i,%i,%i,%i,%i,%i,%i,%.6f,%.4f,%.4f,%.6f,%i,%.3f,%.3f,%.4f,%.4f,%i,%i,%i,%i,%i,%i\n",
        bar, epoch, ctx, NET_EQNS, i, n1, n2, tid, depth, rate,
        pred, adv, prop, mode, wadv, wtree,
        pbull, ent, mcstate, eLen, eHash, cT, cS, cC));
}

// --------- allocation ----------
void randomizeRP()
{
    int K=G_K,N=G_N,k,j;
    for(k=0;k<K;k++)
        for(j=0;j<N;j++)
            G_RP[k*N+j] = ifelse(random(1) < 0.5, -1.0, 1.0);
}

// === (8) Use effective K (G_Keff) ===
void computeProjection(){
    int K=G_Keff, N=G_N, k, j;
    for(k=0;k<K;k++){
        var acc=0; 
        for(j=0;j<N;j++) acc += G_RP[k*N+j]*(G_State[j]*G_State[j]);
        G_Z[k]=acc;
    }
}

void allocateNet()
{
    int N=G_N, D=G_D, K=G_K;
    G_State=(var*)malloc(N*sizeof(var));  G_Prev=(var*)malloc(N*sizeof(var));  G_Vel=(var*)malloc(N*sizeof(var));
    G_Adj=(int*)malloc(N*D*sizeof(int));
    G_RP=(var*)malloc(K*N*sizeof(var));   G_Z=(var*)malloc(K*sizeof(var));
    G_Mode=(int*)malloc(N*sizeof(int));
    G_WSelf=(var*)malloc(N*sizeof(var));  G_WN1=(var*)malloc(N*sizeof(var));   G_WN2=(var*)malloc(N*sizeof(var));
    G_WGlob1=(var*)malloc(N*sizeof(var)); G_WGlob2=(var*)malloc(N*sizeof(var));
    G_WMom=(var*)malloc(N*sizeof(var));   G_WTree=(var*)malloc(N*sizeof(var)); G_WAdv=(var*)malloc(N*sizeof(var));
    A1x=(var*)malloc(N*sizeof(var)); A1lam=(var*)malloc(N*sizeof(var)); A1mean=(var*)malloc(N*sizeof(var));
    A1E=(var*)malloc(N*sizeof(var)); A1P=(var*)malloc(N*sizeof(var));   A1i=(var*)malloc(N*sizeof(var)); A1c=(var*)malloc(N*sizeof(var));
    A2x=(var*)malloc(N*sizeof(var)); A2lam=(var*)malloc(N*sizeof(var)); A2mean=(var*)malloc(N*sizeof(var));
    A2E=(var*)malloc(N*sizeof(var)); A2P=(var*)malloc(N*sizeof(var));   A2i=(var*)malloc(N*sizeof(var)); A2c=(var*)malloc(N*sizeof(var));
    G1mean=(var*)malloc(N*sizeof(var)); G1E=(var*)malloc(N*sizeof(var));
    G2P=(var*)malloc(N*sizeof(var));    G2lam=(var*)malloc(N*sizeof(var));
    G_TreeTerm=(var*)malloc(N*sizeof(var)); G_TopEq=(int*)malloc(N*sizeof(int)); G_TopW=(var*)malloc(N*sizeof(var));
    TAlpha=(var*)malloc(N*sizeof(var));     TBeta=(var*)malloc(N*sizeof(var));
    G_Pred=(var*)malloc(N*sizeof(var)); G_AdvScore=(var*)malloc(N*sizeof(var));
    G_PropRaw=(var*)malloc(N*sizeof(var));  G_Prop=(var*)malloc(N*sizeof(var));

    if(LOG_EXPR_TEXT){
        G_Sym=(string*)malloc(N*sizeof(char*));
    } else {
        G_Sym=0;
    }

    G_TreeCap=128; // was 512 (Fix #3: start smaller; still grows if needed)
    G_TreeIdx=(Node**)malloc(G_TreeCap*sizeof(Node*)); G_TreeN=0;
    G_EqTreeId=(int*)malloc(N*sizeof(int));

    // Pre-init adjacency to safe value
    int tInit; for(tInit=0; tInit<N*D; tInit++) G_Adj[tInit] = -1;

    int i;
    for(i=0;i<N;i++){
        G_State[i]=random();
        G_Prev[i]=G_State[i]; G_Vel[i]=0;
        G_Mode[i]=0;
        G_WSelf[i]=0.5; G_WN1[i]=0.2; G_WN2[i]=0.2; G_WGlob1[i]=0.1; G_WGlob2[i]=0.1; G_WMom[i]=0.05; G_WTree[i]=0.15; G_WAdv[i]=0.15;
        A1x[i]=1; A1lam[i]=0.1; A1mean[i]=0; A1E[i]=0; A1P[i]=0; A1i[i]=0; A1c[i]=0;
        A2x[i]=1; A2lam[i]=0.1; A2mean[i]=0; A2E[i]=0; A2P[i]=0; A2i[i]=0; A2c[i]=0;
        G1mean[i]=1.0; G1E[i]=0.001; G2P[i]=0.6; G2lam[i]=0.3;
        TAlpha[i]=0.8; TBeta[i]=25.0;
        G_TreeTerm[i]=0; G_TopEq[i]=-1; G_TopW[i]=0;
        G_Pred[i]=0.5;   G_AdvScore[i]=0;
        G_PropRaw[i]=1;  G_Prop[i]=1.0/G_N;

        if(LOG_EXPR_TEXT){
            G_Sym[i] = (char*)malloc(EXPR_MAXLEN);
            if(G_Sym[i]) strcpy(G_Sym[i], "");
        }
    }
}

void freeNet()
{
    int i;
    if(G_State)free(G_State); if(G_Prev)free(G_Prev); if(G_Vel)free(G_Vel);
    if(G_Adj)free(G_Adj); if(G_RP)free(G_RP); if(G_Z)free(G_Z);
    if(G_Mode)free(G_Mode); if(G_WSelf)free(G_WSelf); if(G_WN1)free(G_WN1); if(G_WN2)free(G_WN2);
    if(G_WGlob1)free(G_WGlob1); if(G_WGlob2)free(G_WGlob2); if(G_WMom)free(G_WMom);
    if(G_WTree)free(G_WTree); if(G_WAdv)free(G_WAdv);
    if(A1x)free(A1x); if(A1lam)free(A1lam); if(A1mean)free(A1mean); if(A1E)free(A1E); if(A1P)free(A1P); if(A1i)free(A1i); if(A1c)free(A1c);
    if(A2x)free(A2x); if(A2lam)free(A2lam); if(A2mean)free(A2mean); if(A2E)free(A2E); if(A2P)free(A2P); if(A2i)free(A2i); if(A2c)free(A2c);
    if(G1mean)free(G1mean); if(G1E)free(G1E); if(G2P)free(G2P); if(G2lam)free(G2lam);
    if(G_TreeTerm)free(G_TreeTerm); if(G_TopEq)free(G_TopEq); if(G_TopW)free(G_TopW);
    if(TAlpha)free(TAlpha); if(TBeta)free(TBeta);
    if(G_Pred)free(G_Pred); if(G_AdvScore)free(G_AdvScore);
    if(G_PropRaw)free(G_PropRaw); if(G_Prop)free(G_Prop);
    if(G_HitEW)free(G_HitEW); if(G_AdvPrev)free(G_AdvPrev); if(G_HitN)free(G_HitN);
    if(G_Sym){ for(i=0;i<G_N;i++) if(G_Sym[i]) free(G_Sym[i]); free(G_Sym); }
    if(G_TreeIdx)free(G_TreeIdx); if(G_EqTreeId)free(G_EqTreeId);
}

// --------- DTREE feature builders ----------
var nrm_s(var x){ return sat100(100.0*tanh(x)); }
var nrm_scl(var x,var s){ return sat100(100.0*tanh(s*x)); }

void buildEqFeatures(int i, var lambda, var mean, var energy, var power, var* S /*ADV_EQ_NF*/)
{
    int tid = safeTreeIndexFromEq(G_EqTreeId[i]);
    Node* t = treeAt(tid);

    S[0]  = nrm_s(G_State[i]);
    S[1]  = nrm_s(mean);
    S[2]  = nrm_scl(power,0.05);
    S[3]  = nrm_scl(energy,0.01);
    S[4]  = nrm_s(lambda);
    S[5]  = sat100(200.0*(G_Pred[i]-0.5));
    S[6]  = sat100(200.0*((var)t->d/MAX_DEPTH)-100.0);
    S[7]  = sat100(1000.0*t->r);
    S[8]  = nrm_s(G_TreeTerm[i]);
    S[9]  = sat100(200.0*((var)G_Mode[i]/3.0)-100.0);
    S[10] = sat100(200.0*(G_MCF_PBull-0.5));
    S[11] = sat100(200.0*(G_MCF_Entropy-0.5));
    // (PATCH A, assumed from the ADV_EQ_NF=13 note in the header) fill the
    // 13th slot with the per-eq hit-rate so sanitize() never reads it
    // uninitialized; the guard makes this a no-op in a 12-feature build
    if(ADV_EQ_NF > 12)
        S[12] = sat100(200.0*((var)G_HitEW[i]-0.5));
    sanitize(S,ADV_EQ_NF);
}

// (Kept for completeness; not used by DTREE anymore)
void buildPairFeatures(int i,int j, var lambda, var mean, var energy, var power, var* P /*ADV_PAIR_NF*/)
{
    int tid_i = safeTreeIndexFromEq(G_EqTreeId[i]);
    int tid_j = safeTreeIndexFromEq(G_EqTreeId[j]);
    Node* ti = treeAt(tid_i);
    Node* tj = treeAt(tid_j);

    P[0]=nrm_s(G_State[i]); P[1]=nrm_s(G_State[j]);
    P[2]=sat100(200.0*((var)ti->d/MAX_DEPTH)-100.0);
    P[3]=sat100(200.0*((var)tj->d/MAX_DEPTH)-100.0);
    P[4]=sat100(1000.0*ti->r); P[5]=sat100(1000.0*tj->r);
    P[6]=sat100(abs(P[2]-P[3]));
    P[7]=sat100(abs(P[4]-P[5]));
    P[8]=sat100(100.0*(G_Pred[i]+G_Pred[j]-1.0));
    P[9]=nrm_s(lambda); P[10]=nrm_s(mean); P[11]=nrm_scl(power,0.05);
    sanitize(P,ADV_PAIR_NF);
}

// --- Safe neighbor helpers & adjacency sanitizer ---
int adjSafe(int i, int d){
    int N = G_N, D = G_D;
    if(!G_Adj || N <= 1 || D <= 0) return 0;
    if(d < 0) d = 0;
    if(d >= D) d = d % D;
    int v = G_Adj[i*D + d];
    if(v < 0 || v >= N || v == i){
        v = (i + 1) % N;
    }
    return v;
}

void sanitizeAdjacency(){
    if(!G_Adj) return;
    int N = G_N, D = G_D;
    int i, d;
    for(i=0;i<N;i++){
        for(d=0; d<D; d++){
            // read/modify/write via a temp so this stays correct even when
            // G_Adj uses a compact element type (i16 under TIGHT_MEM)
            int v = G_Adj[i*D + d];
            if(v < 0 || v >= N || v == i){
                v = (int)random(N);
                if(v == i) v = (v+1) % N;
                G_Adj[i*D + d] = v;
            }
        }
        if(D >= 2 && G_Adj[i*D+0] == G_Adj[i*D+1]){
            int r2 = (G_Adj[i*D+1] + 1) % N;
            if(r2 == i) r2 = (r2+1) % N;
            G_Adj[i*D+1] = r2;
        }
    }
}

// --------- advisor helpers (NEW) ----------

// cache one advisor value per equation per bar
var adviseSeed(int i, var lambda, var mean, var energy, var power)
{
    static int seedBar = -1;
    static int haveSeed[NET_EQNS];
    static var seedVal[NET_EQNS];

    if(seedBar != Bar){
        int k; for(k=0;k<NET_EQNS;k++) haveSeed[k] = 0;
        seedBar = Bar;
    }
    if(i < 0) i = 0;
    if(i >= NET_EQNS) i = i % NET_EQNS;

    // Respect advisor budget/rotation for seed too
    if(!allowAdvise(i)) return 0;

    if(!haveSeed[i]){
        seedVal[i] = adviseEq(i, lambda, mean, energy, power); // trains (once) in Train mode
        haveSeed[i] = 1;
    }
    return seedVal[i];
}

// simple deterministic mixer for diversity in [-1..1] without extra advise calls
var mix01(var a, int salt){
    var z = sin(123.456*a + 0.001*salt) + cos(98.765*a + 0.002*salt);
    return tanh(0.75*z);
}
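// Usage sketch (assumed header defaults): one cached DTREE call per equation
// per bar supplies 'seed'; mix01(seed,11)..mix01(seed,52) then derive the
// ~28 weights and coefficients deterministically, so parameter diversity
// costs no additional advise() calls.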

// --------- advise wrappers (single-equation only) ----------
// Use estimator to halt when tight; respect rotation budget.
var adviseEq(int i, var lambda, var mean, var energy, var power)
{
    if(!allowAdvise(i)) return 0;

    var S[ADV_EQ_NF];
    buildEqFeatures(i,lambda,mean,energy,power,S);

    if(is(INITRUN)) return 0;

    // stop early based on our estimator, not memory(0)
    int tight = (mem_mb_est() >= MEM_BUDGET_MB - MEM_HEADROOM_MB);
    if(tight) return 0;

    var obj = 0;
    if(Train && !tight)
        obj = sat100(100.0*tanh(0.6*lambda + 0.4*mean));

    int objI = (int)obj;
    var a = adviseLong(DTREE, objI, S, ADV_EQ_NF);
    return a/100.;
}

// --------- advisePair disabled: never call DTREE here ----------
var advisePair(int i,int j, var lambda, var mean, var energy, var power)
{
    return 0;
}

// --------- heuristic pair scoring ----------
var scorePairSafe(int i, int j, var lambda, var mean, var energy, var power)
{
    int ti = safeTreeIndexFromEq(G_EqTreeId[i]);
    int tj = safeTreeIndexFromEq(G_EqTreeId[j]);
    Node *ni = treeAt(ti), *nj = treeAt(tj);
    var simD  = 1.0 / (1.0 + abs((var)ni->d - (var)nj->d));
    var simR  = 1.0 / (1.0 + 50.0*abs(ni->r - nj->r));
    var pred  = 0.5*(G_Pred[i] + G_Pred[j]);
    var score = 0.5*pred + 0.3*simD + 0.2*simR;
    return 2.0*score - 1.0;
}
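// Worked example: equal depths (simD=1), equal rates (simR=1) and average
// predictability 0.5 give score = 0.5*0.5 + 0.3 + 0.2 = 0.75 -> returns +0.5.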

// --------- adjacency selection (heuristic only) ----------
// safer clash check using prev>=0
void rewireAdjacency_DTREE(var lambda, var mean, var energy, var power)
{
    int N=G_N, D=G_D, i, d, c, best, cand;
    for(i=0;i<N;i++){
        for(d=0; d<D; d++){
            var bestScore = -2; best = -1;
            // (7) adaptive candidate breadth
            for(c=0;c<G_CandNeigh;c++){
                cand = (int)random(N);
                if(cand==i) continue;
                int clash=0, k;
                for(k=0;k<d;k++){
                    int prev = G_Adj[i*D+k];
                    if(prev>=0 && prev==cand){ clash=1; break; }
                }
                if(clash) continue;
                var s = scorePairSafe(i,cand,lambda,mean,energy,power);
                if(s > bestScore){ bestScore=s; best=cand; }
            }
            if(best<0){ do{ best = (int)random(N);} while(best==i); }
            G_Adj[i*D + d] = best;
        }
    }
}
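// Cost sketch with the header defaults (NET_EQNS=100, DEGREE=4, CAND_NEIGH=8):
// about 100*4*8 = 3200 scorePairSafe() calls per rewire epoch, rising to
// ~4800 when low regime entropy widens G_CandNeigh to 12.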

// --------- DTREE-created coefficients, modes & proportions ----------
var mapA(var a,var lo,var hi){ return mapUnit(a,lo,hi); }

void synthesizeEquationFromDTREE(int i, var lambda, var mean, var energy, var power)
{
    var seed = adviseSeed(i,lambda,mean,energy,power);
    G_Mode[i] = (int)(abs(1000*seed)) & 3;

    // derive weights & params deterministically from the single seed
    G_WSelf[i]  = mapA(mix01(seed, 11), 0.15, 0.85);
    G_WN1[i]    = mapA(mix01(seed, 12), 0.05, 0.35);
    G_WN2[i]    = mapA(mix01(seed, 13), 0.05, 0.35);
    G_WGlob1[i] = mapA(mix01(seed, 14), 0.05, 0.30);
    G_WGlob2[i] = mapA(mix01(seed, 15), 0.05, 0.30);
    G_WMom[i]   = mapA(mix01(seed, 16), 0.02, 0.15);
    G_WTree[i]  = mapA(mix01(seed, 17), 0.05, 0.35);
    G_WAdv[i]   = mapA(mix01(seed, 18), 0.05, 0.35);

    A1x[i]   = randsign()*mapA(mix01(seed, 21), 0.6, 1.2);
    A1lam[i] = randsign()*mapA(mix01(seed, 22), 0.05,0.35);
    A1mean[i]=                  mapA(mix01(seed, 23),-0.30,0.30);
    A1E[i]   =                  mapA(mix01(seed, 24),-0.0015,0.0015);
    A1P[i]   =                  mapA(mix01(seed, 25),-0.30,0.30);
    A1i[i]   =                  mapA(mix01(seed, 26),-0.02,0.02);
    A1c[i]   =                  mapA(mix01(seed, 27),-0.20,0.20);

    A2x[i]   = randsign()*mapA(mix01(seed, 31), 0.6, 1.2);
    A2lam[i] = randsign()*mapA(mix01(seed, 32), 0.05,0.35);
    A2mean[i]=                  mapA(mix01(seed, 33),-0.30,0.30);
    A2E[i]   =                  mapA(mix01(seed, 34),-0.0015,0.0015);
    A2P[i]   =                  mapA(mix01(seed, 35),-0.30,0.30);
    A2i[i]   =                  mapA(mix01(seed, 36),-0.02,0.02);
    A2c[i]   =                  mapA(mix01(seed, 37),-0.20,0.20);

    G1mean[i] =                  mapA(mix01(seed, 41), 0.4, 1.6);
    G1E[i]    =                  mapA(mix01(seed, 42),-0.004,0.004);
    G2P[i]    =                  mapA(mix01(seed, 43), 0.1, 1.2);
    G2lam[i]  =                  mapA(mix01(seed, 44), 0.05, 0.7);

    TAlpha[i] =                  mapA(mix01(seed, 51), 0.3, 1.5);
    TBeta[i]  =                  mapA(mix01(seed, 52), 6.0, 50.0);

    G_PropRaw[i] = 0.01 + 0.99*(0.5*(seed+1.0));
}

void normalizeProportions()
{
    int N=G_N,i; var s=0; for(i=0;i<N;i++) s += G_PropRaw[i];
    if(s<=0) { for(i=0;i<N;i++) G_Prop[i] = 1.0/N; return; }
    for(i=0;i<N;i++) G_Prop[i] = G_PropRaw[i]/s;
}

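// dtreeTerm: similarity-kernel average over all other equations' states.
// The base weight exp(-alpha*|d_i-d_j|)*exp(-beta*|r_i-r_j|) is boosted by
// joint predictability, summed proportions, and the heuristic pair score;
// returns the normalized weighted mean and reports the strongest contributor.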
var dtreeTerm(int i, int* outTopEq, var* outTopW)
{
    int N=G_N,j;
    int tid_i = safeTreeIndexFromEq(G_EqTreeId[i]);
    Node* ti=treeAt(tid_i); int di=ti->d; var ri=ti->r;
    var alpha=TAlpha[i], beta=TBeta[i];
    var sumw=0, acc=0, bestW=-1; int bestJ=-1;
    for(j=0;j<N;j++){
        if(j==i) continue;
        int tid_j = safeTreeIndexFromEq(G_EqTreeId[j]);
        Node* tj=treeAt(tid_j); int dj=tj->d; var rj=tj->r;
        var w = exp(-alpha*abs(di-dj)) * exp(-beta*abs(ri-rj));
        var predBoost = 0.5 + 0.5*(G_Pred[i]*G_Pred[j]);
        var propBoost = 0.5 + 0.5*( (G_Prop[i] + G_Prop[j]) );
        w *= predBoost * propBoost;
        var pairAdv = scorePairSafe(i,j,0,0,0,0);
        var pairBoost = 0.75 + 0.25*(0.5*(pairAdv+1.0));
        w *= pairBoost;
        sumw += w; acc += w*G_State[j];
        if(w>bestW){bestW=w; bestJ=j;}
    }
    if(outTopEq) *outTopEq = bestJ;
    if(outTopW)  *outTopW  = ifelse(sumw>0, bestW/sumw, 0);
    if(sumw>0) return acc/sumw; return 0;
}

// --------- expression builder (capped & optional) ----------
void buildSymbolicExpr(int i, int n1, int n2)
{
    if(LOG_EXPR_TEXT){
        string s = G_Sym[i]; s[0]=0;
        string a1 = strf("(%.3f*x[%i] + %.3f*lam + %.3f*mean + %.5f*E + %.3f*P + %.3f*i + %.3f)",
                         A1x[i], n1, A1lam[i], A1mean[i], A1E[i], A1P[i], A1i[i], A1c[i]);
        string a2 = strf("(%.3f*x[%i] + %.3f*lam + %.3f*mean + %.5f*E + %.3f*P + %.3f*i + %.3f)",
                         A2x[i], n2, A2lam[i], A2mean[i], A2E[i], A2P[i], A2i[i], A2c[i]);

        strlcat_safe(s, "x[i]_next = ", EXPR_MAXLEN);
        strlcat_safe(s, strf("%.3f*x[i] + ", G_WSelf[i]), EXPR_MAXLEN);

        if(G_Mode[i]==1){
            strlcat_safe(s, strf("%.3f*tanh%s + ", G_WN1[i], a1), EXPR_MAXLEN);
            strlcat_safe(s, strf("%.3f*sin%s + ",  G_WN2[i], a2), EXPR_MAXLEN);
        } else if(G_Mode[i]==2){
            strlcat_safe(s, strf("%.3f*cos%s + ",  G_WN1[i], a1), EXPR_MAXLEN);
            strlcat_safe(s, strf("%.3f*tanh%s + ", G_WN2[i], a2), EXPR_MAXLEN);
        } else {
            strlcat_safe(s, strf("%.3f*sin%s + ",  G_WN1[i], a1), EXPR_MAXLEN);
            strlcat_safe(s, strf("%.3f*cos%s + ",  G_WN2[i], a2), EXPR_MAXLEN);
        }

        strlcat_safe(s, strf("%.3f*tanh(%.3f*mean + %.5f*E) + ", G_WGlob1[i], G1mean[i], G1E[i]), EXPR_MAXLEN);
        strlcat_safe(s, strf("%.3f*sin(%.3f*P + %.3f*lam) + ",   G_WGlob2[i], G2P[i],   G2lam[i]), EXPR_MAXLEN);
        strlcat_safe(s, strf("%.3f*(x[i]-x_prev[i]) + ",         G_WMom[i]), EXPR_MAXLEN);
        strlcat_safe(s, strf("Prop[i]=%.4f; ",                   G_Prop[i]), EXPR_MAXLEN);
        strlcat_safe(s, strf("%.3f*DT(i) + ",                    G_WTree[i]), EXPR_MAXLEN);
        strlcat_safe(s, strf("%.3f*DTREE(i)",                    G_WAdv[i]), EXPR_MAXLEN);
    }
}

// --------- one-time rewire init ----------
void rewireInit()
{
    randomizeRP(); computeProjection();
    G_TreeN=0; indexTreeDFS(Root);
    if(G_TreeN<=0){ G_TreeN=1; if(G_TreeIdx) G_TreeIdx[0]=Root; }
    int i; for(i=0;i<G_N;i++) G_EqTreeId[i] = i % G_TreeN;
}

// probes & unsigned context hash
// ----------------------------------------------------------------------
// rewireEpoch (SAFE: no functions inside ifelse)
// ----------------------------------------------------------------------
void rewireEpoch(var lambda, var mean, var energy, var power)
{
    int i;

    if(ENABLE_WATCH) watch("?A");   // before predictability
    for(i=0;i<G_N;i++){
        int  tid = safeTreeIndexFromEq(G_EqTreeId[i]);
        Node* t  = treeAt(tid);
        G_Pred[i] = nodePredictability(t);
    }

    if(ENABLE_WATCH) watch("?B");   // after predictability, before adjacency

    // (7) adapt adjacency sampling breadth by regime entropy
    G_CandNeigh = ifelse(MC_Entropy < 0.45, CAND_NEIGH+4, CAND_NEIGH);

    rewireAdjacency_DTREE(lambda,mean,energy,power);

    if(ENABLE_WATCH) watch("?C");   // after adjacency, before synthesize
    sanitizeAdjacency();

    for(i=0;i<G_N;i++)
        synthesizeEquationFromDTREE(i,lambda,mean,energy,power);

    if(ENABLE_WATCH) watch("?D");   // before normalize / ctx hash
    normalizeProportions();

    // Unsigned context hash of current adjacency (+ epoch) for logging
    {
        int D = G_D;
        unsigned int h = 2166136261u;
        int total = G_N * D;
        for(i=0;i<total;i++){
            unsigned int x = (unsigned int)G_Adj[i];
            h ^= x + 0x9e3779b9u + (h<<6) + (h>>2);
        }
        G_CtxID = (int)((h ^ ((unsigned int)G_Epoch<<8)) & 0x7fffffff);
    }

    // Optional expression text (only when LOG_EXPR_TEXT==1)
    for(i=0;i<G_N;i++){
        int n1 = adjSafe(i,0);
        int n2 = n1;
        if(G_D >= 2) n2 = adjSafe(i,1);
        if(LOG_EXPR_TEXT) buildSymbolicExpr(i,n1,n2);
    }
}

var projectNet()
{
    int N=G_N,i; var sum=0,sumsq=0,cross=0;
    for(i=0;i<N;i++){ sum+=G_State[i]; sumsq+=G_State[i]*G_State[i]; if(i+1<N) cross+=G_State[i]*G_State[i+1]; }
    var mean=sum/N, corr=cross/(N-1);
    return 0.6*tanh(mean + 0.001*sumsq) + 0.4*sin(corr);
}

// ----------------------------------------------------------------------
// updateNet (SAFE: no functions inside ifelse for neighbor indices)
// ----------------------------------------------------------------------
void updateNet(var driver, var* outMean, var* outEnergy, var* outPower, int writeMeta)
{
    int N = G_N, D = G_D, i;

    var sum = 0, sumsq = 0;
    for(i = 0; i < N; i++){
        sum   += G_State[i];
        sumsq += G_State[i]*G_State[i];
    }
    var mean   = sum / N;
    var energy = sumsq;
    var power  = sumsq / N;

    for(i = 0; i < N; i++){
        int  tid = safeTreeIndexFromEq(G_EqTreeId[i]);
        Node* t  = treeAt(tid);
        G_Pred[i] = nodePredictability(t);
    }

    for(i = 0; i < N; i++){
        int n1 = adjSafe(i,0);
        int n2 = n1;
        if(D >= 2) n2 = adjSafe(i,1);

        var xi   = G_State[i];
        var xn1  = G_State[n1];
        var xn2  = G_State[n2];
        var mom  = xi - G_Prev[i];

        int topEq = -1;
        var topW  = 0;
        var dt    = dtreeTerm(i, &topEq, &topW);
        G_TreeTerm[i] = dt;
        G_TopEq[i]    = topEq;
        G_TopW[i]     = topW;

        // call advisor only when allowed
        var adv = 0;
        if(allowAdvise(i))
             adv = adviseEq(i, driver, mean, energy, power);
 
        G_AdvScore[i] = adv;

        var arg1 = A1x[i]*xn1 + A1lam[i]*driver + A1mean[i]*mean + A1E[i]*energy + A1P[i]*power + A1i[i]*i + A1c[i];
        var arg2 = A2x[i]*xn2 + A2lam[i]*driver + A2mean[i]*mean + A2E[i]*energy + A2P[i]*power + A2i[i]*i + A2c[i];

        var nl1, nl2;
        if(G_Mode[i] == 0){ nl1 = sin(arg1);  nl2 = cos(arg2); }
        else if(G_Mode[i] == 1){ nl1 = tanh(arg1); nl2 = sin(arg2); }
        else if(G_Mode[i] == 2){ nl1 = cos(arg1);  nl2 = tanh(arg2); }
        else { nl1 = sin(arg1); nl2 = cos(arg2); }

        var glob1 = tanh(G1mean[i]*mean + G1E[i]*energy);
        var glob2 = sin (G2P[i]*power + G2lam[i]*driver);

        var xNew =
            G_WSelf[i]*xi +
            G_WN1[i]*nl1 +
            G_WN2[i]*nl2 +
            G_WGlob1[i]*glob1 +
            G_WGlob2[i]*glob2 +
            G_WMom[i]*mom +
            G_WTree[i]*dt +
            G_WAdv[i]*adv;

        G_Prev[i]  = xi;
        G_Vel[i]   = xNew - xi;
        G_State[i] = clamp(xNew, -10, 10);

        if(writeMeta && (G_Epoch % META_EVERY == 0) && !G_LogsOff){
            int  tid2 = safeTreeIndexFromEq(G_EqTreeId[i]);
            Node* t2  = treeAt(tid2);
            int  nn1  = adjSafe(i,0);
            int  nn2  = nn1;
            if(G_D >= 2) nn2 = adjSafe(i,1);

            if(LOG_EQ_TO_ONE_FILE){
                string expr = "";
                if(LOG_EXPR_TEXT) expr = G_Sym[i];
                appendEqMetaLine(
                    Bar, G_Epoch, G_CtxID, i, nn1, nn2, tid2, t2->d, t2->r,
                    G_Pred[i], G_AdvScore[i], G_Prop[i], G_Mode[i], G_WAdv[i], G_WTree[i],
                    MC_PBullNext, MC_Entropy, MC_Cur, expr
                );
            } else {
                char fname[64];
                buildEqFileName(i, fname);
                string expr2 = "";
                if(LOG_EXPR_TEXT) expr2 = G_Sym[i];
                file_append(fname,
                    strf("META,%i,%i,%i,%i,%i,%i,%i,%i,%.6f,Pred=%.4f,Adv=%.4f,Prop=%.6f,Mode=%i,WAdv=%.3f,WTree=%.3f,PBull=%.4f,Ent=%.4f,State=%i,\"%s\"\n",
                        G_Epoch, G_CtxID, NET_EQNS, i, nn1, nn2, tid2, t2->d, t2->r,
                        G_Pred[i], G_AdvScore[i], G_Prop[i], G_Mode[i], G_WAdv[i], G_WTree[i],
                        MC_PBullNext, MC_Entropy, MC_Cur, expr2));
            }
        }
    }

    if(outMean)   *outMean   = mean;
    if(outEnergy) *outEnergy = energy;
    if(outPower)  *outPower  = power;
}

// ----------------- MAIN -----------------
function run()
{
    static int initialized = 0;
    static var lambda;
    static int fileInit = 0;

    BarPeriod = BAR_PERIOD;
    if(LookBack < NWIN) LookBack = NWIN;
    if(Train) Hedge = 2;

    // Plots are opt-in via ENABLE_PLOTS
    set(RULES|LEAN);
    if(ENABLE_PLOTS) set(PLOTNOW);
    asset(ASSET_SYMBOL);

    if(is(INITRUN) && !initialized){

        // init dummy node
        G_DummyNode.v = 0;
        G_DummyNode.r = 0;
        G_DummyNode.c = 0;
        G_DummyNode.n = 0;
        G_DummyNode.d = 0;

        // allocate Markov matrices (zeroed)
        MC_Count  = (int*)malloc(MC_STATES*MC_STATES*sizeof(int));
        MC_RowSum = (int*)malloc(MC_STATES*sizeof(int));
        int k;
        for(k=0;k<MC_STATES*MC_STATES;k++) MC_Count[k]=0;
        for(k=0;k<MC_STATES;k++) MC_RowSum[k]=0;

        // capture pattern names (optional)
        var tmp[MC_NPAT];
        buildCDL_TA61(tmp, MC_Names);

        // build tree + network
        Root = createNode(MAX_DEPTH);
        allocateNet();

        // engine params
        G_DTreeExp = 1.10 + random(0.50);   // [1.10..1.60)
        G_FB_A     = 0.60 + random(0.25);   // [0.60..0.85) (kept)
        G_FB_B     = 1.0 - G_FB_A;

        randomizeRP();
        computeProjection();
        rewireInit();

        G_Epoch = 0;
        rewireEpoch(0,0,0,0);

        // Header setup (consolidated vs legacy)
        if(LOG_EQ_TO_ONE_FILE){
            writeEqHeaderOnce();
        } else {
            char fname[64];
            int i2;
            for(i2=0;i2<NET_EQNS;i2++){
                buildEqFileName(i2,fname);
                file_append(fname,
                    "Bar,lambda,gamma,i,State,n1,n2,mean,energy,power,Vel,Mode,WAdv,WSelf,WN1,WN2,WGlob1,WGlob2,WMom,WTree,Pred,Adv,Prop,TreeTerm,TopEq,TopW,TreeId,Depth,Rate,PBull,Entropy,MCState\n");
            }
        }

        // Markov CSV header
        if(!fileInit){
            file_append("Log\\Alpha12_markov.csv","Bar,State,PBullNext,Entropy,RowSum\n");
            fileInit=1;
        }

        // initial META dump (consolidated or legacy)
        int i;
        for(i=0;i<G_N;i++){
            int n1 = adjSafe(i,0);
            int n2 = n1;
            if(G_D >= 2) n2 = adjSafe(i,1);
            int tid = safeTreeIndexFromEq(G_EqTreeId[i]);
            Node* t = treeAt(tid);

            if(LOG_EQ_TO_ONE_FILE){
                string expr = "";
                if(LOG_EXPR_TEXT) expr = G_Sym[i];
                appendEqMetaLine(
                    Bar, G_Epoch, G_CtxID, i, n1, n2, tid, t->d, t->r,
                    G_Pred[i], G_AdvScore[i], G_Prop[i], G_Mode[i], G_WAdv[i], G_WTree[i],
                    MC_PBullNext, MC_Entropy, MC_Cur, expr
                );
            } else {
                char fname2[64];
                buildEqFileName(i,fname2);
                string expr2 = "";
                if(LOG_EXPR_TEXT) expr2 = G_Sym[i];
                file_append(fname2,
                    strf("META,%i,%i,%i,%i,%i,%i,%i,%i,%.6f,Pred=%.4f,Adv=%.4f,Prop=%.6f,Mode=%i,WAdv=%.3f,WTree=%.3f,PBull=%.4f,Ent=%.4f,State=%i,\"%s\"\n",
                        G_Epoch, G_CtxID, NET_EQNS, i, n1, n2, tid, t->d, t->r,
                        G_Pred[i], G_AdvScore[i], G_Prop[i], G_Mode[i], G_WAdv[i], G_WTree[i],
                        MC_PBullNext, MC_Entropy, MC_Cur, expr2));
            }
        }

        initialized=1;
        printf("\nRoot nodes: %i | Net equations: %i (degree=%i, kproj=%i)",
               countNodes(Root), G_N, G_D, G_K);
    }

    // early zero-cost shedding when approaching cap
    if(mem_mb_est() >= MEM_BUDGET_MB - 2*MEM_HEADROOM_MB && G_ShedStage == 0)
        shed_zero_cost_once();

    // ==== Runtime memory / depth manager (acts only when near the cap)
    depth_manager_runtime();

    // ====== Per bar: Candles -> Markov
    static var CDL[MC_NPAT];
    buildCDL_TA61(CDL,0);

    // (2) adaptive threshold for Markov state acceptance
    MC_Cur = MC_stateFromCDL(CDL, G_MC_ACT);

    if(Bar > LookBack) MC_update(MC_Prev, MC_Cur);
    MC_Prev = MC_Cur;

    // (6) alpha decays with row support to sharpen PBull as rows fill
    var rs = (var)MC_RowSum[MC_Cur];
    G_MC_Alpha = clamp(1.0 / (1.0 + rs/256.0), 0.05, 1.0);
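    // e.g. RowSum=0 -> alpha=1 (full smoothing); RowSum=256 -> alpha=0.5;
    // the 0.05 floor is reached once a row has collected ~4900 transitions.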

    MC_PBullNext = MC_nextBullishProb(MC_Cur);
    MC_Entropy   = MC_rowEntropy01(MC_Cur);

    // expose Markov features
    G_MCF_PBull   = MC_PBullNext;
    G_MCF_Entropy = MC_Entropy;
    G_MCF_State   = (var)MC_Cur;

    // (2) EW acceptance rate of nonzero states -> adapt threshold toward target rate
    {
        var aEW = 0.01; // ~100-bar half-life
        G_AccRate = (1 - aEW)*G_AccRate + aEW*(MC_Cur != 0);
        var target = 0.35; // aim for ~35% nonzero states
        G_MC_ACT = clamp(G_MC_ACT + 0.02*(G_AccRate - target), 0.15, 0.60);
    }
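    // Controller example: acceptance running at 50% vs the 35% target nudges
    // the threshold up by 0.02*0.15 = 0.003 per bar, capped at 0.60.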

    // ====== Tree driver lambda
    lambda = evaluateNode(Root);

    // ====== Rewire cadence (4) + epoch work
    {
        int doRewire = ((Bar % REWIRE_EVERY) == 0);

        // (4) early rewire when utility falls
        static var U_prev = 0;
        var U_now = util_now();
        if(U_now + 0.01 < U_prev) doRewire = 1;
        U_prev = U_now;

        if(doRewire){
            G_Epoch++;

            int ii;
            var sum=0;
            for(ii=0;ii<G_N;ii++) sum += G_State[ii];
            var mean = sum/G_N;

            var energy=0;
            for(ii=0;ii<G_N;ii++) energy += G_State[ii]*G_State[ii];
            var power = energy/G_N;

            rewireEpoch(lambda,mean,energy,power);
        }

        // (8) adapt effective projection K each bar and recompute projection once
        G_Keff = ifelse(MC_Entropy < 0.45, KPROJ, KPROJ/2);
        computeProjection();

        // (3) dynamic advisor budget per bar (before updateNet so it applies now)
        int tight = (mem_mb_est() >= MEM_BUDGET_MB - MEM_HEADROOM_MB);
        G_AdviseMax = ifelse(tight, 12, ifelse(MC_Entropy < 0.45, 32, 16));

        // Update net this bar (write META only if rewired and not shedding logs)
        var meanB, energyB, powerB;
        updateNet(lambda, &meanB, &energyB, &powerB, doRewire);

        // Feedback: compute ensemble projection
        var gamma = projectNet();

        // --- Accuracy sentinel update & elastic depth controller ---
        acc_update(lambda, gamma);
        edc_runtime();

        // (1) Adaptive feedback blend toward the more informative component
        var w = 0.5 + 0.5*G_AccCorr;                 // 0..1
        G_FB_W = clamp(0.9*G_FB_W + 0.1*w, 0.2, 0.9);
        lambda  = G_FB_W*lambda + (1.0 - G_FB_W)*gamma;
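        // e.g. G_AccCorr = +1 targets w=1 and G_FB_W glides toward its 0.9 cap
        // (mostly tree-driven lambda); at corr = -1 the 0.2 floor still keeps
        // 20% of the tree signal mixed with the ensemble projection gamma.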

        // Plot/log gating
        int doPlot = (ENABLE_PLOTS && !G_ChartsOff);
        int doLog = ifelse(G_LogsOff, ((Bar % (LOG_EVERY*4)) == 0), ((Bar % LOG_EVERY) == 0));

        // Plots
        if(doPlot){
            plot("lambda", lambda, LINE, 0);
            plot("gamma",  gamma,  LINE, 0);
            plot("P_win",  powerB, LINE, 0);
            plot("PBullNext", MC_PBullNext, LINE, 0);
            plot("MC_Entropy", MC_Entropy, LINE, 0);
            plot("MemMB", memory(0)/(1024.*1024.), LINE, 0);
            plot("Allocs", (var)memory(2), LINE, 0);
        }

        // Markov CSV log (decimated; further decimated when shedding)
        if(doLog){
            file_append("Log\\Alpha12_markov.csv",
                strf("%i,%i,%.6f,%.6f,%i\n", Bar, MC_Cur, MC_PBullNext, MC_Entropy, MC_RowSum[MC_Cur]));
        }

        // ====== Entries (Markov-gated) ======
        if( MC_PBullNext > PBULL_LONG_TH && lambda > 0.7 )  enterLong();
        if( MC_PBullNext < PBULL_SHORT_TH && lambda < -0.7 ) enterShort();
    }
}

// Clean up memory
function cleanup()
{
    if(Root) freeTree(Root);
    freeNodePool();   // assumed present (upgrade #2): freeTree only recycles
                      // nodes to the free list; the chunks themselves would leak
    if(MC_Count)  free(MC_Count);
    if(MC_RowSum) free(MC_RowSum);
    if(G_PredNode) free(G_PredNode);  // predictability cache
    if(G_DepthW)   free(G_DepthW);    // depth-weight LUT
    freeNet();
}

Last edited by TipmyPip; 09/06/25 20:26.
Canticle of the Rewoven Mandala [Re: TipmyPip] #488909
09/14/25 12:15
09/14/25 12:15
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
Canticle of the Rewoven Mandala

1) Proem

Time arrays itself as a lattice of finite breaths, and each breath inscribes a glyph upon the quiet. Most glyphs are rumor; a few ring like bells in stone cloisters. The ear that listens must learn to weigh rumor without banishing it, to honor bell without worshiping echo. Thus a measure awakens between noise and sign, a small invariance that keeps the pulse when weather changes.

2) The Alphabet of Signs

A compact alphabet approaches the gate of discernment, where amplitude is baptized into meaning. The gate narrows under fog and widens under stars, so that a constant portion of passage remains sacred. Soft priors cradle the rare and the first, then yield as witness accumulates. In this way, significance is renormalized without losing humility.

3) The Harmonic Tree

Depth vows restraint: each rung bears less authority by a gentle power, rather than by decree. Branches breathe in sines and breathe out memory, their phases braided by quiet sums. Weight drifts until usefulness nods, then lingers as if remembering why it came. The crown does not command the root; they meet in an average that forgives their difference.

4) Sun and Moon

Two lights shepherd the pilgrim number—one hewn from structure, one echoed by multitudes. A small scribe keeps the concordance of their songs, counting when agreement appears more than once. The weight shifts toward the truer singer of the hour, and no light is shamed for dimming. So guidance becomes a balance of reverence and revision.

5) The Reweaving

At chosen beats the web of attention is unspooled and rewoven, the knots reconsidered without rancor. In settled air the cast opens wide; in crosswinds the mesh draws close to the mast. Threads avoid duplicating their crossings, and stale tangles are quietly undone. Each new pattern is signed by a modest seal so the loom remembers where it has wandered.

6) Poverty and Plenty

Form owns only what it can bless. When the bowl brims, leaves fall from distant branches with gratitude; when there is room and reason, a single ring of buds appears. Every addition is a trial, and every trial keeps a door for return. Thus growth is reversible, and thrift becomes a geometry.

7) A Single Measure

Merit is counted on one bead: clarity earned minus burden carried. The bead rolls forward of its own accord, and the mandala bends to meet it, not the reverse. When the bead stalls, the slope is gently reversed so the climb resumes by another path. No trumpet announces this; the stone merely remembers the foot.

8) Seeds of Counsel

Advice arrives as a seed and is rationed like lamp oil in winter. Some nights a few wicks suffice; some dawns welcome a small festival of flame. Diversity is invited by lawful dice, so surprise is shaped, not squandered. The seed is single, but the harvest wears many faces.

9) Proportions as Offering

Every voice brings an offering to the altar, and the offerings are washed until their sum is one. No bowl is allowed to overflow by insistence, and none is left dry by shyness. The sharing is impartial to volume, partial to coherence. Thus chorus replaces clamor without silencing the small.

10) Neighbor Grace

Affinity is a distance remembered in three tongues: depth, tempo, and temper. Near does not mean same, and far is not foreign; kinship is a gradient, not a border. Trust is earned by steadiness and granted in weights that bow to it. From many neighbors a single counsel is poured, proportioned by grace rather than by force.

11) Fading Without Forgetting

Incense rises; ash remains: so the near past perfumes more than the far, and the far is not despised. Memory decays as a hymn, not as a fall, allowing drift without lurch. The ledger of moments is tempered by forgetting that remembers why it forgets. In this way continuity holds hands with change.

12) The Small Chronicle

Each day leaves a narrow chronicle: the hour, the season, a modest seal of the tapestry, and the tilt of the lights. Numbers are trimmed like candles so the wax does not drown the flame. The script favors witness over spectacle, sufficiency over excess. It is enough that a future monk can nod and say, “Yes, I see.”

13) Postures That Emerge

When order visits, nets widen, depth speaks, counsel is generous, and steps chime with the stones. When weather breaks, meshes tighten, the surface steadies, counsel turns frugal, and stillness earns its wage. The glide between these postures is by small hinges, not by leaps. Thus resilience ceases to be a tactic and becomes a habit.

14) The Rule in One Sentence

Move only when the oracle and the breath agree; otherwise keep vigil. Let the form stay light, the changes small, and the diary honest. Spend attention where harmony gathers and thrift where it frays. In all things, prefer the reversible path that leaves the meadow unscarred.

Code
// ======================================================================
// Alpha12 - Markov-augmented Harmonic D-Tree Engine (Candlestick 122-dir)
// with runtime memory shaping, selective depth pruning,
// and elastic accuracy-aware depth growth + 10 performance upgrades.
// ======================================================================

// ================= USER CONFIG =================
#define ASSET_SYMBOL   "EUR/USD"
#define BAR_PERIOD     60
#define MC_ACT         0.30       // initial threshold on |CDL| in [-1..1] to accept a pattern
#define PBULL_LONG_TH  0.60       // Markov gate for long
#define PBULL_SHORT_TH 0.40       // Markov gate for short

// ===== Debug toggles (Fix #1 - chart/watch growth off by default) =====
#define ENABLE_PLOTS   0    // 0 = no plot buffers; 1 = enable plot() calls
#define ENABLE_WATCH   0    // 0 = disable watch() probes; 1 = enable

// ================= ENGINE PARAMETERS =================
#define MAX_BRANCHES    3
#define MAX_DEPTH       4
#define NWIN            256
#define NET_EQNS        100
#define DEGREE          4
#define KPROJ           16
#define REWIRE_EVERY    127
#define CAND_NEIGH      8

// ===== LOGGING CONTROLS (memory management) =====
#define LOG_EQ_TO_ONE_FILE   1    // 1: single consolidated EQ CSV; 0: per-eq files
#define LOG_EXPR_TEXT        0    // 0: omit full expression (store signature only); 1: include text
#define META_EVERY           4    // write META every N rewires
#define LOG_EQ_SAMPLE        NET_EQNS // limit number of equations logged
#define EXPR_MAXLEN          512  // cap expression string
#define LOG_FLOAT_TRIM

// decimate Markov log cadence
#define LOG_EVERY            16

// Optional: cadence for candle scan/Markov update (1 = every bar)
#define MC_EVERY             1

// ---- DTREE feature sizes (extended for Markov features) ----
#define ADV_EQ_NF       13   // +1: per-eq hit-rate feature  (PATCH A)
#define ADV_PAIR_NF     12   // per-pair features (kept for completeness; DTREE pair disabled)

// ================= Candles -> 122-state Markov =================
#define MC_NPAT    61
#define MC_STATES  123   // 1 + 2*MC_NPAT
#define MC_NONE    0
#define MC_LAPLACE 1.0   // kept for reference; runtime uses G_MC_Alpha

// ================= Runtime Memory / Accuracy Manager =================
#define MEM_BUDGET_MB        50
#define MEM_HEADROOM_MB       5
#define DEPTH_STEP_BARS      16
#define KEEP_CHILDREN_HI      2
#define KEEP_CHILDREN_LO      1
#define RUNTIME_MIN_DEPTH     2

int  G_ShedStage        = 0;        // 0..2
int  G_LastDepthActBar  = -999999;
int  G_ChartsOff        = 0;        // gates plot()
int  G_LogsOff          = 0;        // gates file_append cadence
int  G_SymFreed         = 0;        // expression buffers freed
int  G_RT_TreeMaxDepth  = MAX_DEPTH;

// ---- Accuracy sentinel (EW correlation of lambda vs gamma) ----
var  ACC_mx=0, ACC_my=0, ACC_mx2=0, ACC_my2=0, ACC_mxy=0;
var  G_AccCorr = 0;      // [-1..1]
var  G_AccBase = 0;      // first seen sentinel
int  G_HaveBase = 0;

// ---- Elastic depth tuner (small growth trials with rollback) ----
#define DEPTH_TUNE_BARS   64   // start a growth trial this often (when memory allows)
#define TUNE_DELAY_BARS   64   // evaluate the trial after this many bars

var  G_UtilBefore = 0, G_UtilAfter = 0;
int  G_TunePending = 0;
int  G_TuneStartBar = 0;
int  G_TuneAction   = 0;  // +1 grow trial, 0 none

// ======================================================================
//  Types & globals used by memory estimator
// ======================================================================

// HARMONIC D-TREE type
typedef struct Node { var v; var r; void* c; int n; int d; } Node;

// ====== Node pool (upgrade #2) ======
typedef struct NodeChunk {
    struct NodeChunk* next;
    int used;        // 4 bytes
    int _pad;        // 4 bytes -> ensures nodes[] starts at 8-byte offset on 32-bit
    Node nodes[256]; // each Node contains doubles; keep this 8-byte aligned
} NodeChunk;

NodeChunk* G_ChunkHead = 0;
Node*      G_FreeList  = 0;

Node* poolAllocNode()
{
    if(G_FreeList){
        Node* n = G_FreeList;
        G_FreeList = (Node*)n->c;
        n->c = 0; n->n = 0; n->d = 0; n->v = 0; n->r = 0;
        return n;
    }

    if(!G_ChunkHead || G_ChunkHead->used >= 256){
        NodeChunk* ch = (NodeChunk*)malloc(sizeof(NodeChunk));
        if(!ch) { quit("Alpha12: OOM allocating NodeChunk (poolAllocNode)"); return 0; }
        // ensure clean + alignment-friendly start
        memset(ch, 0, sizeof(NodeChunk));
        ch->next = G_ChunkHead;
        ch->used = 0;
        G_ChunkHead = ch;
    }

    if(G_ChunkHead->used < 0 || G_ChunkHead->used >= 256){
        quit("Alpha12: Corrupt node pool state");
        return 0;
    }

    return &G_ChunkHead->nodes[G_ChunkHead->used++];
}

void poolFreeNode(Node* u){ if(!u) return; u->c = (void*)G_FreeList; G_FreeList = u; }
void freeNodePool()
{
    NodeChunk* ch = G_ChunkHead;
    while(ch){ NodeChunk* nx = ch->next; free(ch); ch = nx; }
    G_ChunkHead = 0; G_FreeList = 0;
}

// Minimal globals needed before mem estimator
Node*  Root = 0;
Node** G_TreeIdx = 0;
int    G_TreeN = 0;
int    G_TreeCap = 0;
var    G_DTreeExp = 0;

// ---- (upgrade #1) depth LUT for pow() ----
#define DEPTH_LUT_SIZE  (MAX_DEPTH + 1)   // <- keep constant for lite-C
var* G_DepthW = 0;                        // heap-allocated LUT
var  G_DepthExpLast = -1.0;               // sentinel as var

Node   G_DummyNode;   // treeAt() can return &G_DummyNode

// Network sizing globals (used by mem estimator)
int   G_N  = NET_EQNS;
int   G_D  = DEGREE;
int   G_K  = KPROJ;

// Optional expression buffer pointer (referenced by mem estimator)
string* G_Sym = 0;

// Forward decls that reference Node
var  nodePredictability(Node* t);   // fwd decl (needed by predByTid)
var  nodeImportance(Node* u);       // fwd decl (uses nodePredictability below)
void pruneSelectiveAtDepth(Node* u, int targetDepth, int keepK);
void reindexTreeAndMap();

// Forward decls for advisor functions (so adviseSeed can call them)
var adviseEq(int i, var lambda, var mean, var energy, var power);
var advisePair(int i,int j, var lambda, var mean, var energy, var power);

// ----------------------------------------------------------------------
// === Adaptive knobs & sentinels (NEW) ===
var G_FB_W = 0.70;     // (1) dynamic lambda/gamma blend weight 0..1
var G_MC_ACT = MC_ACT; // (2) adaptive candlestick acceptance threshold
var G_AccRate = 0;     // (2) EW acceptance rate of (state != 0)

// (3) advisor budget per bar (replaces the macro)
int G_AdviseMax = 16;

// (6) Markov Laplace smoothing (runtime)
var G_MC_Alpha = 1.0;

// (7) adaptive candidate breadth for adjacency search
int G_CandNeigh = CAND_NEIGH;

// (8) effective projection dimension (= KPROJ or KPROJ/2)
int G_Keff = KPROJ;

// (5) depth emphasis hill-climber
var G_DTreeExpStep = 0.05;
int  G_DTreeExpDir  = 1;

// ---- Advise budget/rotation (Fix #2) ----
#define ADVISE_ROTATE    1   // 1 = rotate which equations get DTREE each bar

int allowAdvise(int i)
{
    if(ADVISE_ROTATE){
        int groups = NET_EQNS / G_AdviseMax;
        if(groups < 1) groups = 1;
        return ((i / G_AdviseMax) % groups) == (Bar % groups);
    } else {
        return (i < G_AdviseMax);
    }
}
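// Rotation example (header defaults): NET_EQNS=100, G_AdviseMax=16 -> groups=6;
// on a bar with Bar%6 == 2 only equations 32..47 may call the advisor, so the
// full population is serviced about once every 6 bars.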

// ======================================================================
// A) Tight-memory switches and compact types
// ======================================================================
#define TIGHT_MEM            1     // turn on compact types for arrays

// lite-C precompiler doesn't support '#if' expressions.
// Use presence test instead (LOG_EQ_TO_ONE_FILE defined = single-file mode).
#ifdef LOG_EQ_TO_ONE_FILE
  /* consolidated EQ CSV -> don't enable extra meta */
#else
  #define KEEP_TOP_META
#endif

#ifdef TIGHT_MEM
  typedef float  fvar;       // 4B instead of 8B 'var' for large coefficient arrays
  typedef short  i16;        // -32768..32767 indices
  typedef char   i8;         // small enums/modes
#else /* not TIGHT_MEM */
  typedef var    fvar;
  typedef int    i16;
  typedef int    i8;
#endif
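
// Size sketch: the random projection G_RP alone holds KPROJ*NET_EQNS =
// 16*100 = 1600 entries, i.e. 6400 bytes as float vs 12800 bytes as var.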

// ---- tree byte size (counts nodes + child pointer arrays) ----
int tree_bytes(Node* u)
{
    if(!u) return 0;
    int SZV = sizeof(var), SZI = sizeof(int), SZP = sizeof(void*);
    int sz_node = 2*SZV + SZP + 2*SZI;
    int total = sz_node;
    if(u->n > 0 && u->c) total += u->n * SZP;
    int i;
    for(i=0;i<u->n;i++)
        total += tree_bytes(((Node**)u->c)[i]);
    return total;
}
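// On 32-bit Zorro (var=8, int=4, void*=4 bytes): sz_node = 16+4+8 = 28 bytes
// per node, plus 4 bytes per child pointer in the parent's array.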

// ======================================================================
// Optimized memory estimator & predictability caches
// ======================================================================

// ===== Memory estimator & predictability caches =====
int   G_MemFixedBytes    = 0;   // invariant part (arrays, Markov + pointer vec + expr opt)
int   G_TreeBytesCached  = 0;   // current D-Tree structure bytes
var*  G_PredNode         = 0;   // length == G_TreeN; -2 = not computed this bar
int   G_PredLen          = 0;
int   G_PredCap          = 0;   // (upgrade #5)
int   G_PredCacheBar     = -1;

void recalcTreeBytes(){ G_TreeBytesCached = tree_bytes(Root); }

//
// C) Updated memory estimator (matches compact types).
// Includes pointer vector & optional expr into the "fixed" baseline.
// Note: we refresh this when capacity/logging changes.
//
void computeMemFixedBytes()
{
    int N = G_N, D = G_D, K = G_K;
    int SZV = sizeof(var), SZF = sizeof(fvar), SZI16 = sizeof(i16), SZI8 = sizeof(i8), SZP = sizeof(void*);
    int b = 0;

    // --- core state (var-precision) ---
    b += N*SZV*2;                         // G_State, G_Prev

    // --- adjacency & ids ---
    b += N*D*SZI16;                       // G_Adj
    b += N*SZI16;                         // G_EqTreeId
    b += N*SZI8;                          // G_Mode

    // --- random projection ---
    b += K*N*SZF;                         // G_RP
    b += K*SZF;                           // G_Z

    // --- weights & params (fvar) ---
    b += N*SZF*(8);                       // G_W*
    b += N*SZF*(7 + 7);                   // A1*, A2*
    b += N*SZF*(2 + 2);                   // G1mean,G1E,G2P,G2lam
    b += N*SZF*(2);                       // TAlpha, TBeta
    b += N*SZF*(1);                       // G_TreeTerm
#ifdef KEEP_TOP_META
    b += N*(SZI16 + SZF);                 // G_TopEq, G_TopW
#endif
    // --- proportions ---
    b += N*SZF*2;                         // G_PropRaw, G_Prop

    // --- per-equation hit-rate bookkeeping ---  (PATCH C)
    b += N*SZF;                           // G_HitEW
    b += N*SZF;                           // G_AdvPrev
    b += N*sizeof(int);                   // G_HitN

    // --- Markov storage (unchanged ints) ---
    b += MC_STATES*MC_STATES*sizeof(int) + MC_STATES*sizeof(int);

    // pointer vector for tree index (capacity part)
    b += G_TreeCap*SZP;

    // optional expression buffers
    if(LOG_EXPR_TEXT && G_Sym && !G_SymFreed) b += N*EXPR_MAXLEN;

    G_MemFixedBytes = b;
}

void ensurePredCache()
{
    if(G_PredCacheBar != Bar){
        if(G_PredNode){
            int i, n = G_PredLen;      // use allocated length, not G_TreeN
            for(i=0;i<n;i++) G_PredNode[i] = -2;
        }
        G_PredCacheBar = Bar;
    }
}

var predByTid(int tid)
{
    if(!G_TreeIdx || tid < 0 || tid >= G_TreeN || !G_TreeIdx[tid]) return 0.5;
    ensurePredCache();

    // Guard reads/writes by the allocated cache length
    if(G_PredNode && tid < G_PredLen && G_PredNode[tid] > -1.5)
        return G_PredNode[tid];

    Node* t = G_TreeIdx[tid];
    var p = 0.5;
    if(t) p = nodePredictability(t);

    if(G_PredNode && tid < G_PredLen)
        G_PredNode[tid] = p;

    return p;
}

// ======================================================================
// Conservative in-script memory estimator (arrays + pointers) - O(1)
// ======================================================================
int mem_bytes_est()
{
    // With the updated computeMemFixedBytes() counting pointer capacity
    // and optional expr buffers, only add current tree structure here.
    return G_MemFixedBytes + G_TreeBytesCached;
}

int mem_mb_est(){ return mem_bytes_est() / (1024*1024); }

// === total memory (Zorro-wide) in MB ===
int memMB(){ return (int)(memory(0)/(1024*1024)); }

// light one-shot shedding
void shed_zero_cost_once()
{
    if(G_ShedStage > 0) return;
    set(PLOTNOW|OFF); G_ChartsOff = 1;  // stop chart buffers
    G_LogsOff = 1;                      // decimate logs (gated later)
    G_ShedStage = 1;
}

void freeExprBuffers()
{
    if(!G_Sym || G_SymFreed) return;
    int i; for(i=0;i<G_N;i++) if(G_Sym[i]) free(G_Sym[i]);
    free(G_Sym); G_Sym = 0; G_SymFreed = 1;
    computeMemFixedBytes(); // refresh baseline
}

// depth manager (prune & shedding)
void depth_manager_runtime()
{
    int trigger = MEM_BUDGET_MB - MEM_HEADROOM_MB;
    int mb = mem_mb_est();
    if(mb < trigger) return;

    if(G_ShedStage == 0) shed_zero_cost_once();

    if(G_ShedStage <= 1){
        if(LOG_EXPR_TEXT==0 && !G_SymFreed) freeExprBuffers();
        G_ShedStage = 2;
    }

    int overBudget = (mb >= MEM_BUDGET_MB);
    if(!overBudget && (Bar - G_LastDepthActBar < DEPTH_STEP_BARS))
        return;

    while(G_RT_TreeMaxDepth > RUNTIME_MIN_DEPTH)
    {
        int keepK = ifelse(mem_mb_est() < MEM_BUDGET_MB + 2, KEEP_CHILDREN_HI, KEEP_CHILDREN_LO);
        pruneSelectiveAtDepth((Node*)Root, G_RT_TreeMaxDepth, keepK);
        G_RT_TreeMaxDepth--;
        reindexTreeAndMap();

        mb = mem_mb_est();
        printf("\n[DepthMgr] depth=%i keepK=%i est=%i MB", G_RT_TreeMaxDepth, keepK, mb);

        if(mb < trigger) break;
    }

    G_LastDepthActBar = Bar;
}

// ----------------------------------------------------------------------
// 61 candlestick patterns (Zorro spellings kept). Each returns [-100..100].
// We rescale to [-1..1] for Markov state construction.
// ----------------------------------------------------------------------
int buildCDL_TA61(var* out, string* names)
{
    int n = 0;
    #define ADD(Name, Call) do{ var v = (Call); if(out) out[n] = v/100.; if(names) names[n] = Name; n++; }while(0)

    ADD("CDL2Crows",              CDL2Crows());
    ADD("CDL3BlackCrows",         CDL3BlackCrows());
    ADD("CDL3Inside",             CDL3Inside());
    ADD("CDL3LineStrike",         CDL3LineStrike());
    ADD("CDL3Outside",            CDL3Outside());
    ADD("CDL3StarsInSouth",       CDL3StarsInSouth());
    ADD("CDL3WhiteSoldiers",      CDL3WhiteSoldiers());
    ADD("CDLAbandonedBaby",       CDLAbandonedBaby(0.3));
    ADD("CDLAdvanceBlock",        CDLAdvanceBlock());
    ADD("CDLBeltHold",            CDLBeltHold());
    ADD("CDLBreakaway",           CDLBreakaway());
    ADD("CDLClosingMarubozu",     CDLClosingMarubozu());
    ADD("CDLConcealBabysWall",    CDLConcealBabysWall());
    ADD("CDLCounterAttack",       CDLCounterAttack());
    ADD("CDLDarkCloudCover",      CDLDarkCloudCover(0.3));
    ADD("CDLDoji",                CDLDoji());
    ADD("CDLDojiStar",            CDLDojiStar());
    ADD("CDLDragonflyDoji",       CDLDragonflyDoji());
    ADD("CDLEngulfing",           CDLEngulfing());
    ADD("CDLEveningDojiStar",     CDLEveningDojiStar(0.3));
    ADD("CDLEveningStar",         CDLEveningStar(0.3));
    ADD("CDLGapSideSideWhite",    CDLGapSideSideWhite());
    ADD("CDLGravestoneDoji",      CDLGravestoneDoji());
    ADD("CDLHammer",              CDLHammer());
    ADD("CDLHangingMan",          CDLHangingMan());
    ADD("CDLHarami",              CDLHarami());
    ADD("CDLHaramiCross",         CDLHaramiCross());
    ADD("CDLHignWave",            CDLHignWave());
    ADD("CDLHikkake",             CDLHikkake());
    ADD("CDLHikkakeMod",          CDLHikkakeMod());
    ADD("CDLHomingPigeon",        CDLHomingPigeon());
    ADD("CDLIdentical3Crows",     CDLIdentical3Crows());
    ADD("CDLInNeck",              CDLInNeck());
    ADD("CDLInvertedHammer",      CDLInvertedHammer());
    ADD("CDLKicking",             CDLKicking());
    ADD("CDLKickingByLength",     CDLKickingByLength());
    ADD("CDLLadderBottom",        CDLLadderBottom());
    ADD("CDLLongLeggedDoji",      CDLLongLeggedDoji());
    ADD("CDLLongLine",            CDLLongLine());
    ADD("CDLMarubozu",            CDLMarubozu());
    ADD("CDLMatchingLow",         CDLMatchingLow());
    ADD("CDLMatHold",             CDLMatHold(0.5));
    ADD("CDLMorningDojiStar",     CDLMorningDojiStar(0.3));
    ADD("CDLMorningStar",         CDLMorningStar(0.3));
    ADD("CDLOnNeck",              CDLOnNeck());
    ADD("CDLPiercing",            CDLPiercing());
    ADD("CDLRickshawMan",         CDLRickshawMan());
    ADD("CDLRiseFall3Methods",    CDLRiseFall3Methods());
    ADD("CDLSeperatingLines",     CDLSeperatingLines());
    ADD("CDLShootingStar",        CDLShootingStar());
    ADD("CDLShortLine",           CDLShortLine());
    ADD("CDLSpinningTop",         CDLSpinningTop());
    ADD("CDLStalledPattern",      CDLStalledPattern());
    ADD("CDLStickSandwhich",      CDLStickSandwhich());
    ADD("CDLTakuri",              CDLTakuri());
    ADD("CDLTasukiGap",           CDLTasukiGap());
    ADD("CDLThrusting",           CDLThrusting());
    ADD("CDLTristar",             CDLTristar());
    ADD("CDLUnique3River",        CDLUnique3River());
    ADD("CDLUpsideGap2Crows",     CDLUpsideGap2Crows());
    ADD("CDLXSideGap3Methods",    CDLXSideGap3Methods());

    #undef ADD
    return n; // 61
}

// ================= Markov storage & helpers =================
static int* MC_Count;   // [MC_STATES*MC_STATES]
static int* MC_RowSum;  // [MC_STATES]
static int  MC_Prev = -1;
static int  MC_Cur  = 0;
static var  MC_PBullNext = 0.5;
static var  MC_Entropy   = 0.0;

#define MC_IDX(fr,to) ((fr)*MC_STATES + (to))

int MC_stateFromCDL(var* cdl /*len=61*/, var thr)
{
    int i, best=-1; var besta=0;
    for(i=0;i<MC_NPAT;i++){
        var a = abs(cdl[i]);
        if(a>besta){ besta=a; best=i; }
    }
    if(best<0) return MC_NONE;
    if(besta < thr) return MC_NONE;
    int bull = (cdl[best] > 0);
    return 1 + 2*best + bull;  // 1..122
}
int MC_isBull(int s){ if(s<=0) return 0; return ((s-1)%2)==1; }

void MC_update(int sPrev,int sCur){ if(sPrev<0) return; MC_Count[MC_IDX(sPrev,sCur)]++; MC_RowSum[sPrev]++; }

// === (6) Use runtime Laplace alpha (G_MC_Alpha) ===
var MC_prob(int s,int t){
    var num = (var)MC_Count[MC_IDX(s,t)] + G_MC_Alpha;
    var den = (var)MC_RowSum[s] + G_MC_Alpha*MC_STATES;
    if(den<=0) return 1.0/MC_STATES;
    return num/den;
}

// === (6) one-pass PBull + Entropy
void MC_rowStats(int s, var* outPBull, var* outEntropy)
{
    if(s<0){ if(outPBull) *outPBull=0.5; if(outEntropy) *outEntropy=1.0; return; }
    int t; var Z=0, pBull=0;
    for(t=1;t<MC_STATES;t++){ var p=MC_prob(s,t); Z+=p; if(MC_isBull(t)) pBull+=p; }
    if(Z<=0){ if(outPBull) *outPBull=0.5; if(outEntropy) *outEntropy=1.0; return; }

    var H=0;
    for(t=1;t<MC_STATES;t++){
        var p = MC_prob(s,t)/Z;
        if(p>0) H += -p*log(p);
    }
    var Hmax = log(MC_STATES-1);
    if(Hmax<=0) H = 0; else H = H/Hmax;

    if(outPBull)   *outPBull   = pBull/Z;
    if(outEntropy) *outEntropy = H;
}
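// Worked example, fresh row (RowSum=0, alpha=1): MC_prob = 1/123 for every
// target, so PBull -> 61/122 ~ 0.5 and normalized entropy -> 1 (maximal).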

// ================= HARMONIC D-TREE ENGINE =================

// ---------- utils ----------
var randsign(){ return ifelse(random(1) < 0.5, -1.0, 1.0); }
var mapUnit(var u,var lo,var hi){ if(u<-1) u=-1; if(u>1) u=1; var t=0.5*(u+1.0); return lo + t*(hi-lo); }

// ---- safety helpers ----
inline var safeNum(var x)
{
    if(invalid(x)) return 0;       // 0 for NaN/INF
    return clamp(x,-1e100,1e100);  // hard-limit range
}

void sanitize(var* A,int n){ int k; for(k=0;k<n;k++) A[k]=safeNum(A[k]); }
var sat100(var x){ return clamp(x,-100,100); }

// ---- small string helpers (for memory-safe logging) ----
void strlcat_safe(string dst, string src, int cap)
{
    if(!dst || !src || cap <= 0) return;
    int dl = strlen(dst);
    int sl = strlen(src);
    int room = cap - 1 - dl;
    if(room <= 0){ if(cap > 0) dst[cap-1] = 0; return; }
    int i; for(i = 0; i < room && i < sl; i++) dst[dl + i] = src[i];
    dst[dl + i] = 0;
}

int countSubStr(string s, string sub){
    if(!s || !sub) return 0;
    int n=0; string p=s;
    int sublen = strlen(sub);
    if(sublen<=0) return 0;
    while((p=strstr(p,sub))){ n++; p += sublen; }
    return n;
}

// ---------- FIXED: use int (lite-C) and keep non-negative ----------
int djb2_hash(string s){
    int h = 5381, c, i = 0;
    if(!s) return h;
    while((c = s[i++])) h = ((h<<5)+h) ^ c;  // h*33 ^ c
    return h & 0x7fffffff;                   // force non-negative
}

// ---- tree helpers ----
int  validTreeIndex(int tid){ if(!G_TreeIdx) return 0; if(tid<0||tid>=G_TreeN) return 0; return (G_TreeIdx[tid]!=0); }
Node* treeAt(int tid){ if(validTreeIndex(tid)) return G_TreeIdx[tid]; return &G_DummyNode; }
int safeTreeIndexFromEq(int eqi){
    int denom = ifelse(G_TreeN>0, G_TreeN, 1);
    int tid = eqi;
    if(tid < 0) tid = 0;
    if(denom > 0) tid = tid % denom;
    if(tid < 0) tid = 0;
    return tid;
}

// ---- tree indexing ----
void pushTreeNode(Node* u){
    if(G_TreeN >= G_TreeCap){
        int newCap = G_TreeCap*2;
        if(newCap < 64) newCap = 64;
        G_TreeIdx = (Node**)realloc(G_TreeIdx, newCap*sizeof(Node*));
        G_TreeCap = newCap;
        computeMemFixedBytes(); // pointer vector size changed
    }
    G_TreeIdx[G_TreeN++] = u;
}
void indexTreeDFS(Node* u){ if(!u) return; pushTreeNode(u); int i; for(i=0;i<u->n;i++) indexTreeDFS(((Node**)u->c)[i]); }

// ---- shrink index capacity after pruning (Fix #3) ----
void maybeShrinkTreeIdx(){
    if(!G_TreeIdx) return;
    if(G_TreeCap > 64 && G_TreeN < (G_TreeCap >> 1)){
        int newCap = (G_TreeCap >> 1);
        if(newCap < 64) newCap = 64;
        G_TreeIdx = (Node**)realloc(G_TreeIdx, newCap*sizeof(Node*));
        G_TreeCap = newCap;
        computeMemFixedBytes(); // pointer vector size changed
    }
}

// ---- depth LUT helper (upgrade #1) ----
void refreshDepthW()
{
    // Lazy-allocate the LUT here: nothing else in the listing creates it, and
    // evaluateNode() dereferences G_DepthW unconditionally after this call.
    if(!G_DepthW) G_DepthW = (var*)malloc(DEPTH_LUT_SIZE*sizeof(var));
    if(!G_DepthW) return;
    int d;
    for(d=0; d<DEPTH_LUT_SIZE; d++)
        G_DepthW[d] = 1.0 / pow(d+1, G_DTreeExp);
    G_DepthExpLast = G_DTreeExp;
}
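// Example: G_DTreeExp = 1.3 gives weights ~1.000, 0.406, 0.240, 0.165, 0.123
// for d = 0..4; low-d leaves track their phase quickly while the high-d root
// averages slowly in evaluateNode().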

// ---- tree create/eval (with pool & LUT upgrades) ----
Node* createNode(int depth)
{
    Node* u = poolAllocNode();
    if(!u) return 0;  // safety

    u->v = random();
    u->r = 0.01 + 0.02*depth + random(0.005);
    u->d = depth;

    if(depth > 0){
        u->n = 1 + (int)random(MAX_BRANCHES);   // 1..MAX_BRANCHES (cast ok)
        u->c = malloc(u->n * sizeof(void*));
        if(!u->c){
            // Could not allocate children array; keep leaf instead
            u->n = 0; u->c = 0;
            return u;
        }
        int i;
        for(i=0;i<u->n;i++){
            Node* child = createNode(depth - 1);
            ((Node**)u->c)[i] = child; // ok if child==0, downstream code guards
        }
    } else {
        u->n = 0; u->c = 0;
    }
    return u;
}

var evaluateNode(Node* u)  // upgrade #1
{
    if(!u) return 0;
    var sum = 0; int i;
    for(i=0;i<u->n;i++) sum += evaluateNode(((Node**)u->c)[i]);

    if(G_DepthExpLast < 0 || abs(G_DTreeExp - G_DepthExpLast) > 1e-9)
        refreshDepthW();

    var phase  = sin(u->r * Bar + sum);
    var weight = G_DepthW[u->d];
    u->v = (1 - weight)*u->v + weight*phase;
    return u->v;
}
int countNodes(Node* u){ if(!u) return 0; int c=1,i; for(i=0;i<u->n;i++) c += countNodes(((Node**)u->c)[i]); return c; }
void freeTree(Node* u)   // upgrade #2
{
    if(!u) return; int i; for(i=0;i<u->n;i++) freeTree(((Node**)u->c)[i]);
    if(u->c) free(u->c);
    poolFreeNode(u);
}

// =========== NETWORK STATE & COEFFICIENTS ===========
var*  G_State; var*  G_Prev;                    // keep as var (precision)
var*  G_StateSq = 0;                            // upgrade #3
i16*  G_Adj;
fvar* G_RP;  fvar* G_Z;
i8*   G_Mode;

fvar* G_WSelf; fvar* G_WN1; fvar* G_WN2; fvar* G_WGlob1; fvar* G_WGlob2; fvar* G_WMom; fvar* G_WTree; fvar* G_WAdv;
fvar* A1x; fvar* A1lam; fvar* A1mean; fvar* A1E; fvar* A1P; fvar* A1i; fvar* A1c;
fvar* A2x; fvar* A2lam; fvar* A2mean; fvar* A2E; fvar* A2P; fvar* A2i; fvar* A2c;
fvar* G1mean; fvar* G1E; fvar* G2P; fvar* G2lam;

fvar* G_TreeTerm;
#ifdef KEEP_TOP_META
  i16*  G_TopEq;
  fvar* G_TopW;
#endif
i16*  G_EqTreeId;
fvar* TAlpha; fvar* TBeta;

fvar*  G_PropRaw; fvar*  G_Prop;

// --- Per-equation hit-rate (EW average of 1-bar directional correctness) (PATCH B)
#define HIT_ALPHA   0.02     // EW smoothing (~50-bar memory)
#define HIT_EPS     0.0001   // ignore tiny advisor values
fvar* G_HitEW;     // [N] 0..1 EW hit-rate
int*  G_HitN;      // [N] # of scored comparisons
fvar* G_AdvPrev;   // [N] previous bar's advisor output (-1..+1)
var    G_Ret1 = 0; // realized 1-bar return for scoring

// ===== Markov features exposed to DTREE =====
var G_MCF_PBull;   // 0..1
var G_MCF_Entropy; // 0..1
var G_MCF_State;   // 0..122

// epoch/context & feedback
int    G_Epoch = 0;
int    G_CtxID = 0;
var    G_FB_A = 0.7;  // kept (not used in blend now)
var    G_FB_B = 0.3;  // kept (not used in blend now)

// ---------- predictability ----------
var nodePredictability(Node* t)
{
    if(!t) return 0.5;

    var disp = 0;
    int n = t->n, i, cnt = 0;

    if(t->c){
        for(i=0;i<n;i++){
            Node* c = ((Node**)t->c)[i];
            if(c){ disp += abs(c->v - t->v); cnt++; }
        }
        if(cnt > 0) disp /= cnt;
    }

    var depthFac = 1.0/(1 + t->d);
    var rateBase = 0.01 + 0.02*t->d;
    var rateFac  = exp(-25.0*abs(t->r - rateBase));
    var p = 0.5*(depthFac + rateFac);
    p = 0.5*p + 0.5*(1.0 - disp);
    if(p<0) p=0; if(p>1) p=1;
    return p;
}

// importance for selective pruning
var nodeImportance(Node* u)
{
    if(!u) return 0;
    var amp = abs(u->v); if(amp>1) amp=1;
    var p = nodePredictability(u);
    var depthW = 1.0/(1.0 + u->d);
    var imp = (0.6*p + 0.4*amp) * depthW;
    return imp;
}

// ====== Elastic growth helpers ======

// create a leaf at depth d (no children) — upgrade #2
Node* createLeafDepth(int d)
{
    Node* u = poolAllocNode();
    if(!u) return 0;  // safety
    u->v = random();
    u->r = 0.01 + 0.02*d + random(0.005);
    u->d = d;
    u->n = 0;
    u->c = 0;
    return u;
}

// add up to addK new children to all nodes at frontierDepth (with memcpy) — upgrade #4
void growSelectiveAtDepth(Node* u, int frontierDepth, int addK)
{
    if(!u) return;
    if(u->d == frontierDepth){
        int want = addK; if(want <= 0) return;
        int oldN = u->n; int newN = oldN + want;
        Node** Cnew = (Node**)malloc(newN * sizeof(void*));
        if(!Cnew) return;                                            // OOM guard: keep existing children untouched
        if(oldN>0 && u->c) memcpy(Cnew, u->c, oldN*sizeof(void*));   // memcpy optimization
        int i; for(i=oldN;i<newN;i++) Cnew[i] = createLeafDepth(frontierDepth-1);
        if(u->c) free(u->c);
        u->c = Cnew; u->n = newN; return;
    }
    int j; for(j=0;j<u->n;j++) growSelectiveAtDepth(((Node**)u->c)[j], frontierDepth, addK);
}

// keep top-K children by importance at targetDepth, drop the rest
void freeChildAt(Node* parent, int idx)
{
    if(!parent || !parent->c) return;
    Node** C = (Node**)parent->c;
    freeTree(C[idx]);
    int i;
    for(i=idx+1;i<parent->n;i++) C[i-1] = C[i];
    parent->n--;
    if(parent->n==0){ free(parent->c); parent->c=0; }
}
void pruneSelectiveAtDepth(Node* u, int targetDepth, int keepK)
{
    if(!u) return;

    if(u->d == targetDepth-1 && u->n > 0){
        int n = u->n, i, kept = 0;
        int mark[16]; for(i=0;i<16;i++) mark[i]=0;  // ranking cap: only the first 16 children are considered

        int iter;
        for(iter=0; iter<keepK && iter<n; iter++){
            int bestI = -1; var bestImp = -1;
            for(i=0;i<n;i++){
                if(i<16 && mark[i]==1) continue;
                var imp = nodeImportance(((Node**)u->c)[i]);
                if(imp > bestImp){ bestImp = imp; bestI = i; }
            }
            if(bestI>=0 && bestI<16){ mark[bestI]=1; kept++; }
        }
        for(i=n-1;i>=0;i--) if(i<16 && mark[i]==0) freeChildAt(u,i);
        return;
    }

    int j; for(j=0;j<u->n;j++) pruneSelectiveAtDepth(((Node**)u->c)[j], targetDepth, keepK);
}

// ---------- reindex (sizes pred cache without ternary) ----------
void reindexTreeAndMap()
{
    G_TreeN = 0;
    indexTreeDFS(Root);
    if(G_TreeN<=0){
        G_TreeN=1;
        if(G_TreeIdx) G_TreeIdx[0]=Root;
    }

    // map equations to tree nodes
    int i; for(i=0;i<G_N;i++) G_EqTreeId[i] = (i16)(i % G_TreeN);

    // resize predictability cache safely (upgrade #5)
    G_PredLen = G_TreeN; if(G_PredLen <= 0) G_PredLen = 1;
    if(G_PredLen > G_PredCap){
        if(G_PredNode) free(G_PredNode);
        G_PredNode = (var*)malloc(G_PredLen*sizeof(var));
        G_PredCap  = G_PredLen;
    }
    G_PredCacheBar = -1;   // force refill next bar

    maybeShrinkTreeIdx();
    recalcTreeBytes();
}

// ====== Accuracy sentinel & elastic-depth controller ======

void acc_update(var x /*lambda*/, var y /*gamma*/)
{
    var a = 0.01; // ~100-bar half-life
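    // EW moments feed a running Pearson correlation:
    // corr = (E[xy]-E[x]E[y]) / sqrt((E[x^2]-E[x]^2)*(E[y^2]-E[y]^2))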
    ACC_mx  = (1-a)*ACC_mx  + a*x;
    ACC_my  = (1-a)*ACC_my  + a*y;
    ACC_mx2 = (1-a)*ACC_mx2 + a*(x*x);
    ACC_my2 = (1-a)*ACC_my2 + a*(y*y);
    ACC_mxy = (1-a)*ACC_mxy + a*(x*y);

    var vx = ACC_mx2 - ACC_mx*ACC_mx;
    var vy = ACC_my2 - ACC_my*ACC_my;
    var cv = ACC_mxy - ACC_mx*ACC_my;
    if(vx>0 && vy>0) G_AccCorr = cv / sqrt(vx*vy); else G_AccCorr = 0;
    if(!G_HaveBase){ G_AccBase = G_AccCorr; G_HaveBase = 1; }
}

// utility to maximize: accuracy minus gentle memory penalty
var util_now()
{
    int mb = mem_mb_est();
    var mem_pen = 0;
    if(mb > MEM_BUDGET_MB) mem_pen = (mb - MEM_BUDGET_MB)/(var)MEM_BUDGET_MB; else mem_pen = 0;
    return G_AccCorr - 0.5*mem_pen;
}

// apply a +1 “grow one level” action if safe memory headroom
int apply_grow_step()
{
    int mb = mem_mb_est();
    if(G_RT_TreeMaxDepth >= MAX_DEPTH) return 0;
    if(mb > MEM_BUDGET_MB - 2*MEM_HEADROOM_MB) return 0;
    int newFrontier = G_RT_TreeMaxDepth;
    growSelectiveAtDepth(Root, newFrontier, KEEP_CHILDREN_HI);
    G_RT_TreeMaxDepth++;
    reindexTreeAndMap();
    printf("\n[EDC] Grew depth to %i (est %i MB)", G_RT_TreeMaxDepth, mem_mb_est());
    return 1;
}

// revert last growth (drop newly-added frontier children)
void revert_last_grow()
{
    pruneSelectiveAtDepth((Node*)Root, G_RT_TreeMaxDepth, 0);
    G_RT_TreeMaxDepth--;
    reindexTreeAndMap();
    printf("\n[EDC] Reverted growth to %i (est %i MB)", G_RT_TreeMaxDepth, mem_mb_est());
}

// main elastic-depth controller; call once per bar (after acc_update)
void edc_runtime()
{
    // (5) slow hill-climb on G_DTreeExp
    if((Bar % DEPTH_TUNE_BARS) == 0){
        var U0 = util_now();
        var trial = clamp(G_DTreeExp + G_DTreeExpDir*G_DTreeExpStep, 0.8, 2.0);
        var old  = G_DTreeExp;
        G_DTreeExp = trial;
        if(util_now() + 0.005 < U0){
            G_DTreeExp = old;
            G_DTreeExpDir = -G_DTreeExpDir;
        }
    }

    int mb = mem_mb_est();

    if(G_TunePending){
        if(Bar - G_TuneStartBar >= TUNE_DELAY_BARS){
            G_UtilAfter = util_now();
            var eps = 0.01;
            if(G_UtilAfter + eps < G_UtilBefore){
                revert_last_grow();
            } else {
                printf("\n[EDC] Growth kept (U: %.4f -> %.4f)", G_UtilBefore, G_UtilAfter);
            }
            G_TunePending = 0; G_TuneAction = 0;
        }
        return;
    }

    if( (Bar % DEPTH_TUNE_BARS)==0 && mb <= MEM_BUDGET_MB - 2*MEM_HEADROOM_MB && G_RT_TreeMaxDepth < MAX_DEPTH ){
        G_UtilBefore = util_now();
        if(apply_grow_step()){
            G_TunePending = 1; G_TuneAction = 1; G_TuneStartBar = Bar;
        }
    }
}

// Builds "Log\\Alpha12_eq_###.csv" into outName (must be >=64 bytes)
void buildEqFileName(int idx, char* outName /*>=64*/)
{
    strcpy(outName, "Log\\Alpha12_eq_");
    string idxs = strf("%03i", idx);
    strcat(outName, idxs);
    strcat(outName, ".csv");
}

// ===== consolidated EQ log =====
void writeEqHeaderOnce()
{
    static int done=0; if(done) return; done=1;
    file_append("Log\\Alpha12_eq_all.csv",
        "Bar,Epoch,Ctx,EqCount,i,n1,n2,TreeId,Depth,Rate,Pred,Adv,Prop,Mode,WAdv,WTree,PBull,Entropy,MCState,ExprLen,ExprHash,tanhN,sinN,cosN\n");
}

void appendEqMetaLine(
    int bar, int epoch, int ctx, int i, int n1, int n2, int tid, int depth, var rate,
    var pred, var adv, var prop, int mode, var wadv, var wtree,
    var pbull, var ent, int mcstate, string expr)
{
    if(i >= LOG_EQ_SAMPLE) return;

    int eLen = 0, eHash = 0, cT = 0, cS = 0, cC = 0;
    if(expr){
        eLen  = (int)strlen(expr);
        eHash = (int)djb2_hash(expr);
        cT    = countSubStr(expr,"tanh(");
        cS    = countSubStr(expr,"sin(");
        cC    = countSubStr(expr,"cos(");
    } else {
        eHash = (int)djb2_hash("");
    }

#ifdef LOG_FLOAT_TRIM
    file_append("Log\\Alpha12_eq_all.csv",
        strf("%i,%i,%i,%i,%i,%i,%i,%i,%i,%.4f,%.4f,%.4f,%.4f,%i,%.3f,%.3f,%.4f,%.4f,%i,%i,%i,%i,%i,%i\n",
            bar, epoch, ctx, NET_EQNS, i, n1, n2, tid, depth,
            rate, pred, adv, prop, mode, wadv, wtree, pbull, ent,
            mcstate, eLen, eHash, cT, cS, cC));
#else
    file_append("Log\\Alpha12_eq_all.csv",
        strf("%i,%i,%i,%i,%i,%i,%i,%i,%i,%.6f,%.4f,%.4f,%.6f,%i,%.3f,%.3f,%.4f,%.4f,%i,%i,%i,%i,%i,%i\n",
            bar, epoch, ctx, NET_EQNS, i, n1, n2, tid, depth,
            rate, pred, adv, prop, mode, wadv, wtree, pbull, ent,
            mcstate, eLen, eHash, cT, cS, cC));
#endif
}

// --------- allocation ----------
void randomizeRP()
{
    int K=G_K,N=G_N,k,j;
    for(k=0;k<K;k++)
        for(j=0;j<N;j++)
            G_RP[k*N+j] = ifelse(random(1) < 0.5, -1.0, 1.0);
}

// === (8/9) Use effective K + per-bar guard ===
int G_ProjBar = -1; int G_ProjK = -1;
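// Z = RP * StateSq: G_Keff signed random projections of the squared state,
// recomputed at most once per bar (and only when the effective K changed).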
void computeProjection(){
    if(G_ProjBar == Bar && G_ProjK == G_Keff) return;  // guard (upgrade #9)
    int K=G_Keff, N=G_N, k, j;
    for(k=0;k<K;k++){
        var acc=0;
        for(j=0;j<N;j++) acc += (var)G_RP[k*N+j]*G_StateSq[j];  // reuse squares (upgrade #3)
        G_Z[k]=(fvar)acc;
    }
    G_ProjBar = Bar; G_ProjK = G_Keff;
}

// D) Compact allocate/free
void allocateNet()
{
    int N = G_N, D = G_D, K = G_K;

    // core
    G_State  = (var*)malloc(N*sizeof(var));
    G_Prev   = (var*)malloc(N*sizeof(var));
    G_StateSq= (var*)malloc(N*sizeof(var));

    // graph / projection
    G_Adj  = (i16*)  malloc(N*D*sizeof(i16));
    G_RP   = (fvar*) malloc(K*N*sizeof(fvar));
    G_Z    = (fvar*) malloc(K*sizeof(fvar));
    G_Mode = (i8*)   malloc(N*sizeof(i8));

    // weights & params
    G_WSelf = (fvar*)malloc(N*sizeof(fvar));
    G_WN1   = (fvar*)malloc(N*sizeof(fvar));
    G_WN2   = (fvar*)malloc(N*sizeof(fvar));
    G_WGlob1= (fvar*)malloc(N*sizeof(fvar));
    G_WGlob2= (fvar*)malloc(N*sizeof(fvar));
    G_WMom  = (fvar*)malloc(N*sizeof(fvar));
    G_WTree = (fvar*)malloc(N*sizeof(fvar));
    G_WAdv  = (fvar*)malloc(N*sizeof(fvar));

    A1x   = (fvar*)malloc(N*sizeof(fvar));
    A1lam = (fvar*)malloc(N*sizeof(fvar));
    A1mean= (fvar*)malloc(N*sizeof(fvar));
    A1E   = (fvar*)malloc(N*sizeof(fvar));
    A1P   = (fvar*)malloc(N*sizeof(fvar));
    A1i   = (fvar*)malloc(N*sizeof(fvar));
    A1c   = (fvar*)malloc(N*sizeof(fvar));

    A2x   = (fvar*)malloc(N*sizeof(fvar));
    A2lam = (fvar*)malloc(N*sizeof(fvar));
    A2mean= (fvar*)malloc(N*sizeof(fvar));
    A2E   = (fvar*)malloc(N*sizeof(fvar));
    A2P   = (fvar*)malloc(N*sizeof(fvar));
    A2i   = (fvar*)malloc(N*sizeof(fvar));
    A2c   = (fvar*)malloc(N*sizeof(fvar));

    G1mean= (fvar*)malloc(N*sizeof(fvar));
    G1E   = (fvar*)malloc(N*sizeof(fvar));
    G2P   = (fvar*)malloc(N*sizeof(fvar));
    G2lam = (fvar*)malloc(N*sizeof(fvar));
    TAlpha= (fvar*)malloc(N*sizeof(fvar));
    TBeta = (fvar*)malloc(N*sizeof(fvar));

    G_TreeTerm = (fvar*)malloc(N*sizeof(fvar));
#ifdef KEEP_TOP_META
    G_TopEq = (i16*) malloc(N*sizeof(i16));
    G_TopW  = (fvar*)malloc(N*sizeof(fvar));
#endif

    G_PropRaw = (fvar*)malloc(N*sizeof(fvar));
    G_Prop    = (fvar*)malloc(N*sizeof(fvar));

    if(LOG_EXPR_TEXT) G_Sym = (string*)malloc(N*sizeof(char*)); else G_Sym = 0;

    // tree index
    G_TreeCap = 128;
    G_TreeIdx = (Node**)malloc(G_TreeCap*sizeof(Node*));
    G_TreeN   = 0;

    G_EqTreeId = (i16*)malloc(N*sizeof(i16));

    // initialize adjacency
    { int t; for(t=0; t<N*D; t++) G_Adj[t] = -1; }

    // initialize state and parameters
    {
        int i;
        for(i=0;i<N;i++){
            G_State[i]  = random();
            G_Prev[i]   = G_State[i];
            G_StateSq[i]= G_State[i]*G_State[i];
            G_Mode[i]   = 0;

            G_WSelf[i]=0.5; G_WN1[i]=0.2; G_WN2[i]=0.2;
            G_WGlob1[i]=0.1; G_WGlob2[i]=0.1;
            G_WMom[i]=0.05;  G_WTree[i]=0.15; G_WAdv[i]=0.15;

            A1x[i]=1; A1lam[i]=0.1; A1mean[i]=0; A1E[i]=0; A1P[i]=0; A1i[i]=0; A1c[i]=0;
            A2x[i]=1; A2lam[i]=0.1; A2mean[i]=0; A2E[i]=0; A2P[i]=0; A2i[i]=0; A2c[i]=0;

            G1mean[i]=1.0; G1E[i]=0.001; G2P[i]=0.6; G2lam[i]=0.3;
            TAlpha[i]=0.8;  TBeta[i]=25.0;

            G_TreeTerm[i]=0;
#ifdef KEEP_TOP_META
            G_TopEq[i]=-1; G_TopW[i]=0;
#endif
            G_PropRaw[i]=1; G_Prop[i]=1.0/G_N;

            if(LOG_EXPR_TEXT){
                G_Sym[i] = (char*)malloc(EXPR_MAXLEN);
                if(G_Sym[i]) strcpy(G_Sym[i],"");
            }
        }
    }

    // --- Hit-rate state --- (PATCH D)
    G_HitEW   = (fvar*)malloc(N*sizeof(fvar));
    G_HitN    = (int*)  malloc(N*sizeof(int));
    G_AdvPrev = (fvar*)malloc(N*sizeof(fvar));
    {
        int i;
        for(i=0;i<N;i++){
            G_HitEW[i]   = 0.5;   // neutral start
            G_HitN[i]    = 0;
            G_AdvPrev[i] = 0;     // no prior advice yet
        }
    }

    computeMemFixedBytes();

    if(G_PredNode) free(G_PredNode);
    G_PredLen = G_TreeN; if(G_PredLen<=0) G_PredLen=1;
    G_PredNode = (var*)malloc(G_PredLen*sizeof(var));
    G_PredCap  = G_PredLen;
    G_PredCacheBar = -1;
}

void freeNet()
{
    int i;

    if(G_State)free(G_State); if(G_Prev)free(G_Prev); if(G_StateSq)free(G_StateSq);

    if(G_Adj)free(G_Adj); if(G_RP)free(G_RP); if(G_Z)free(G_Z); if(G_Mode)free(G_Mode);

    if(G_WSelf)free(G_WSelf); if(G_WN1)free(G_WN1); if(G_WN2)free(G_WN2);
    if(G_WGlob1)free(G_WGlob1); if(G_WGlob2)free(G_WGlob2);
    if(G_WMom)free(G_WMom); if(G_WTree)free(G_WTree); if(G_WAdv)free(G_WAdv);

    if(A1x)free(A1x); if(A1lam)free(A1lam); if(A1mean)free(A1mean); if(A1E)free(A1E); if(A1P)free(A1P); if(A1i)free(A1i); if(A1c)free(A1c);
    if(A2x)free(A2x); if(A2lam)free(A2lam); if(A2mean)free(A2mean); if(A2E)free(A2E); if(A2P)free(A2P); if(A2i)free(A2i); if(A2c)free(A2c);

    if(G1mean)free(G1mean); if(G1E)free(G1E); if(G2P)free(G2P); if(G2lam)free(G2lam);

    if(TAlpha)free(TAlpha); if(TBeta)free(TBeta);

    if(G_TreeTerm)free(G_TreeTerm);
#ifdef KEEP_TOP_META
    if(G_TopEq)free(G_TopEq); if(G_TopW)free(G_TopW);
#endif
    if(G_EqTreeId)free(G_EqTreeId);

    if(G_PropRaw)free(G_PropRaw); if(G_Prop)free(G_Prop);

    if(G_Sym){ for(i=0;i<G_N;i++) if(G_Sym[i]) free(G_Sym[i]); free(G_Sym); }

    if(G_TreeIdx)free(G_TreeIdx); if(G_PredNode)free(G_PredNode);
}
// --------- DTREE feature builders ----------
// MEMORYLESS normalization to avoid series misuse in conditional paths
inline var nrm_s(var x)          { return sat100(100.*tanh(x)); }
inline var nrm_scl(var x, var s) { return sat100(100.*tanh(s*x)); }
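// sat100 clips to [-100..100], the signal range used for all DTREE features in this script.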

// F) Features accept 'pred' (no G_Pred[])
void buildEqFeatures(int i, var lambda, var mean, var energy, var power, var pred, var* S /*ADV_EQ_NF*/)
{
    int tid = safeTreeIndexFromEq(G_EqTreeId[i]);
    Node* t = treeAt(tid);

    S[0]  = nrm_s(G_State[i]);
    S[1]  = nrm_s(mean);
    S[2]  = nrm_scl(power,0.05);
    S[3]  = nrm_scl(energy,0.01);
    S[4]  = nrm_s(lambda);
    S[5]  = sat100(200.0*(pred-0.5));
    S[6]  = sat100(200.0*((var)t->d/MAX_DEPTH)-100.0);
    S[7]  = sat100(1000.0*t->r);
    S[8]  = nrm_s(G_TreeTerm[i]);
    S[9]  = sat100( (200.0/3.0) * (var)( (int)G_Mode[i] ) - 100.0 );
    S[10] = sat100(200.0*(G_MCF_PBull-0.5));
    S[11] = sat100(200.0*(G_MCF_Entropy-0.5));
    S[12] = sat100(200.0*((var)G_HitEW[i] - 0.5));  // NEW: reliability feature (PATCH G)
    sanitize(S,ADV_EQ_NF);
}

// (Kept for completeness; not used by DTREE anymore)
void buildPairFeatures(int i,int j, var lambda, var mean, var energy, var power, var* P /*ADV_PAIR_NF*/)
{
    int tid_i = safeTreeIndexFromEq(G_EqTreeId[i]);
    int tid_j = safeTreeIndexFromEq(G_EqTreeId[j]);
    Node* ti = treeAt(tid_i);
    Node* tj = treeAt(tid_j);

    var predi = predByTid(tid_i);
    var predj = predByTid(tid_j);

    P[0]=nrm_s(G_State[i]); P[1]=nrm_s(G_State[j]);
    P[2]=sat100(200.0*((var)ti->d/MAX_DEPTH)-100.0);
    P[3]=sat100(200.0*((var)tj->d/MAX_DEPTH)-100.0);
    P[4]=sat100(1000.0*ti->r); P[5]=sat100(1000.0*tj->r);
    P[6]=sat100(abs(P[2]-P[3]));
    P[7]=sat100(abs(P[4]-P[5]));
    P[8]=sat100(100.0*(predi+predj-1.0));
    P[9]=nrm_s(lambda); P[10]=nrm_s(mean); P[11]=nrm_scl(power,0.05);
    sanitize(P,ADV_PAIR_NF);
}

// --- Safe neighbor helpers & adjacency sanitizer ---
int adjSafe(int i, int d){
    int N = G_N, D = G_D;
    if(!G_Adj || N <= 1 || D <= 0) return 0;
    if(d < 0) d = 0; if(d >= D) d = d % D;
    int v = G_Adj[i*D + d];
    if(v < 0 || v >= N || v == i) v = (i + 1) % N;
    return v;
}

void sanitizeAdjacency(){
    if(!G_Adj) return;
    int N = G_N, D = G_D, i, d;
    for(i=0;i<N;i++){
        for(d=0; d<D; d++){
            i16 *p = &G_Adj[i*D + d];
            if(*p < 0 || *p >= N || *p == i){
                int r = (int)random(N);
                if(r == i) r = (r+1) % N;
                *p = (i16)r;
            }
        }
        if(D >= 2 && G_Adj[i*D+0] == G_Adj[i*D+1]){
            int r2 = (G_Adj[i*D+1] + 1) % N;
            if(r2 == i) r2 = (r2+1) % N;
            G_Adj[i*D+1] = (i16)r2;
        }
    }
}

// --------- advisor helpers (NEW) ----------

// cache one advisor value per equation per bar
var adviseSeed(int i, var lambda, var mean, var energy, var power)
{
    static int seedBar = -1;
    static int haveSeed[NET_EQNS];
    static var seedVal[NET_EQNS];

    if(seedBar != Bar){
        int k; for(k=0;k<NET_EQNS;k++) haveSeed[k] = 0;
        seedBar = Bar;
    }
    if(i < 0) i = 0;
    if(i >= NET_EQNS) i = i % NET_EQNS;

    if(!allowAdvise(i)) return 0;

    if(!haveSeed[i]){
        seedVal[i] = adviseEq(i, lambda, mean, energy, power); // trains (once) in Train mode
        haveSeed[i] = 1;
    }
    return seedVal[i];
}

// simple deterministic mixer for diversity in [-1..1] without extra advise calls
var mix01(var a, int salt){
    var z = sin(123.456*a + 0.001*salt) + cos(98.765*a + 0.002*salt);
    return tanh(0.75*z);
}

// --------- advise wrappers (single-equation only) ----------
// upgrade #7: early exit on tight memory BEFORE building features
var adviseEq(int i, var lambda, var mean, var energy, var power)
{
    if(!allowAdvise(i)) return 0;
    if(is(INITRUN)) return 0;

    int tight = (mem_mb_est() >= MEM_BUDGET_MB - MEM_HEADROOM_MB);
    if(tight) return 0;

    // --- Patch L: Prefer advising reliable equations; explore a bit for the rest
    if(G_HitN[i] > 32){  // wait until some evidence
        var h = (var)G_HitEW[i];
        var gate = 0.40 + 0.15*(1.0 - MC_Entropy);   // uses the Markov entropy directly
        if(h < gate){
            if(random() >= 0.5) return 0;  // ~50% exploration
        }
    }

    int tid = safeTreeIndexFromEq(G_EqTreeId[i]);
    var pred = predByTid(tid);

    var S[ADV_EQ_NF];
    buildEqFeatures(i,lambda,mean,energy,power,pred,S);

    // --- Patch 4: reliability-weighted DTREE training objective
    var obj = 0;
    if(Train){
        obj = sat100(100.0*tanh(0.6*lambda + 0.4*mean));
        // Reliability prior (0.5..1.0) to bias learning toward historically better equations
        var prior = 0.75 + 0.5*((var)G_HitEW[i] - 0.5);  // 0.5..1.0
        obj *= prior;
    }

    int objI = (int)obj;
    var a = adviseLong(DTREE, objI, S, ADV_EQ_NF);
    return a/100.;
}

// --------- advisePair disabled: never call DTREE here ----------
var advisePair(int i,int j, var lambda, var mean, var energy, var power)
{
    return 0;
}

// --------- heuristic pair scoring ----------
var scorePairSafe(int i, int j, var lambda, var mean, var energy, var power)
{
    int ti = safeTreeIndexFromEq(G_EqTreeId[i]);
    int tj = safeTreeIndexFromEq(G_EqTreeId[j]);
    Node *ni = treeAt(ti), *nj = treeAt(tj);
    var simD  = 1.0 / (1.0 + abs((var)ni->d - (var)nj->d));
    var dr    = 50.0*abs(ni->r - nj->r);                     // upgrade #10
    var simR  = 1.0 / (1.0 + dr);
    var predi = predByTid(ti);
    var predj = predByTid(tj);
    var pred  = 0.5*(predi + predj);
    var score = 0.5*pred + 0.3*simD + 0.2*simR;
    return 2.0*score - 1.0;
}

// --------- adjacency selection (heuristic only) ----------
// safer clash check using prev>=0
void rewireAdjacency_DTREE(var lambda, var mean, var energy, var power)
{
    int N=G_N, D=G_D, i, d, c, best, cand;
    for(i=0;i<N;i++){
        for(d=0; d<D; d++){
            var bestScore = -2; best = -1;
            for(c=0;c<G_CandNeigh;c++){
                cand = (int)random(N);
                if(cand==i) continue;
                int clash=0, k;
                for(k=0;k<d;k++){
                    int prev = G_Adj[i*D+k];
                    if(prev>=0 && prev==cand){ clash=1; break; }
                }
                if(clash) continue;
                var s = scorePairSafe(i,cand,lambda,mean,energy,power);
                if(s > bestScore){ bestScore=s; best=cand; }
            }
            if(best<0){ do{ best = (int)random(N);} while(best==i); }
            G_Adj[i*D + d] = (i16)best;
        }
    }
}

// --------- DTREE-created coefficients, modes & proportions ----------
var mapA(var a,var lo,var hi){ return mapUnit(a,lo,hi); }

void synthesizeEquationFromDTREE(int i, var lambda, var mean, var energy, var power)
{
    var seed = adviseSeed(i,lambda,mean,energy,power);
    G_Mode[i] = (int)(abs(1000*seed)) & 3;

    // derive weights & params deterministically from the single seed
    G_WSelf[i]  = (fvar)mapA(mix01(seed, 11), 0.15, 0.85);
    G_WN1[i]    = (fvar)mapA(mix01(seed, 12), 0.05, 0.35);
    G_WN2[i]    = (fvar)mapA(mix01(seed, 13), 0.05, 0.35);
    G_WGlob1[i] = (fvar)mapA(mix01(seed, 14), 0.05, 0.30);
    G_WGlob2[i] = (fvar)mapA(mix01(seed, 15), 0.05, 0.30);
    G_WMom[i]   = (fvar)mapA(mix01(seed, 16), 0.02, 0.15);
    G_WTree[i]  = (fvar)mapA(mix01(seed, 17), 0.05, 0.35);
    G_WAdv[i]   = (fvar)mapA(mix01(seed, 18), 0.05, 0.35);

    A1x[i]   = (fvar)(randsign()*mapA(mix01(seed, 21), 0.6, 1.2));
    A1lam[i] = (fvar)(randsign()*mapA(mix01(seed, 22), 0.05,0.35));
    A1mean[i]= (fvar)                 mapA(mix01(seed, 23),-0.30,0.30);
    A1E[i]   = (fvar)                 mapA(mix01(seed, 24),-0.0015,0.0015);
    A1P[i]   = (fvar)                 mapA(mix01(seed, 25),-0.30,0.30);
    A1i[i]   = (fvar)                 mapA(mix01(seed, 26),-0.02,0.02);
    A1c[i]   = (fvar)                 mapA(mix01(seed, 27),-0.20,0.20);

    A2x[i]   = (fvar)(randsign()*mapA(mix01(seed, 31), 0.6, 1.2));
    A2lam[i] = (fvar)(randsign()*mapA(mix01(seed, 32), 0.05,0.35));
    A2mean[i]= (fvar)                 mapA(mix01(seed, 33),-0.30,0.30);
    A2E[i]   = (fvar)                 mapA(mix01(seed, 34),-0.0015,0.0015);
    A2P[i]   = (fvar)                 mapA(mix01(seed, 35),-0.30,0.30);
    A2i[i]   = (fvar)                 mapA(mix01(seed, 36),-0.02,0.02);
    A2c[i]   = (fvar)                 mapA(mix01(seed, 37),-0.20,0.20);

    G1mean[i] = (fvar)                 mapA(mix01(seed, 41), 0.4, 1.6);
    G1E[i]    = (fvar)                 mapA(mix01(seed, 42),-0.004,0.004);
    G2P[i]    = (fvar)                 mapA(mix01(seed, 43), 0.1, 1.2);
    G2lam[i]  = (fvar)                 mapA(mix01(seed, 44), 0.05, 0.7);

    TAlpha[i] = (fvar)                 mapA(mix01(seed, 51), 0.3, 1.5);
    TBeta[i]  = (fvar)                 mapA(mix01(seed, 52), 6.0, 50.0);

    G_PropRaw[i] = (fvar)(0.01 + 0.99*(0.5*(seed+1.0)));

    // Reliability-aware proportion boost (0.75..1.25 multiplier)  (PATCH I)
    {
        var boost = 0.75 + 0.5*(var)G_HitEW[i];
        G_PropRaw[i] = (fvar)((var)G_PropRaw[i] * boost);
    }
}

void normalizeProportions()
{
    int N=G_N,i; var s=0; for(i=0;i<N;i++) s += G_PropRaw[i];
    if(s<=0) { for(i=0;i<N;i++) G_Prop[i] = (fvar)(1.0/N); return; }
    for(i=0;i<N;i++) G_Prop[i] = (fvar)(G_PropRaw[i]/s);
}

// H) dtreeTerm gets predictabilities on demand
var dtreeTerm(int i, int* outTopEq, var* outTopW)
{
    int N=G_N,j;
    int tid_i = safeTreeIndexFromEq(G_EqTreeId[i]);
    Node* ti=treeAt(tid_i); int di=ti->d; var ri=ti->r;

    var predI = predByTid(tid_i);

    var alpha=TAlpha[i], beta=TBeta[i];
    var sumw=0, acc=0, bestW=-1; int bestJ=-1;

    for(j=0;j<N;j++){
        if(j==i) continue;
        int tid_j = safeTreeIndexFromEq(G_EqTreeId[j]);
        Node* tj=treeAt(tid_j); int dj=tj->d; var rj=tj->r;

        var predJ = predByTid(tid_j);

        // similarity kernel over the tree: nearby depths and rates couple strongest
        var w = exp(-alpha*abs(di-dj)) * exp(-beta*abs(ri-rj));
        var predBoost = 0.5 + 0.5*(predI*predJ);
        var propBoost = 0.5 + 0.5*( (G_Prop[i] + G_Prop[j]) );
        w *= predBoost * propBoost;

        var pairAdv = scorePairSafe(i,j,0,0,0,0);
        var pairBoost = 0.75 + 0.25*(0.5*(pairAdv+1.0));
        w *= pairBoost;

        sumw += w; acc += w*G_State[j];
        if(w>bestW){bestW=w; bestJ=j;}
    }
    if(outTopEq) *outTopEq = bestJ;
    if(outTopW)  *outTopW  = ifelse(sumw>0, bestW/sumw, 0);
    if(sumw>0) return acc/sumw; return 0;
}

// --------- expression builder (capped & optional) ----------
void buildSymbolicExpr(int i, int n1, int n2)
{
    if(LOG_EXPR_TEXT){
        string s = G_Sym[i]; s[0]=0;
        string a1 = strf("(%.3f*x[%i] + %.3f*lam + %.3f*mean + %.5f*E + %.3f*P + %.3f*i + %.3f)",
                         (var)A1x[i], n1, (var)A1lam[i], (var)A1mean[i], (var)A1E[i], (var)A1P[i], (var)A1i[i], (var)A1c[i]);
        string a2 = strf("(%.3f*x[%i] + %.3f*lam + %.3f*mean + %.5f*E + %.3f*P + %.3f*i + %.3f)",
                         (var)A2x[i], n2, (var)A2lam[i], (var)A2mean[i], (var)A2E[i], (var)A2P[i], (var)A2i[i], (var)A2c[i]);

        strlcat_safe(s, "x[i]_next = ", EXPR_MAXLEN);
        strlcat_safe(s, strf("%.3f*x[i] + ", (var)G_WSelf[i]), EXPR_MAXLEN);

        if(G_Mode[i]==1){
            strlcat_safe(s, strf("%.3f*tanh%s + ", (var)G_WN1[i], a1), EXPR_MAXLEN);
            strlcat_safe(s, strf("%.3f*sin%s + ",  (var)G_WN2[i], a2), EXPR_MAXLEN);
        } else if(G_Mode[i]==2){
            strlcat_safe(s, strf("%.3f*cos%s + ",  (var)G_WN1[i], a1), EXPR_MAXLEN);
            strlcat_safe(s, strf("%.3f*tanh%s + ", (var)G_WN2[i], a2), EXPR_MAXLEN);
        } else {
            strlcat_safe(s, strf("%.3f*sin%s + ",  (var)G_WN1[i], a1), EXPR_MAXLEN);
            strlcat_safe(s, strf("%.3f*cos%s + ",  (var)G_WN2[i], a2), EXPR_MAXLEN);
        }

        strlcat_safe(s, strf("%.3f*tanh(%.3f*mean + %.5f*E) + ", (var)G_WGlob1[i], (var)G1mean[i], (var)G1E[i]), EXPR_MAXLEN);
        strlcat_safe(s, strf("%.3f*sin(%.3f*P + %.3f*lam) + ",   (var)G_WGlob2[i], (var)G2P[i],   (var)G2lam[i]), EXPR_MAXLEN);
        strlcat_safe(s, strf("%.3f*(x[i]-x_prev[i]) + ",         (var)G_WMom[i]), EXPR_MAXLEN);
        strlcat_safe(s, strf("Prop[i]=%.4f; ",                   (var)G_Prop[i]), EXPR_MAXLEN);
        strlcat_safe(s, strf("%.3f*DT(i) + ",                    (var)G_WTree[i]), EXPR_MAXLEN);
        strlcat_safe(s, strf("%.3f*DTREE(i)",                    (var)G_WAdv[i]), EXPR_MAXLEN);
    }
}

// ---------- one-time rewire init (call central reindex) ----------
void rewireInit()
{
    randomizeRP();
    computeProjection();
    reindexTreeAndMap();   // ensures G_PredNode sized before any use
}

// ----------------------------------------------------------------------
// I) Trim rewireEpoch (no G_Pred sweep; same behavior)
// ----------------------------------------------------------------------
void rewireEpoch(var lambda, var mean, var energy, var power)
{
    int i;

    if(ENABLE_WATCH) watch("?A");   // before adjacency

    // (7) adapt breadth by regime entropy
    G_CandNeigh = ifelse(MC_Entropy < 0.45, CAND_NEIGH+4, CAND_NEIGH);

    rewireAdjacency_DTREE(lambda,mean,energy,power);

    if(ENABLE_WATCH) watch("?C");   // after adjacency
    sanitizeAdjacency();

    for(i=0;i<G_N;i++)
        synthesizeEquationFromDTREE(i,lambda,mean,energy,power);

    if(ENABLE_WATCH) watch("?D");
    normalizeProportions();

    // Unsigned context hash of current adjacency (+ epoch) for logging
    {
        int D = G_D;
        unsigned int h = 2166136261u;
        int total = G_N * D;
        for(i=0;i<total;i++){
            unsigned int x = (unsigned int)G_Adj[i];
            h ^= x + 0x9e3779b9u + (h<<6) + (h>>2);
        }
        G_CtxID = (int)((h ^ ((unsigned int)G_Epoch<<8)) & 0x7fffffff);
    }

    if(LOG_EXPR_TEXT){
        for(i=0;i<G_N;i++){
            int n1, n2;
            n1 = adjSafe(i,0);
            if(G_D >= 2) n2 = adjSafe(i,1); else n2 = n1;
            buildSymbolicExpr(i,n1,n2);
        }
    }
}

var projectNet()
{
    int N=G_N,i; var sum=0,sumsq=0,cross=0;
    for(i=0;i<N;i++){ sum+=G_State[i]; sumsq+=G_State[i]*G_State[i]; if(i+1<N) cross+=G_State[i]*G_State[i+1]; }
    var mean=sum/N, corr=cross/(N-1);
    return 0.6*tanh(mean + 0.001*sumsq) + 0.4*sin(corr);
}

// ----------------------------------------------------------------------
// J) Tighten updateNet (local pred, no G_AdvScore, log directly)
// ----------------------------------------------------------------------
void updateNet(var driver, var* outMean, var* outEnergy, var* outPower, int writeMeta)
{
    int N = G_N, D = G_D, i;

    var sum = 0, sumsq = 0;
    for(i = 0; i < N; i++){ sum += G_State[i]; sumsq += G_State[i]*G_State[i]; }
    var mean   = sum / N;
    var energy = sumsq;
    var power  = sumsq / N;

    for(i = 0; i < N; i++){
        int n1, n2;
        n1 = adjSafe(i,0);
        if(D >= 2) n2 = adjSafe(i,1); else n2 = n1;

        var xi   = G_State[i];
        var xn1  = G_State[n1];
        var xn2  = G_State[n2];
        var mom  = xi - G_Prev[i];

        // --- EW hit-rate update from previous bar's advice vs this bar's realized return (PATCH H)
        {
            int canScore = 1;
            if(is(INITRUN)) canScore = 0;
            if(Bar <= LookBack) canScore = 0;
            if(abs((var)G_AdvPrev[i]) <= HIT_EPS) canScore = 0;

            if(canScore){
                int sameSign = 0;
                if( ( (var)G_AdvPrev[i] > 0 && G_Ret1 > 0 ) || ( (var)G_AdvPrev[i] < 0 && G_Ret1 < 0 ) )
                    sameSign = 1;

                G_HitEW[i] = (fvar)((1.0 - HIT_ALPHA)*(var)G_HitEW[i] + HIT_ALPHA*(var)sameSign);
                if(G_HitN[i] < 0x7fffffff) G_HitN[i] += 1;
            }
        }

        int topEq = -1; var topW  = 0;
        var dt    = dtreeTerm(i, &topEq, &topW);
        G_TreeTerm[i] = (fvar)dt;
#ifdef KEEP_TOP_META
        G_TopEq[i]    = (i16)topEq;
        G_TopW[i]     = (fvar)topW;
#endif

        {
            int tid = safeTreeIndexFromEq(G_EqTreeId[i]);
            var pred = predByTid(tid); // local predictability if you need it for features

            var adv = 0;
            if(allowAdvise(i))
                adv = adviseEq(i, driver, mean, energy, power);

            // Reliability gating of advisor by hit-rate (0.5..1.0)  (PATCH H)
            var wHit = 0.5 + 0.5*(var)G_HitEW[i];
            var advEff = adv * wHit;

            var arg1 = (var)(A1x[i])*xn1 + (var)(A1lam[i])*driver + (var)(A1mean[i])*mean + (var)(A1E[i])*energy + (var)(A1P[i])*power + (var)(A1i[i])*i + (var)(A1c[i]);
            var arg2 = (var)(A2x[i])*xn2 + (var)(A2lam[i])*driver + (var)(A2mean[i])*mean + (var)(A2E[i])*energy + (var)(A2P[i])*power + (var)(A2i[i])*i + (var)(A2c[i]);

            var nl1, nl2;
            if(G_Mode[i] == 0){ nl1 = sin(arg1);  nl2 = cos(arg2); }
            else if(G_Mode[i] == 1){ nl1 = tanh(arg1); nl2 = sin(arg2); }
            else if(G_Mode[i] == 2){ nl1 = cos(arg1);  nl2 = tanh(arg2); }
            else { nl1 = sin(arg1); nl2 = cos(arg2); }

            var glob1 = tanh((var)G1mean[i]*mean + (var)G1E[i]*energy);
            var glob2 = sin ((var)G2P[i]*power + (var)G2lam[i]*driver);

            var xNew =
                (var)G_WSelf[i]*xi +
                (var)G_WN1[i]*nl1 +
                (var)G_W // <- cut off by the forum post limit; updateNet() continues in full in the next post

Last edited by TipmyPip; 09/19/25 04:45.
Canticle of the Rewoven Mandala [Re: TipmyPip] #488911
09/14/25 12:28
09/14/25 12:28
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
Continuing the code from the previous post: Canticle of the Rewoven Mandala

Code
// ----------------------------------------------------------------------
// J) Tighten updateNet (local pred, no G_AdvScore, log directly)
// ----------------------------------------------------------------------
void updateNet(var driver, var* outMean, var* outEnergy, var* outPower, int writeMeta)
{
    int N = G_N, D = G_D, i;

    var sum = 0, sumsq = 0;
    for(i = 0; i < N; i++){ sum += G_State[i]; sumsq += G_State[i]*G_State[i]; }
    var mean   = sum / N;
    var energy = sumsq;
    var power  = sumsq / N;

    for(i = 0; i < N; i++){
        int n1, n2;
        n1 = adjSafe(i,0);
        if(D >= 2) n2 = adjSafe(i,1); else n2 = n1;

        var xi   = G_State[i];
        var xn1  = G_State[n1];
        var xn2  = G_State[n2];
        var mom  = xi - G_Prev[i];

        // --- EW hit-rate update from previous bar's advice vs this bar's realized return (PATCH H)
        {
            int canScore = 1;
            if(is(INITRUN)) canScore = 0;
            if(Bar <= LookBack) canScore = 0;
            if(abs((var)G_AdvPrev[i]) <= HIT_EPS) canScore = 0;

            if(canScore){
                int sameSign = 0;
                if( ( (var)G_AdvPrev[i] > 0 && G_Ret1 > 0 ) || ( (var)G_AdvPrev[i] < 0 && G_Ret1 < 0 ) )
                    sameSign = 1;

                G_HitEW[i] = (fvar)((1.0 - HIT_ALPHA)*(var)G_HitEW[i] + HIT_ALPHA*(var)sameSign);
                if(G_HitN[i] < 0x7fffffff) G_HitN[i] += 1;
            }
        }

        int topEq = -1; var topW  = 0;
        var dt    = dtreeTerm(i, &topEq, &topW);
        G_TreeTerm[i] = (fvar)dt;
#ifdef KEEP_TOP_META
        G_TopEq[i]    = (i16)topEq;
        G_TopW[i]     = (fvar)topW;
#endif

        {
            int tid = safeTreeIndexFromEq(G_EqTreeId[i]);
            var pred = predByTid(tid); // local predictability if you need it for features

            var adv = 0;
            if(allowAdvise(i))
                adv = adviseEq(i, driver, mean, energy, power);

            // Reliability gating of advisor by hit-rate (0.5..1.0)  (PATCH H)
            var wHit = 0.5 + 0.5*(var)G_HitEW[i];
            var advEff = adv * wHit;

            var arg1 = (var)(A1x[i])*xn1 + (var)(A1lam[i])*driver + (var)(A1mean[i])*mean + (var)(A1E[i])*energy + (var)(A1P[i])*power + (var)(A1i[i])*i + (var)(A1c[i]);
            var arg2 = (var)(A2x[i])*xn2 + (var)(A2lam[i])*driver + (var)(A2mean[i])*mean + (var)(A2E[i])*energy + (var)(A2P[i])*power + (var)(A2i[i])*i + (var)(A2c[i]);

            var nl1, nl2;
            if(G_Mode[i] == 0){ nl1 = sin(arg1);  nl2 = cos(arg2); }
            else if(G_Mode[i] == 1){ nl1 = tanh(arg1); nl2 = sin(arg2); }
            else if(G_Mode[i] == 2){ nl1 = cos(arg1);  nl2 = tanh(arg2); }
            else { nl1 = sin(arg1); nl2 = cos(arg2); }

            var glob1 = tanh((var)G1mean[i]*mean + (var)G1E[i]*energy);
            var glob2 = sin ((var)G2P[i]*power + (var)G2lam[i]*driver);

            var xNew =
                (var)G_WSelf[i]*xi +
                (var)G_WN1[i]*nl1 +
                (var)G_WN2[i]*nl2 +
                (var)G_WGlob1[i]*glob1 +
                (var)G_WGlob2[i]*glob2 +
                (var)G_WMom[i]*mom +
                (var)G_WTree[i]*dt +
                (var)G_WAdv[i]*advEff;   // <-- changed to advEff (PATCH H)

            G_Prev[i]  = xi;
            G_State[i] = clamp(xNew, -10, 10);

            // Keep last advisor output for next-bar scoring (PATCH H)
            G_AdvPrev[i] = (fvar)adv;
        }

        if(writeMeta && (G_Epoch % META_EVERY == 0) && !G_LogsOff){
            int  tid2, nn1, nn2;
            Node* t2;
            tid2 = safeTreeIndexFromEq(G_EqTreeId[i]);
            t2   = treeAt(tid2);
            nn1  = adjSafe(i,0);
            if(G_D >= 2) nn2 = adjSafe(i,1); else nn2 = nn1;

            if(LOG_EQ_TO_ONE_FILE){
                string expr = "";
                if(LOG_EXPR_TEXT) expr = G_Sym[i];
                appendEqMetaLine(
                    Bar, G_Epoch, G_CtxID, i, nn1, nn2, tid2, t2->d, t2->r,
                    predByTid(tid2), 0, G_Prop[i], G_Mode[i], G_WAdv[i], G_WTree[i],
                    MC_PBullNext, MC_Entropy, MC_Cur, expr
                );
            } else {
                char fname[64];
                buildEqFileName(i, fname);
                string expr2 = "";
                if(LOG_EXPR_TEXT) expr2 = G_Sym[i];
#ifdef LOG_FLOAT_TRIM
                file_append(fname,
                    strf("META,%i,%i,%i,%i,%i,%i,%i,%i,%.4f,Pred=%.4f,Adv=%.4f,Prop=%.4f,Mode=%i,WAdv=%.3f,WTree=%.3f,PBull=%.4f,Ent=%.4f,State=%i,\"%s\"\n",
                        G_Epoch, G_CtxID, NET_EQNS, i, nn1, nn2, tid2, t2->d, t2->r,
                        predByTid(tid2), 0.0, (var)G_Prop[i], G_Mode[i], (var)G_WAdv[i], (var)G_WTree[i],
                        MC_PBullNext, MC_Entropy, MC_Cur, expr2));
#else
                file_append(fname,
                    strf("META,%i,%i,%i,%i,%i,%i,%i,%i,%.6f,Pred=%.4f,Adv=%.4f,Prop=%.6f,Mode=%i,WAdv=%.3f,WTree=%.3f,PBull=%.4f,Ent=%.4f,State=%i,\"%s\"\n",
                        G_Epoch, G_CtxID, NET_EQNS, i, nn1, nn2, tid2, t2->d, t2->r,
                        predByTid(tid2), 0.0, (var)G_Prop[i], G_Mode[i], (var)G_WAdv[i], (var)G_WTree[i],
                        MC_PBullNext, MC_Entropy, MC_Cur, expr2));
#endif
            }
        }
    }

    // refresh squared state once per bar for projection (upgrade #3)
    int jj; for(jj=0; jj<N; jj++) G_StateSq[jj] = G_State[jj]*G_State[jj];

    if(outMean)   *outMean   = mean;
    if(outEnergy) *outEnergy = energy;
    if(outPower)  *outPower  = power;
}

// ----------------- MAIN -----------------
function run()
{
    static int initialized = 0;
    static var lambda;
    static int fileInit = 0;

    BarPeriod = BAR_PERIOD;
    if(LookBack < NWIN) LookBack = NWIN;
    if(Train) Hedge = 2;

    // Plots are opt-in via ENABLE_PLOTS
    set(RULES|LEAN);
    if(ENABLE_PLOTS) set(PLOTNOW);
    asset(ASSET_SYMBOL);

    // --- 1-bar realized return for scoring (Close_t - Close_{t-1}) (PATCH F)
    {
        static var *S_Close;
        S_Close = series(priceClose());
        if(Bar > LookBack)
            G_Ret1 = S_Close[0] - S_Close[1];
        else
            G_Ret1 = 0;
    }

    if(is(INITRUN) && !initialized){

        // init dummy node
        G_DummyNode.v = 0;
        G_DummyNode.r = 0;
        G_DummyNode.c = 0;
        G_DummyNode.n = 0;
        G_DummyNode.d = 0;

        // allocate Markov matrices (zeroed)
        MC_Count  = (int*)malloc(MC_STATES*MC_STATES*sizeof(int));
        MC_RowSum = (int*)malloc(MC_STATES*sizeof(int));
        {
            int k;
            for(k=0;k<MC_STATES*MC_STATES;k++) MC_Count[k]=0;
            for(k=0;k<MC_STATES;k++) MC_RowSum[k]=0;
        }

        // Candlestick list (names not needed)
        buildCDL_TA61(0, 0);

        // build tree + network
        // Pre-warm node pool so first allocation is guaranteed aligned & ready
        if(!G_ChunkHead){
            NodeChunk* ch0 = (NodeChunk*)malloc(sizeof(NodeChunk));
            if(!ch0) quit("Alpha12: OOM preallocating NodeChunk");
            memset(ch0, 0, sizeof(NodeChunk));
            ch0->next = 0; ch0->used = 0;
            G_ChunkHead = ch0;
        }

        Root = createNode(MAX_DEPTH);
        recalcTreeBytes();

        allocateNet();

        // ---- depth LUT allocation (heap) ----
        G_DepthW = (var*)malloc(DEPTH_LUT_SIZE * sizeof(var));
        { int d; for(d=0; d<DEPTH_LUT_SIZE; d++) G_DepthW[d] = 0; }
        G_DepthExpLast = -1.0; // force first refresh

        // engine params
        G_DTreeExp = 1.10 + random(0.50);   // [1.10..1.60)
        G_FB_A     = 0.60 + random(0.25);   // [0.60..0.85)
        G_FB_B     = 1.0 - G_FB_A;
        refreshDepthW();                    // prefill LUT

        randomizeRP();
        computeProjection();
        rewireInit();

        G_Epoch = 0;
        rewireEpoch(0,0,0,0);

        // Header setup (consolidated vs legacy)
        if(LOG_EQ_TO_ONE_FILE){
            writeEqHeaderOnce();
        } else {
            char fname[64];
            int i2;
            for(i2=0;i2<NET_EQNS;i2++){
                buildEqFileName(i2,fname);
                file_append(fname,
                    "Bar,lambda,gamma,i,State,n1,n2,mean,energy,power,Vel,Mode,WAdv,WSelf,WN1,WN2,WGlob1,WGlob2,WMom,WTree,Pred,Adv,Prop,TreeTerm,TopEq,TopW,TreeId,Depth,Rate,PBull,Entropy,MCState\n");
            }
        }

        // Markov CSV header
        if(!fileInit){
            file_append("Log\\Alpha12_markov.csv","Bar,State,PBullNext,Entropy,RowSum\n");
            fileInit=1;
        }

        // initial META dump (consolidated or legacy)
        {
            int i;
            for(i=0;i<G_N;i++){
                int n1, n2, tid;
                Node* t;
                var pred, adv;

                n1  = adjSafe(i,0);
                if(G_D >= 2) n2 = adjSafe(i,1); else n2 = n1;

                tid = safeTreeIndexFromEq(G_EqTreeId[i]);
                t   = treeAt(tid);
                pred = predByTid(tid);
                adv  = 0; // no advising during INITRUN

                if(LOG_EQ_TO_ONE_FILE){
                    string expr = "";
                    if(LOG_EXPR_TEXT) expr = G_Sym[i];
                    appendEqMetaLine(
                        Bar, G_Epoch, G_CtxID, i, n1, n2, tid, t->d, t->r,
                        pred, adv, G_Prop[i], G_Mode[i], G_WAdv[i], G_WTree[i],
                        MC_PBullNext, MC_Entropy, MC_Cur, expr
                    );
                } else {
                    char fname2[64];
                    buildEqFileName(i,fname2);
                    string expr2 = "";
                    if(LOG_EXPR_TEXT) expr2 = G_Sym[i];
#ifdef LOG_FLOAT_TRIM
                    file_append(fname2,
                        strf("META,%i,%i,%i,%i,%i,%i,%i,%i,%.4f,Pred=%.4f,Adv=%.4f,Prop=%.4f,Mode=%i,WAdv=%.3f,WTree=%.3f,PBull=%.4f,Ent=%.4f,State=%i,\"%s\"\n",
                            G_Epoch, G_CtxID, NET_EQNS, i, n1, n2, tid, t->d, t->r,
                            pred, adv, (var)G_Prop[i], G_Mode[i], (var)G_WAdv[i], (var)G_WTree[i],
                            MC_PBullNext, MC_Entropy, MC_Cur, expr2));
#else
                    file_append(fname2,
                        strf("META,%i,%i,%i,%i,%i,%i,%i,%i,%.6f,Pred=%.4f,Adv=%.4f,Prop=%.6f,Mode=%i,WAdv=%.3f,WTree=%.3f,PBull=%.4f,Ent=%.4f,State=%i,\"%s\"\n",
                            G_Epoch, G_CtxID, NET_EQNS, i, n1, n2, tid, t->d, t->r,
                            pred, adv, (var)G_Prop[i], G_Mode[i], (var)G_WAdv[i], (var)G_WTree[i],
                            MC_PBullNext, MC_Entropy, MC_Cur, expr2));
#endif
                }
            }
        }

        initialized=1;
        printf("\nRoot nodes: %i | Net equations: %i (degree=%i, kproj=%i)",
               countNodes(Root), G_N, G_D, G_K);
    }

    // early zero-cost shedding when approaching cap
    if(mem_mb_est() >= MEM_BUDGET_MB - 2*MEM_HEADROOM_MB && G_ShedStage == 0)
        shed_zero_cost_once();

    // ==== Runtime memory / depth manager (acts only when near the cap)
    depth_manager_runtime();

    // ====== Per bar: Candles -> Markov (with optional cadence)
    {
        static var CDL[MC_NPAT];

        if((Bar % MC_EVERY) == 0){
            buildCDL_TA61(CDL,0);
            MC_Cur = MC_stateFromCDL(CDL, G_MC_ACT);

            if(Bar > LookBack) MC_update(MC_Prev, MC_Cur);
            MC_Prev = MC_Cur;

            {
                var rs = (var)MC_RowSum[MC_Cur];
                G_MC_Alpha = clamp(1.0 / (1.0 + rs/256.0), 0.05, 1.0);
            }

            // one-pass stats (upgrade #6)
            MC_rowStats(MC_Cur, &MC_PBullNext, &MC_Entropy);
        }
        // expose Markov features
        G_MCF_PBull   = MC_PBullNext;
        G_MCF_Entropy = MC_Entropy;
        G_MCF_State   = (var)MC_Cur;

        // adaptive acceptance rate -> adjust threshold
        {
            var aEW = 0.01; // ~100-bar half-life
            G_AccRate = (1 - aEW)*G_AccRate + aEW*(MC_Cur != 0);
            {
                var target = 0.35; // aim for ~35% nonzero states
                G_MC_ACT = clamp(G_MC_ACT + 0.02*(G_AccRate - target), 0.15, 0.60);
            }
        }
    }

    // ====== Tree driver lambda
    lambda = evaluateNode(Root);

    // ====== Rewire cadence (4) + epoch work
    {
        int doRewire = ((Bar % REWIRE_EVERY) == 0);

        // --- Patch N: If reliability is weak, allow a light early rewire
        {
            var HitAvg = 0; int ii;
            for(ii=0; ii<G_N; ii++) HitAvg += (var)G_HitEW[ii] * (var)G_Prop[ii];
            if(G_N <= 0) HitAvg = 0.5;
            // At most 1/4 the normal cadence, only when weak
            if(HitAvg < 0.46 && (Bar % (REWIRE_EVERY/4)) == 0) doRewire = 1;
        }

        // (4) early rewire when utility falls
        {
            static var U_prev = 0;
            var U_now = util_now();
            if(U_now + 0.01 < U_prev) doRewire = 1;
            U_prev = U_now;
        }

        if(doRewire){
            G_Epoch++;

            {
                int ii;
                var sum=0;
                for(ii=0;ii<G_N;ii++) sum += G_State[ii];
                {
                    var mean = sum/G_N;

                    var energy=0;
                    for(ii=0;ii<G_N;ii++) energy += G_State[ii]*G_State[ii];
                    var power = energy/G_N;

                    rewireEpoch(lambda,mean,energy,power);
                }
            }
        }

        // (8/9) adapt effective projection K each bar and recompute projection once
        G_Keff = ifelse(MC_Entropy < 0.45, KPROJ, KPROJ/2);
        computeProjection();

        // (3) dynamic advisor budget per bar (before updateNet so it applies now)
        {
            int tight = (mem_mb_est() >= MEM_BUDGET_MB - MEM_HEADROOM_MB);
            G_AdviseMax = ifelse(tight, 12, ifelse(MC_Entropy < 0.45, 32, 16));
        }

        // Update net this bar (write META only if rewired and not shedding logs)
        {
            var meanB, energyB, powerB;
            updateNet(lambda, &meanB, &energyB, &powerB, doRewire);

            var gamma = 0;

            // Feedback: compute ensemble projection
            {
                gamma = projectNet();

                // --- Accuracy sentinel update & elastic depth controller ---
                acc_update(lambda, gamma);
                edc_runtime();

                // (1) Adaptive feedback blend toward the more informative component
                {
                    var w = 0.5 + 0.5*G_AccCorr;                 // 0..1
                    G_FB_W = clamp(0.9*G_FB_W + 0.1*w, 0.2, 0.9);
                    lambda  = G_FB_W*lambda + (1.0 - G_FB_W)*gamma;
                }
            }

            // Plot/log gating
            {
                int doPlot = (ENABLE_PLOTS && !G_ChartsOff);
                int doLog = ifelse(G_LogsOff, ((Bar % (LOG_EVERY*4)) == 0), ((Bar % LOG_EVERY) == 0));

                // Plots
                if(doPlot){
                    plot("lambda", lambda, LINE, 0);
                    plot("gamma",  gamma,  LINE, 0);
                    plot("P_win",  powerB, LINE, 0);
                    plot("PBullNext", MC_PBullNext, LINE, 0);
                    plot("MC_Entropy", MC_Entropy, LINE, 0);
                    plot("MemMB", memory(0)/(1024.*1024.), LINE, 0);
                    plot("Allocs", (var)memory(2), LINE, 0);
                    plot("HitEW_7", (var)G_HitEW[7], LINE, 0);  // (PATCH K) watch eq #7
                }

                // Markov CSV log (decimated; further decimated when shedding)
                if(doLog){
#ifdef LOG_FLOAT_TRIM
                    file_append("Log\\Alpha12_markov.csv",
                        strf("%i,%i,%.4f,%.4f,%i\n", Bar, MC_Cur, MC_PBullNext, MC_Entropy, MC_RowSum[MC_Cur]));
#else
                    file_append("Log\\Alpha12_markov.csv",
                        strf("%i,%i,%.6f,%.6f,%i\n", Bar, MC_Cur, MC_PBullNext, MC_Entropy, MC_RowSum[MC_Cur]));
#endif
                    // Optional: per-eq hit snapshot (throttled by LOG_EQ_SAMPLE) (PATCH J)
                    {
                        int ii;
                        for(ii=0; ii<G_N && ii<LOG_EQ_SAMPLE; ii++){
                            file_append("Log\\Alpha12_hits.csv",
                                strf("%i,%i,%.4f,%i,%.3f,%.6f\n",
                                    Bar, ii, (var)G_HitEW[ii], G_HitN[ii], (var)G_AdvPrev[ii], G_Ret1));
                            // Columns: Bar,i,HitEW,HitN,PrevAdv,Ret1
                        }
                    }
                }
            }
        }

        // --- Patch M: reliability-weighted ensemble hit -> position sizing
        {
            var HitAvg = 0;
            int ii;
            for(ii=0; ii<G_N; ii++) HitAvg += (var)G_HitEW[ii] * (var)G_Prop[ii];
            if(G_N <= 0) HitAvg = 0.5;
            // Map 0..1 -> 0.5..2.0 lots, gently
            Lots = clamp(1.0 + 2.0*(HitAvg - 0.5), 0.5, 2.0);
        }

        // ====== Entries (Markov-gated) ======
        if( MC_PBullNext > PBULL_LONG_TH && lambda > 0.7 )  enterLong();
        if( MC_PBullNext < PBULL_SHORT_TH && lambda < -0.7 ) enterShort();
    }
}

// Clean up memory
function cleanup()
{
    if(Root) freeTree(Root);
    freeNodePool();              // upgrade #2: release pool chunks
    if(MC_Count)  free(MC_Count);
    if(MC_RowSum) free(MC_RowSum);
    if(G_DepthW)  free(G_DepthW);   // free LUT
    freeNet();
}

Last edited by TipmyPip; 09/19/25 04:47.
Consensus Gate Orchestrator [Re: TipmyPip] #488924
09/27/25 10:02
09/27/25 10:02
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
Consensus Gate Orchestrator

The system follows a gate-and-flow pattern. It begins by compressing raw, fast-moving observations into a small alphabet of archetypes—a compact context that says “what the moment looks like” right now. From the rolling stream of these archetypes it infers two quiet dials: a lean (directional tendency for the immediate next step) and a clarity (how decisive that tendency appears). Those two dials form a permission gate: sometimes it opens, sometimes it holds; sometimes it opens in one direction but not the other. The gate is conservative by design and adjusts as evidence accumulates or disperses.
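A minimal sketch of that gate, assuming the two dials are the Markov bull probability and one-minus-entropy from the engine above (the helper name and thresholds are illustrative, mirroring PBULL_LONG_TH and PBULL_SHORT_TH):

Code
// Permission gate sketch: returns +1 (long allowed), -1 (short allowed), 0 (hold).
int gateDirection(var lean, var clarity)
{
    if(clarity < 0.25) return 0;   // tendency too ambiguous: gate stays closed
    if(lean > 0.60) return 1;      // open in the long direction only
    if(lean < 0.40) return -1;     // open in the short direction only
    return 0;                      // open in neither direction
}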

Beneath the gate, a soft influence field evolves continuously. Many small units—lightweight, partially independent—carry a trace of their own past, listen to a few peers, and absorb coarse summaries from the broader environment across multiple horizons. Signals are intentionally bounded to prevent spikes from dominating. Attention is rationed: weight is allocated in proportion to agreement and reliability, so faint, inconsistent voices naturally recede while convergent evidence rises to the surface.
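A compressed sketch of that rationing, in the spirit of dtreeTerm() earlier in the thread (the helper is hypothetical; the weights stand in for agreement-times-reliability):

Code
// Bounded, normalized consensus: each peer contributes in proportion to a
// clipped weight, so faint or inconsistent voices recede automatically.
var blendPeers(var* x, var* w, int n)
{
    int j; var sumw = 0, acc = 0;
    for(j=0; j<n; j++){
        var wj = tanh(w[j]);       // bound each voice to -1..1
        if(wj < 0) wj = 0;         // unreliable peers get no say
        sumw += wj; acc += wj*x[j];
    }
    if(sumw > 0) return acc/sumw;  // weighted consensus
    return 0;                      // no credible voices: neutral
}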

Connections among these units are reshaped in measured slices. Rather than restarting from scratch, the system refreshes “who listens to whom” and how strongly, favoring simple, stable pairings and rhythm-compatible neighbors. Structure molts; scaffold stays. The goal is to remain adaptive without becoming erratic.
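The REWIRE_BATCH_* defines below implement this slicing; schematically, with a hypothetical rolling cursor instead of a full per-bar rebuild:

Code
// Rebuild only a slice of equations per bar (schematic; G_RewireCursor is illustrative).
int G_RewireCursor = 0;
void rewireSlice(int batch, var lambda, var mean, var energy, var power)
{
    int k;
    for(k=0; k<batch; k++){
        synthesizeEquationFromDTREE(G_RewireCursor, lambda, mean, energy, power);
        G_RewireCursor = (G_RewireCursor+1) % G_N;
        if(G_RewireCursor == 0) normalizeProportions(); // one full pass completed
    }
}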

Capacity breathes with circumstances. When resources tighten or extra detail stops helping, the system trims depth where it matters least. When there’s headroom and a demonstrable benefit, it adds a thin layer. Changes are tentative and reversible: growth is trialed, scored after a delay, and rolled back if utility falls. Utility balances quality of alignment with a mild cost for complexity.
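This is the apply_grow_step()/revert_last_grow() cycle from the code above, restated schematically:

Code
// Tentative, reversible growth: score the status quo, act, wait TUNE_DELAY_BARS,
// then keep the change only if utility did not fall.
void growthTrial()
{
    static var U_before; static int pending = 0, startBar = 0;
    if(!pending){
        U_before = util_now();               // score the status quo first
        if(apply_grow_step()){ pending = 1; startBar = Bar; }
    } else if(Bar - startBar >= TUNE_DELAY_BARS){
        if(util_now() + 0.01 < U_before) revert_last_grow(); // utility fell: roll back
        pending = 0;
    }
}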

Decisions happen only when permission and the influence field agree meaningfully. Timing and size of action (in any application) scale with consensus strength; ambiguity elevates patience. “Do nothing” is first-class, not failure.
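Patch M in the code above maps exactly this: consensus strength scales size, while a closed gate means no action at all. As a one-liner:

Code
// Consensus-scaled sizing: 0.5 lots at no edge, up to 2.0 at strong agreement.
var sizeFromConsensus(var hitAvg) // reliability-weighted hit average in 0..1
{
    return clamp(1.0 + 2.0*(hitAvg - 0.5), 0.5, 2.0);
}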

A compact diary records the moment’s archetype, the two gate dials, and terse sketches of how influences combined to justify the current posture. It favors clarity over detail, enabling auditability without exposing internals.

What emerges is coherence without rigidity. Groups move together when rhythms align; solos fade when clarity drops. Adaptation is maintained through many small adjustments, not dramatic overhauls, so behavior tracks structural change while staying steady between regimes.

Code
// ======================================================================
// Alpha12 - Markov-augmented Harmonic D-Tree Engine (Candlestick 122-dir)
// with runtime memory shaping, selective depth pruning,
// elastic accuracy-aware depth growth, and equation-cycle time series.
// ======================================================================

// ================= USER CONFIG =================
#define ASSET_SYMBOL "EUR/USD"
#define BAR_PERIOD 5
#define TF_H1 12
// ... (rest of your USER CONFIG defines)

// ---- Forward declarations (needed by hooks placed early) ----
void Alpha12_init();
void Alpha12_bar();
void Alpha12_cleanup();
void updateAllMarkov();

#define MC_ACT 0.30         // initial threshold on |CDL| in [-1..1] to accept a pattern
#define PBULL_LONG_TH 0.60  // Markov gate for long
#define PBULL_SHORT_TH 0.40 // Markov gate for short

// ===== Debug toggles (Fix #1 - chart/watch growth off by default) =====
#define ENABLE_PLOTS 0  // 0 = no plot buffers; 1 = enable plot() calls
#define ENABLE_WATCH 0  // 0 = disable watch() probes; 1 = enable

// ================= ENGINE PARAMETERS =================
#define MAX_BRANCHES 3
#define MAX_DEPTH 4
#define NWIN 256
#define NET_EQNS 100
#define DEGREE 4
#define KPROJ 16
#define REWIRE_EVERY 127
#define CAND_NEIGH 8

// ===== LOGGING CONTROLS (memory management) =====
#define LOG_EQ_TO_ONE_FILE 1 // 1: single consolidated EQ CSV; 0: per-eq files
#define LOG_EXPR_TEXT 0      // 0: omit full expression (store signature only); 1: include text
#define META_EVERY 4         // write META every N rewires
#define LOG_EQ_SAMPLE NET_EQNS
#define EXPR_MAXLEN 512
#define LOG_FLOAT_TRIM
#define LOG_EVERY 16
#define MC_EVERY 1

// ---- DTREE feature sizes (extended: adds cycle + multi-TF features) ----
#define ADV_EQ_NF 19     // CHANGED: was 15, now +4 (5M + Relation)
#define ADV_PAIR_NF 12   // <-- RESTORED: used by buildPairFeatures()

// ================= Candles -> 122-state Markov =================
#define MC_NPAT 61
#define MC_STATES 123   // 1 + 2*MC_NPAT
#define MC_NONE 0
#define MC_LAPLACE 1.0  // kept for reference; runtime uses G_MC_Alpha

// ================= Runtime Memory / Accuracy Manager =================
#define MEM_BUDGET_MB 50
#define MEM_HEADROOM_MB 5
#define DEPTH_STEP_BARS 16
#define KEEP_CHILDREN_HI 2
#define KEEP_CHILDREN_LO 1
#define RUNTIME_MIN_DEPTH 2

// ===== Chunked rewire settings =====
#define REWIRE_BATCH_EQ_5M 24 // equations to (re)build on 5m bars
#define REWIRE_BATCH_EQ_H1 64 // bigger chunk when an H1 closes
#define REWIRE_MIN_BATCH 8    // floor under pressure
#define REWIRE_NORM_EVERY 1   // normalize after completing 1 full pass
// If mem est near budget, scale batch down
#define REWIRE_MEM_SOFT (MEM_BUDGET_MB - 4)
#define REWIRE_MEM_HARD (MEM_BUDGET_MB - 1)

// ===== Chunked update settings (heavy DTREE/advisor in slices) =====
// (Added per your patch)
#define UPDATE_BATCH_EQ_5M 32  // heavy updates on 5m bars
#define UPDATE_BATCH_EQ_H1 96  // larger slice when an H1 closes
#define UPDATE_MIN_BATCH 8
#define UPDATE_MEM_SOFT (MEM_BUDGET_MB - 4)
#define UPDATE_MEM_HARD (MEM_BUDGET_MB - 1)

// runtime flag used by alpha12_step()
int ALPHA12_READY = 0;   // single global init sentinel (int)
int G_ShedStage = 0;  // 0..2
int G_LastDepthActBar = -999999;
int G_ChartsOff = 0;  // gates plot()
int G_LogsOff = 0;    // gates file_append cadence
int G_SymFreed = 0;   // expression buffers freed
int G_RT_TreeMaxDepth = MAX_DEPTH;

// ---- Accuracy sentinel (EW correlation of lambda vs gamma) ----
var ACC_mx=0, ACC_my=0, ACC_mx2=0, ACC_my2=0, ACC_mxy=0;
var G_AccCorr = 0;   // [-1..1]
var G_AccBase = 0;   // first seen sentinel
int G_HaveBase = 0;

// ---- Elastic depth tuner (small growth trials with rollback) ----
#define DEPTH_TUNE_BARS 64  // start a growth trial this often (when memory allows)
#define TUNE_DELAY_BARS 64  // evaluate the trial after this many bars
var G_UtilBefore = 0, G_UtilAfter = 0;
int G_TunePending = 0;
int G_TuneStartBar = 0;
int G_TuneAction = 0; // +1 grow trial, 0 none

// ======================================================================
// Types & globals used by memory estimator
// ======================================================================

// HARMONIC D-TREE type
typedef struct Node {
  var v;
  var r;
  void* c;
  int n;
  int d;
} Node;

// ====== Node pool (upgrade #2) ======
typedef struct NodeChunk {
  struct NodeChunk* next;
  int used;  // 4 bytes
  int _pad;  // 4 bytes -> ensures nodes[] starts at 8-byte offset on 32-bit
  Node nodes[256]; // each Node contains doubles; keep this 8-byte aligned
} NodeChunk;

NodeChunk* G_ChunkHead = 0;
Node* G_FreeList = 0;

Node* poolAllocNode() {
  if(G_FreeList){
    Node* n = G_FreeList;
    G_FreeList = (Node*)n->c;
    n->c = 0;
    n->n = 0;
    n->d = 0;
    n->v = 0;
    n->r = 0;
    return n;
  }
  if(!G_ChunkHead || G_ChunkHead->used >= 256){
    NodeChunk* ch = (NodeChunk*)malloc(sizeof(NodeChunk));
    if(!ch) { quit("Alpha12: OOM allocating NodeChunk (poolAllocNode)"); return 0; }
    memset(ch, 0, sizeof(NodeChunk));
    ch->next = G_ChunkHead;
    ch->used = 0;
    G_ChunkHead = ch;
  }
  if(G_ChunkHead->used < 0 || G_ChunkHead->used >= 256){
    quit("Alpha12: Corrupt node pool state");
    return 0;
  }
  return &G_ChunkHead->nodes[G_ChunkHead->used++];
}

void poolFreeNode(Node* u){
  if(!u) return;
  u->c = (void*)G_FreeList;
  G_FreeList = u;
}

void freeNodePool() {
  NodeChunk* ch = G_ChunkHead;
  while(ch){
    NodeChunk* nx = ch->next;
    free(ch);
    ch = nx;
  }
  G_ChunkHead = 0;
  G_FreeList = 0;
}

// Minimal globals needed before mem estimator
Node* Root = 0;
Node** G_TreeIdx = 0;
int G_TreeN = 0;
int G_TreeCap = 0;
var G_DTreeExp = 0;

// ---- (upgrade #1) depth LUT for pow() ----
#define DEPTH_LUT_SIZE (MAX_DEPTH + 1) // <- keep constant for lite-C
var* G_DepthW = 0; // heap-allocated LUT
var G_DepthExpLast = -1.0; // sentinel as var
Node G_DummyNode; // treeAt() can return &G_DummyNode

// Network sizing globals (used by mem estimator)
int G_N = NET_EQNS;
int G_D = DEGREE;
int G_K = KPROJ;

// Optional expression buffer pointer (referenced by mem estimator)
string* G_Sym = 0;

// Forward decls that reference Node
var nodePredictability(Node* t); // fwd decl (needed by predByTid)
var nodeImportance(Node* u);     // fwd decl (uses nodePredictability below)
void pruneSelectiveAtDepth(Node* u, int targetDepth, int keepK);
void reindexTreeAndMap();

// Forward decls for advisor functions (so adviseSeed can call them)
var adviseEq(int i, var lambda, var mean, var energy, var power);
var advisePair(int i,int j, var lambda, var mean, var energy, var power);

// ----------------------------------------------------------------------
// === Adaptive knobs & sentinels (NEW) ===
var G_FB_W = 0.70; // (1) dynamic lambda/gamma blend weight 0..1
var G_MC_ACT = MC_ACT; // (2) adaptive candlestick acceptance threshold
var G_AccRate = 0;     // (2) EW acceptance rate of (state != 0)
// (3) advisor budget per bar (replaces the macro)
int G_AdviseMax = 16;
// (6) Markov Laplace smoothing (runtime)
var G_MC_Alpha = 1.0;
// (7) adaptive candidate breadth for adjacency search
int G_CandNeigh = CAND_NEIGH;
// (8) effective projection dimension (= KPROJ or KPROJ/2)
int G_Keff = KPROJ;
// (5) depth emphasis hill-climber
var G_DTreeExpStep = 0.05;
int G_DTreeExpDir = 1;

// ---- Advise budget/rotation (Fix #2) ----
#define ADVISE_ROTATE 1 // 1 = rotate which equations get DTREE each bar
int allowAdvise(int i) {
  if(ADVISE_ROTATE){
    int groups = NET_EQNS / G_AdviseMax;
    if(groups < 1) groups = 1;
    return ((i / G_AdviseMax) % groups) == (Bar % groups);
  } else {
    return (i < G_AdviseMax);
  }
}
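
// A minimal sketch of the rotation schedule, assuming illustrative
// values NET_EQNS = 64 and G_AdviseMax = 16: groups = 64/16 = 4, so
// equations 0..15 are advised when Bar%4 == 0, 16..31 when Bar%4 == 1,
// and so on; every equation gets DTREE advice exactly once per 4 bars.
// demoAllowAdvise() is a hypothetical helper (not called anywhere) that
// counts how many equations qualify on the current bar:
void demoAllowAdvise() {
  int i, n = 0;
  for(i=0; i<NET_EQNS; i++)
    if(allowAdvise(i)) n++;
  printf("\n[Demo] %i of %i equations advised this bar", n, NET_EQNS);
}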

// ======================================================================
// A) Tight-memory switches and compact types
// ======================================================================
#define TIGHT_MEM 1 // turn on compact types for arrays

// consolidated EQ CSV -> don't enable extra meta
// (no #if available; force meta OFF explicitly)


#ifdef TIGHT_MEM
typedef float fvar;  // 4B instead of 8B 'var' for large coefficient arrays
typedef short i16;   // -32768..32767 indices
typedef char i8;     // small enums/modes
#else
typedef var fvar;
typedef int i16;
typedef int i8;
#endif

// ---- tree byte size (counts nodes + child pointer arrays) ----
int tree_bytes(Node* u) {
  if(!u) return 0;
  int SZV = sizeof(var), SZI = sizeof(int), SZP = sizeof(void*);
  int sz_node = 2*SZV + SZP + 2*SZI;
  int total = sz_node;
  if(u->n > 0 && u->c) total += u->n * SZP;
  int i;
  for(i=0;i<u->n;i++) total += tree_bytes(((Node**)u->c)[i]);
  return total;
}

// ======================================================================
// Optimized memory estimator & predictability caches
// ======================================================================

int G_MemFixedBytes = 0;   // invariant part (arrays, Markov + pointer vec + expr opt)
int G_TreeBytesCached = 0; // current D-Tree structure bytes

var* G_PredNode = 0; // length == G_TreeN; -2 = not computed this bar
int G_PredLen = 0;
int G_PredCap = 0;   // (upgrade #5)
int G_PredCacheBar = -1;

void recalcTreeBytes(){
  G_TreeBytesCached = tree_bytes(Root);
}

void computeMemFixedBytes() {
  int N = G_N, D = G_D, K = G_K;
  int SZV = sizeof(var), SZF = sizeof(fvar), SZI16 = sizeof(i16), SZI8 = sizeof(i8), SZP = sizeof(void*);
  int b = 0;

  // --- core state (var-precision) ---
  b += N*SZV*2; // G_State, G_Prev

  // --- adjacency & ids ---
  b += N*D*SZI16; // G_Adj
  b += N*SZI16;   // G_EqTreeId
  b += N*SZI8;    // G_Mode

  // --- random projection ---
  b += K*N*SZF; // G_RP
  b += K*SZF;   // G_Z

  // --- weights & params (fvar) ---
  b += N*SZF*(8);          // G_W* (WSelf, WN1, WN2, WGlob1, WGlob2, WMom, WTree, WAdv)
  b += N*SZF*(7 + 7);      // A1*, A2*
  b += N*SZF*(2 + 2);      // G1mean,G1E,G2P,G2lam
  b += N*SZF*(2);          // TAlpha, TBeta
  b += N*SZF*(1);          // G_TreeTerm

  b += N*(SZI16 + SZF);    // G_TopEq, G_TopW

  // --- proportions ---
  b += N*SZF*2;            // G_PropRaw, G_Prop

  // --- per-equation hit-rate bookkeeping ---
  b += N*SZF;              // G_HitEW
  b += N*SZF;              // G_AdvPrev
  b += N*sizeof(int);      // G_HitN

  // --- Markov storage (unchanged ints) ---
  b += MC_STATES*MC_STATES*sizeof(int) + MC_STATES*sizeof(int);

  // pointer vector for tree index (capacity part)
  b += G_TreeCap*SZP;

  // optional expression buffers
  if(LOG_EXPR_TEXT && G_Sym && !G_SymFreed) b += N*EXPR_MAXLEN;

  G_MemFixedBytes = b;
}

void ensurePredCache() {
  if(G_PredCacheBar != Bar){
    if(G_PredNode){
      int i, n = G_PredLen;
      for(i=0;i<n;i++) G_PredNode[i] = -2;
    }
    G_PredCacheBar = Bar;
  }
}

var predByTid(int tid) {
  if(!G_TreeIdx || tid < 0 || tid >= G_TreeN || !G_TreeIdx[tid]) return 0.5;
  ensurePredCache();
  if(G_PredNode && tid < G_PredLen && G_PredNode[tid] > -1.5) return G_PredNode[tid];
  Node* t = G_TreeIdx[tid];
  var p = 0.5;
  if(t) p = nodePredictability(t);
  if(G_PredNode && tid < G_PredLen) G_PredNode[tid] = p;
  return p;
}

// ======================================================================
// Conservative in-script memory estimator (arrays + pointers) - O(1)
// ======================================================================
int mem_bytes_est(){ return G_MemFixedBytes + G_TreeBytesCached; }
int mem_mb_est(){ return mem_bytes_est() / (1024*1024); }
int memMB(){ return (int)(memory(0)/(1024*1024)); }
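
// Usage note (hedged): mem_mb_est() is the O(1) in-script estimate that
// the depth/rewire/update schedulers compare against MEM_BUDGET_MB,
// while memMB() reads Zorro's own allocation counter via memory(0).
// Logging both occasionally is a cheap way to spot estimator drift:
// printf("\n[Mem] est=%i MB real=%i MB", mem_mb_est(), memMB());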

// light one-shot shedding
void shed_zero_cost_once() {
  if(G_ShedStage > 0) return;
  set(PLOTNOW|OFF);
  G_ChartsOff = 1;
  G_LogsOff = 1;
  G_ShedStage = 1;
}

void freeExprBuffers() {
  if(!G_Sym || G_SymFreed) return;
  int i;
  for(i=0;i<G_N;i++) if(G_Sym[i]) free(G_Sym[i]);
  free(G_Sym);
  G_Sym = 0;
  G_SymFreed = 1;
  computeMemFixedBytes();
}

// depth manager (prune & shedding)
void depth_manager_runtime() {
  int trigger = MEM_BUDGET_MB - MEM_HEADROOM_MB;
  int mb = mem_mb_est();
  if(mb < trigger) return;

  if(G_ShedStage == 0) shed_zero_cost_once();
  if(G_ShedStage <= 1){
    if(LOG_EXPR_TEXT==0 && !G_SymFreed) freeExprBuffers();
    G_ShedStage = 2;
  }

  int overBudget = (mb >= MEM_BUDGET_MB);
  if(!overBudget && (Bar - G_LastDepthActBar < DEPTH_STEP_BARS)) return;

  while(G_RT_TreeMaxDepth > RUNTIME_MIN_DEPTH) {
    int keepK = ifelse(mem_mb_est() < MEM_BUDGET_MB + 2, KEEP_CHILDREN_HI, KEEP_CHILDREN_LO);
    pruneSelectiveAtDepth((Node*)Root, G_RT_TreeMaxDepth, keepK);
    G_RT_TreeMaxDepth--;
    reindexTreeAndMap();
    mb = mem_mb_est();
    printf("\n[DepthMgr] depth=%i keepK=%i est=%i MB", G_RT_TreeMaxDepth, keepK, mb);
    if(mb < trigger) break;
  }
  G_LastDepthActBar = Bar;
}

// ----------------------------------------------------------------------
// 61 candlestick patterns (Zorro spellings kept). Each returns [-100..100].
// We rescale to [-1..1] for Markov state construction.
// ----------------------------------------------------------------------
int buildCDL_TA61(var* out, string* names)
{
    int n = 0;
    #define ADD(Name, Call) do{ var v = (Call); if(out) out[n] = v/100.; if(names) names[n] = Name; n++; }while(0)

    ADD("CDL2Crows",              CDL2Crows());
    ADD("CDL3BlackCrows",         CDL3BlackCrows());
    ADD("CDL3Inside",             CDL3Inside());
    ADD("CDL3LineStrike",         CDL3LineStrike());
    ADD("CDL3Outside",            CDL3Outside());
    ADD("CDL3StarsInSouth",       CDL3StarsInSouth());
    ADD("CDL3WhiteSoldiers",      CDL3WhiteSoldiers());
    ADD("CDLAbandonedBaby",       CDLAbandonedBaby(0.3));
    ADD("CDLAdvanceBlock",        CDLAdvanceBlock());
    ADD("CDLBeltHold",            CDLBeltHold());
    ADD("CDLBreakaway",           CDLBreakaway());
    ADD("CDLClosingMarubozu",     CDLClosingMarubozu());
    ADD("CDLConcealBabysWall",    CDLConcealBabysWall());
    ADD("CDLCounterAttack",       CDLCounterAttack());
    ADD("CDLDarkCloudCover",      CDLDarkCloudCover(0.3));
    ADD("CDLDoji",                CDLDoji());
    ADD("CDLDojiStar",            CDLDojiStar());
    ADD("CDLDragonflyDoji",       CDLDragonflyDoji());
    ADD("CDLEngulfing",           CDLEngulfing());
    ADD("CDLEveningDojiStar",     CDLEveningDojiStar(0.3));
    ADD("CDLEveningStar",         CDLEveningStar(0.3));
    ADD("CDLGapSideSideWhite",    CDLGapSideSideWhite());
    ADD("CDLGravestoneDoji",      CDLGravestoneDoji());
    ADD("CDLHammer",              CDLHammer());
    ADD("CDLHangingMan",          CDLHangingMan());
    ADD("CDLHarami",              CDLHarami());
    ADD("CDLHaramiCross",         CDLHaramiCross());
    ADD("CDLHignWave",            CDLHignWave());
    ADD("CDLHikkake",             CDLHikkake());
    ADD("CDLHikkakeMod",          CDLHikkakeMod());
    ADD("CDLHomingPigeon",        CDLHomingPigeon());
    ADD("CDLIdentical3Crows",     CDLIdentical3Crows());
    ADD("CDLInNeck",              CDLInNeck());
    ADD("CDLInvertedHammer",      CDLInvertedHammer());
    ADD("CDLKicking",             CDLKicking());
    ADD("CDLKickingByLength",     CDLKickingByLength());
    ADD("CDLLadderBottom",        CDLLadderBottom());
    ADD("CDLLongLeggedDoji",      CDLLongLeggedDoji());
    ADD("CDLLongLine",            CDLLongLine());
    ADD("CDLMarubozu",            CDLMarubozu());
    ADD("CDLMatchingLow",         CDLMatchingLow());
    ADD("CDLMatHold",             CDLMatHold(0.5));
    ADD("CDLMorningDojiStar",     CDLMorningDojiStar(0.3));
    ADD("CDLMorningStar",         CDLMorningStar(0.3));
    ADD("CDLOnNeck",              CDLOnNeck());
    ADD("CDLPiercing",            CDLPiercing());
    ADD("CDLRickshawMan",         CDLRickshawMan());
    ADD("CDLRiseFall3Methods",    CDLRiseFall3Methods());
    ADD("CDLSeperatingLines",     CDLSeperatingLines());
    ADD("CDLShootingStar",        CDLShootingStar());
    ADD("CDLShortLine",           CDLShortLine());
    ADD("CDLSpinningTop",         CDLSpinningTop());
    ADD("CDLStalledPattern",      CDLStalledPattern());
    ADD("CDLStickSandwhich",      CDLStickSandwhich());
    ADD("CDLTakuri",              CDLTakuri());
    ADD("CDLTasukiGap",           CDLTasukiGap());
    ADD("CDLThrusting",           CDLThrusting());
    ADD("CDLTristar",             CDLTristar());
    ADD("CDLUnique3River",        CDLUnique3River());
    ADD("CDLUpsideGap2Crows",     CDLUpsideGap2Crows());
    ADD("CDLXSideGap3Methods",    CDLXSideGap3Methods());

    #undef ADD
    return n; // 61
}

// ================= Markov storage & helpers =================
static int* MC_Count;  // [MC_STATES*MC_STATES] -> we alias this as the 1H (HTF) chain
static int* MC_RowSum; // [MC_STATES]

static int MC_Prev = -1;
static int MC_Cur = 0;
static var MC_PBullNext = 0.5;
static var MC_Entropy = 0.0;

#define MC_IDX(fr,to) ((fr)*MC_STATES + (to))

int MC_stateFromCDL(var* cdl /*len=61*/, var thr) {
  int i, best=-1;
  var besta=0;
  for(i=0;i<MC_NPAT;i++){
    var a = abs(cdl[i]);
    if(a>besta){ besta=a; best=i; }
  }
  if(best<0) return MC_NONE;
  if(besta < thr) return MC_NONE;
  int bull = (cdl[best] > 0);
  return 1 + 2*best + bull; // 1..122
}
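
// Encoding sketch (illustrative numbers): if the strongest pattern is
// index 4 and reads bullish, the state is 1 + 2*4 + 1 = 10; the bearish
// reading of the same pattern maps to 9. State 0 (MC_NONE) is reserved
// for "no pattern above thr", so valid states span 1..122
// (61 patterns x 2 directions).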

int MC_isBull(int s){
  if(s<=0) return 0;
  return ((s-1)%2)==1;
}

void MC_update(int sPrev,int sCur){
  if(sPrev<0) return;
  MC_Count[MC_IDX(sPrev,sCur)]++;
  MC_RowSum[sPrev]++;
}

// === (6) Use runtime Laplace alpha (G_MC_Alpha) ===
var MC_prob(int s,int t){
  var num = (var)MC_Count[MC_IDX(s,t)] + G_MC_Alpha;
  var den = (var)MC_RowSum[s] + G_MC_Alpha*MC_STATES;
  if(den<=0) return 1.0/MC_STATES;
  return num/den;
}
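
// Worked example (illustrative counts, assuming MC_STATES = 123):
// with G_MC_Alpha = 1, a row with RowSum = 7 and Count = 3 for one
// target gives P = (3+1)/(7+123) = 4/130 = 0.031, while an unseen
// transition still gets 1/130 = 0.0077 instead of 0; this keeps PBull
// and the entropy estimates finite in the early bars of a run.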

// === (6) one-pass PBull + Entropy
void MC_rowStats(int s, var* outPBull, var* outEntropy) {
  if(s<0){
    if(outPBull) *outPBull=0.5;
    if(outEntropy) *outEntropy=1.0;
    return;
  }
  int t;
  var Z=0, pBull=0;
  for(t=1;t<MC_STATES;t++){
    var p=MC_prob(s,t);
    Z+=p;
    if(MC_isBull(t)) pBull+=p;
  }
  if(Z<=0){
    if(outPBull) *outPBull=0.5;
    if(outEntropy) *outEntropy=1.0;
    return;
  }
  var H=0;
  for(t=1;t<MC_STATES;t++){
    var p = MC_prob(s,t)/Z;
    if(p>0) H += -p*log(p);
  }
  var Hmax = log(MC_STATES-1);
  if(Hmax<=0) H = 0; else H = H/Hmax;
  if(outPBull) *outPBull = pBull/Z;
  if(outEntropy) *outEntropy = H;
}

// ==================== NEW: Multi-TF Markov extensions ====================
// We keep the legacy MC_* as the HTF (1H) chain via aliases:
#define MH_Count MC_Count
#define MH_RowSum MC_RowSum
#define MH_Prev MC_Prev
#define MH_Cur MC_Cur
#define MH_PBullNext MC_PBullNext
#define MH_Entropy MC_Entropy

// ---------- 5M (LTF) Markov ----------
static int* ML_Count;  // [MC_STATES*MC_STATES]
static int* ML_RowSum; // [MC_STATES]
static int ML_Prev = -1;
static int ML_Cur = 0;
static var ML_PBullNext = 0.5;
static var ML_Entropy = 0.0;

// ---------- Relation Markov (links 5M & 1H) ----------
#define MR_STATES MC_STATES
static int* MR_Count;  // [MR_STATES*MC_STATES]
static int* MR_RowSum; // [MR_STATES]
static int MR_Prev = -1;
static int MR_Cur = 0;
static var MR_PBullNext = 0.5;
static var MR_Entropy = 0.0;

// Relation state mapping (agreement only)
// sL, sH in [0..122], 0 = none; returns [0..122], 0 = no-agreement
int MC_relFromHL(int sL, int sH) {
  if(sL <= 0 || sH <= 0) return MC_NONE;
  int idxL = (sL - 1)/2; int bullL = ((sL - 1)%2)==1;
  int idxH = (sH - 1)/2; int bullH = ((sH - 1)%2)==1;
  if(idxL == idxH && bullL == bullH) return sL; // same shared state
  return MC_NONE; // no-agreement bucket
}

// Small helpers reused for all three chains
void MC_update_any(int* C, int* R, int sPrev, int sCur) {
  if(sPrev<0) return;
  C[MC_IDX(sPrev,sCur)]++;
  R[sPrev]++;
}

// Ultra-safe row stats for any Markov matrix (Zorro lite-C friendly)
void MC_rowStats_any(int* C, int* R, int s, var alpha, var* outPBull, var* outEntropy)
{
  // Defaults
  if(outPBull)   *outPBull   = 0.5;
  if(outEntropy) *outEntropy = 1.0;

  // Guards
  if(!C || !R) return;
  if(!(alpha > 0)) alpha = 1.0;        // also catches NaN/INF
  if(s <= 0 || s >= MC_STATES) return; // ignore NONE(0) and OOB

  // Row must have observations
  {
    int rs = R[s];
    if(rs <= 0) return;
  }

  // Precompute safe row slice
  int STATES = MC_STATES;
  int NN     = STATES * STATES;

  int rowBase = s * STATES;
  if(rowBase < 0 || rowBase > NN - STATES) return; // paranoid bound
  int* Crow = C + rowBase;

  // Denominator with Laplace smoothing
  var den = (var)R[s] + alpha * (var)STATES;
  if(!(den > 0)) return;

  // Pass 1: mass and bull mass
  var Z = 0.0, pBull = 0.0;
  int t;
  for(t = 1; t < STATES; t++){
    var num = (var)Crow[t] + alpha;
    var p   = num / den;
    Z += p;
    if(MC_isBull(t)) pBull += p;
  }
  if(!(Z > 0)) return;

  // Pass 2: normalized entropy
  var H = 0.0;
  var Hmax = log((var)(STATES - 1));
  if(!(Hmax > 0)) Hmax = 1.0;

  for(t = 1; t < STATES; t++){
    var num = (var)Crow[t] + alpha;
    var p   = (num / den) / Z;
    if(p > 0) H += -p*log(p);
  }

  if(outPBull)   *outPBull   = pBull / Z;
  if(outEntropy) *outEntropy = H / Hmax;
}

// --------------- 5M chain (every 5-minute bar) ---------------
void updateMarkov_5M()
{
  // arrays must exist
  if(!ML_Count || !ML_RowSum) return;

  // compute LTF candlestick state
  static var CDL_L[MC_NPAT];
  buildCDL_TA61(CDL_L, 0);
  int s = MC_stateFromCDL(CDL_L, G_MC_ACT);   // 0..MC_STATES-1 (0 = NONE)

  // debug/guard: emit when state is NONE or out of range (no indexing yet)
  if(s <= 0 || s >= MC_STATES) printf("\n[MC] skip s=%i (Bar=%i)", s, Bar);

  // update transitions once we have enough history
  if(Bar > LookBack) MC_update_any(ML_Count, ML_RowSum, ML_Prev, s);
  ML_Prev = s;

  // only compute stats when s is a valid, in-range state and the row has mass
  if(s > 0 && s < MC_STATES){
    if(ML_RowSum[s] > 0)
      MC_rowStats_any(ML_Count, ML_RowSum, s, G_MC_Alpha, &ML_PBullNext, &ML_Entropy);
    ML_Cur = s;    // keep last valid state; do not overwrite on NONE
  }
  // else: leave ML_Cur unchanged (sticky last valid)
}

// --------------- 1H chain (only when an H1 bar closes) ---------------
void updateMarkov_1H()
{
  // arrays must exist
  if(!MC_Count || !MC_RowSum) return;

  // switch to 1H timeframe for the patterns
  int saveTF = TimeFrame;
  TimeFrame  = TF_H1;

  static var CDL_H[MC_NPAT];
  buildCDL_TA61(CDL_H, 0);
  int sH = MC_stateFromCDL(CDL_H, G_MC_ACT);  // 0..MC_STATES-1

  // debug/guard: emit when state is NONE or out of range (no indexing yet)
  if(sH <= 0 || sH >= MC_STATES) printf("\n[MC] skip sH=%i (Bar=%i)", sH, Bar);

  if(Bar > LookBack) MC_update(MH_Prev, sH);
  MH_Prev = sH;

  // only compute stats when sH is valid and its row has mass
  if(sH > 0 && sH < MC_STATES){
    if(MH_RowSum[sH] > 0)
      MC_rowStats(sH, &MH_PBullNext, &MH_Entropy); // HTF uses legacy helper
    MH_Cur = sH;   // keep last valid HTF state
  }
  // else: leave MH_Cur unchanged

  // restore original timeframe
  TimeFrame = saveTF;
}

// --------------- Relation chain (agreement-only between 5M & 1H) ---------------
void updateMarkov_REL()
{
  // arrays must exist
  if(!MR_Count || !MR_RowSum) return;

  // relation state from current LTF state and last HTF state
  int r = MC_relFromHL(ML_Cur, MH_Cur);  // 0 = no agreement / none

  // debug/guard: emit when relation is NONE or out of range (no indexing yet)
  if(r <= 0 || r >= MC_STATES) printf("\n[MC] skip r=%i (Bar=%i)", r, Bar);

  if(Bar > LookBack) MC_update_any(MR_Count, MR_RowSum, MR_Prev, r);
  MR_Prev = r;

  // only compute stats when r is valid and row has mass
  if(r > 0 && r < MC_STATES){
    if(MR_RowSum[r] > 0)
      MC_rowStats_any(MR_Count, MR_RowSum, r, G_MC_Alpha, &MR_PBullNext, &MR_Entropy);
    MR_Cur = r;    // keep last valid relation state
  }
  // else: leave MR_Cur unchanged
}

// ================= HARMONIC D-TREE ENGINE =================
// ---------- utils ----------
var randsign(){ return ifelse(random(1) < 0.5, -1.0, 1.0); }
var mapUnit(var u,var lo,var hi){
  if(u<-1) u=-1;
  if(u>1) u=1;
  var t=0.5*(u+1.0);
  return lo + t*(hi-lo);
}
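
// Quick check of the mapping: mapUnit(-1,lo,hi) == lo,
// mapUnit(0,lo,hi) == (lo+hi)/2, and mapUnit(1,lo,hi) == hi; inputs
// outside [-1..1] are clipped first, so raw DTREE/mix01 outputs can be
// fed in without pre-scaling.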

// ---- safety helpers ----
var safeNum(var x) {
  if(invalid(x)) return 0;                 // 0 for NaN/INF
  return clamp(x,-1e100,1e100);            // hard-limit range
}
void sanitize(var* A,int n){
  int k; for(k=0;k<n;k++) A[k]=safeNum(A[k]);
}
var sat100(var x){ return clamp(x,-100,100); }

// ===== EQC-0: Equation-cycle angle helpers =====
var pi() { return 3.141592653589793; }
var wrapPi(var a) {
  while(a <= -pi()) a += 2.*pi();
  while(a >  pi()) a -= 2.*pi();
  return a;
}
var angDiff(var a, var b) { return wrapPi(b - a); }

// ---- small string helpers (for memory-safe logging) ----
void strlcat_safe(string dst, string src, int cap) {
  if(!dst || !src || cap <= 0) return;
  int dl = strlen(dst);
  int sl = strlen(src);
  int room = cap - 1 - dl;
  if(room <= 0){ if(cap > 0) dst[cap-1] = 0; return; }
  int i;
  for(i = 0; i < room && i < sl; i++) dst[dl + i] = src[i];
  dst[dl + i] = 0;
}
int countSubStr(string s, string sub){
  if(!s || !sub) return 0;
  int n=0; string p=s; int sublen = strlen(sub); if(sublen<=0) return 0;
  while((p=strstr(p,sub))){ n++; p += sublen; }
  return n;
}

// ---------- FIXED: use int (lite-C) and keep non-negative ----------
int djb2_hash(string s){
  int h = 5381, c, i = 0;
  if(!s) return h;
  while((c = s[i++])) h = ((h<<5)+h) ^ c; // h*33 ^ c
  return h & 0x7fffffff; // force non-negative
}

// ---- tree helpers ----
int validTreeIndex(int tid){
  if(!G_TreeIdx) return 0;
  if(tid<0||tid>=G_TreeN) return 0;
  return (G_TreeIdx[tid]!=0);
}
Node* treeAt(int tid){
  if(validTreeIndex(tid)) return G_TreeIdx[tid];
  return &G_DummyNode;
}
int safeTreeIndexFromEq(int eqi){
  int denom = ifelse(G_TreeN>0, G_TreeN, 1);
  int tid = eqi;
  if(tid < 0) tid = 0;
  if(denom > 0) tid = tid % denom;
  if(tid < 0) tid = 0;
  return tid;
}

// ---- tree indexing ----
void pushTreeNode(Node* u){
  if(G_TreeN >= G_TreeCap){
    int newCap = G_TreeCap*2;
    if(newCap < 64) newCap = 64;
    Node** tmp = (Node**)realloc(G_TreeIdx, newCap*sizeof(Node*));
    if(!tmp){ quit("Alpha12: OOM growing tree index (pushTreeNode)"); return; }
    G_TreeIdx = tmp;
    G_TreeCap = newCap;
    computeMemFixedBytes();
  }
  G_TreeIdx[G_TreeN++] = u;
}
void indexTreeDFS(Node* u){
  if(!u) return;
  pushTreeNode(u);
  int i;
  for(i=0;i<u->n;i++) indexTreeDFS(((Node**)u->c)[i]);
}

// ---- shrink index capacity after pruning (Fix #3) ----
void maybeShrinkTreeIdx(){
  if(!G_TreeIdx) return;
  if(G_TreeCap > 64 && G_TreeN < (G_TreeCap >> 1)){
    int newCap = (G_TreeCap >> 1);
    if(newCap < 64) newCap = 64;
    Node** tmp = (Node**)realloc(G_TreeIdx, newCap*sizeof(Node*));
    if(!tmp) return; // shrink failed: keep the old (larger) block
    G_TreeIdx = tmp;
    G_TreeCap = newCap;
    computeMemFixedBytes();
  }
}

// ---- depth LUT helper (upgrade #1) ----
void refreshDepthW() {
  if(!G_DepthW) return;
  int d; for(d=0; d<DEPTH_LUT_SIZE; d++) G_DepthW[d] = 1.0 / pow(d+1, G_DTreeExp);
  G_DepthExpLast = G_DTreeExp;
}
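
// Example weights, assuming G_DTreeExp = 1.0: depth 0 -> 1.0,
// depth 1 -> 0.5, depth 3 -> 0.25, i.e. 1/(d+1)^G_DTreeExp.
// evaluateNode() uses this weight as the EMA blend factor per node, so
// raising G_DTreeExp makes deep nodes update more sluggishly.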

// ---- tree create/eval (with pool & LUT upgrades) ----
Node* createNode(int depth) {
  Node* u = poolAllocNode();
  if(!u) return 0;
  u->v = random();
  u->r = 0.01 + 0.02*depth + random(0.005);
  u->d = depth;
  if(depth > 0){
    u->n = 1 + (int)random(MAX_BRANCHES);
    u->c = malloc(u->n * sizeof(void*));
    if(!u->c){ u->n = 0; u->c = 0; return u; }
    int i;
    for(i=0;i<u->n;i++){
      Node* child = createNode(depth - 1);
      ((Node**)u->c)[i] = child;
    }
  } else {
    u->n = 0; u->c = 0;
  }
  return u;
}

var evaluateNode(Node* u) {
  if(!u) return 0;
  var sum = 0; int i;
  for(i=0;i<u->n;i++) sum += evaluateNode(((Node**)u->c)[i]);
  if(G_DepthExpLast < 0 || abs(G_DTreeExp - G_DepthExpLast) > 1e-9) refreshDepthW();
  var phase = sin(u->r * Bar + sum);
  var weight = G_DepthW[u->d];
  u->v = (1 - weight)*u->v + weight*phase;
  return u->v;
}

int countNodes(Node* u){
  if(!u) return 0;
  int c=1,i;
  for(i=0;i<u->n;i++) c += countNodes(((Node**)u->c)[i]);
  return c;
}

void freeTree(Node* u) {
  if(!u) return;
  int i;
  for(i=0;i<u->n;i++) freeTree(((Node**)u->c)[i]);
  if(u->c) free(u->c);
  poolFreeNode(u);
}

// =========== NETWORK STATE & COEFFICIENTS ===========
var* G_State;
var* G_Prev;
var* G_StateSq = 0;
i16* G_Adj;
fvar* G_RP;
fvar* G_Z;
i8* G_Mode;

fvar* G_WSelf;
fvar* G_WN1;
fvar* G_WN2;
fvar* G_WGlob1;
fvar* G_WGlob2;
fvar* G_WMom;
fvar* G_WTree;
fvar* G_WAdv;

fvar* A1x; fvar* A1lam; fvar* A1mean; fvar* A1E; fvar* A1P; fvar* A1i; fvar* A1c;
fvar* A2x; fvar* A2lam; fvar* A2mean; fvar* A2E; fvar* A2P; fvar* A2i; fvar* A2c;
fvar* G1mean; fvar* G1E; fvar* G2P; fvar* G2lam;

fvar* G_TreeTerm;

i16* G_TopEq;
fvar* G_TopW;

i16* G_EqTreeId;
fvar* TAlpha;
fvar* TBeta;

fvar* G_PropRaw;
fvar* G_Prop;

// --- Per-equation hit-rate (EW average of 1-bar directional correctness) ---
#define HIT_ALPHA 0.02
#define HIT_EPS 0.0001
fvar* G_HitEW;  // [N] 0..1 EW hit-rate
int*  G_HitN;   // [N] # of scored comparisons
fvar* G_AdvPrev; // [N] previous bar's advisor output (-1..+1)
var   G_Ret1 = 0; // realized 1-bar return for scoring

// ===== Markov features exposed to DTREE (HTF) =====
var G_MCF_PBull;   // 0..1
var G_MCF_Entropy; // 0..1
var G_MCF_State;   // 0..122

// ===== EQC-1: Equation-cycle globals =====
var* G_EqTheta = 0; // [G_N] fixed angle per equation on ring (0..2*pi)
int  G_LeadEq = -1; // last bar's leader eq index
var  G_LeadTh = 0;  // leader's angle
var  G_CycPh = 0;   // wrapped cumulative phase (-pi..pi)
var  G_CycSpd = 0;  // smoothed angular speed (EW average of wrapped delta-theta)

// epoch/context & feedback
int G_Epoch = 0;
int G_CtxID = 0;
var G_FB_A = 0.7;
var G_FB_B = 0.3;

// ---------- predictability ----------
var nodePredictability(Node* t) {
  if(!t) return 0.5;
  var disp = 0; int n = t->n, i, cnt = 0;
  if(t->c){
    for(i=0;i<n;i++){
      Node* c = ((Node**)t->c)[i];
      if(c){ disp += abs(c->v - t->v); cnt++; }
    }
    if(cnt > 0) disp /= cnt;
  }
  var depthFac = 1.0/(1 + t->d);
  var rateBase = 0.01 + 0.02*t->d;
  var rateFac = exp(-25.0*abs(t->r - rateBase));
  var p = 0.5*(depthFac + rateFac);
  p = 0.5*p + 0.5*(1.0 + (-disp));
  if(p<0) p=0; if(p>1) p=1;
  return p;
}

var nodeImportance(Node* u) {
  if(!u) return 0;
  var amp = abs(u->v); if(amp>1) amp=1;
  var p = nodePredictability(u);
  var depthW = 1.0/(1.0 + u->d);
  var imp = (0.6*p + 0.4*amp) * depthW;
  return imp;
}

// ====== Elastic growth helpers ======
Node* createLeafDepth(int d) {
  Node* u = poolAllocNode();
  if(!u) return 0;
  u->v = random();
  u->r = 0.01 + 0.02*d + random(0.005);
  u->n = 0;
  u->c = 0;
  return u;
}

void growSelectiveAtDepth(Node* u, int frontierDepth, int addK) {
  if(!u) return;
  if(u->d == frontierDepth){
    int want = addK; if(want <= 0) return;
    int oldN = u->n; int newN = oldN + want;
    Node** Cnew = (Node**)malloc(newN * sizeof(void*));
    if(oldN>0 && u->c) memcpy(Cnew, u->c, oldN*sizeof(void*));
    int i;
    for(i=oldN;i<newN;i++) Cnew[i] = createLeafDepth(frontierDepth-1);
    if(u->c) free(u->c);
    u->c = Cnew;
    u->n = newN;
    return;
  }
  int j;
  for(j=0;j<u->n;j++) growSelectiveAtDepth(((Node**)u->c)[j], frontierDepth, addK);
}

void freeChildAt(Node* parent, int idx) {
  if(!parent || !parent->c) return;
  Node** C = (Node**)parent->c;
  freeTree(C[idx]);
  int i;
  for(i=idx+1;i<parent->n;i++) C[i-1] = C[i];
  parent->n--;
  if(parent->n==0){ free(parent->c); parent->c=0; }
}

void pruneSelectiveAtDepth(Node* u, int targetDepth, int keepK) {
  if(!u) return;
  if(u->d == targetDepth-1 && u->n > 0){
    int n = u->n, i, kept = 0;
    int mark[16]; for(i=0;i<16;i++) mark[i]=0;
    int iter;
    for(iter=0; iter<keepK && iter<n; iter++){
      int bestI = -1; var bestImp = -1;
      for(i=0;i<n;i++){
        if(i<16 && mark[i]==1) continue;
        var imp = nodeImportance(((Node**)u->c)[i]);
        if(imp > bestImp){ bestImp = imp; bestI = i; }
      }
      if(bestI>=0 && bestI<16){ mark[bestI]=1; kept++; }
    }
    for(i=n-1;i>=0;i--) if(i<16 && mark[i]==0) freeChildAt(u,i);
    return;
  }
  int j; for(j=0;j<u->n;j++) pruneSelectiveAtDepth(((Node**)u->c)[j], targetDepth, keepK);
}

// ----- refresh fixed ring angles per equation (0..2*pi) -----
void refreshEqAngles() {
  if(!G_EqTheta){
    G_EqTheta = (var*)malloc(G_N*sizeof(var));
    if(!G_EqTheta) return; // OOM: skip this refresh; retried on next call
  }
  int i; var twoPi = 2.*pi(); var denom = ifelse(G_TreeN>0,(var)G_TreeN,1.0);
  for(i=0;i<G_N;i++){
    int tid = safeTreeIndexFromEq(G_EqTreeId[i]);
    var u = ((var)tid)/denom; // 0..1
    G_EqTheta[i] = twoPi*u;   // 0..2*pi
  }
}

// ---------- reindex (sizes pred cache; + refresh angles) ----------
void reindexTreeAndMap() {
  G_TreeN = 0;
  indexTreeDFS(Root);
  if(G_TreeN<=0){ G_TreeN=1; if(G_TreeIdx) G_TreeIdx[0]=Root; }
  { int i; for(i=0;i<G_N;i++) G_EqTreeId[i] = (i16)(i % G_TreeN); }

  G_PredLen = G_TreeN; if(G_PredLen <= 0) G_PredLen = 1;
  if(G_PredLen > G_PredCap){
    if(G_PredNode) free(G_PredNode);
    G_PredNode = (var*)malloc(G_PredLen*sizeof(var));
    G_PredCap = G_PredLen;
  }
  G_PredCacheBar = -1;

  // NEW: compute equation ring angles after mapping
  refreshEqAngles();
  maybeShrinkTreeIdx();
  recalcTreeBytes();
}

// ====== Accuracy sentinel & elastic-depth controller ======
void acc_update(var x /*lambda*/, var y /*gamma*/) {
  var a = 0.01; // ~100-bar half-life
  ACC_mx = (1-a)*ACC_mx + a*x;
  ACC_my = (1-a)*ACC_my + a*y;
  ACC_mx2 = (1-a)*ACC_mx2 + a*(x*x);
  ACC_my2 = (1-a)*ACC_my2 + a*(y*y);
  ACC_mxy = (1-a)*ACC_mxy + a*(x*y);
  var vx = ACC_mx2 - ACC_mx*ACC_mx;
  var vy = ACC_my2 - ACC_my*ACC_my;
  var cv = ACC_mxy - ACC_mx*ACC_my;
  if(vx>0 && vy>0) G_AccCorr = cv / sqrt(vx*vy);
  else G_AccCorr = 0;
  if(!G_HaveBase){ G_AccBase = G_AccCorr; G_HaveBase = 1; }
}

var util_now() {
  int mb = mem_mb_est();
  var mem_pen = 0;
  if(mb > MEM_BUDGET_MB) mem_pen = (mb - MEM_BUDGET_MB)/(var)MEM_BUDGET_MB;
  else mem_pen = 0;
  return G_AccCorr - 0.5*mem_pen;
}

int apply_grow_step() {
  int mb = mem_mb_est();
  if(G_RT_TreeMaxDepth >= MAX_DEPTH) return 0;
  if(mb > MEM_BUDGET_MB - 2*MEM_HEADROOM_MB) return 0;
  int newFrontier = G_RT_TreeMaxDepth;
  growSelectiveAtDepth(Root, newFrontier, KEEP_CHILDREN_HI);
  G_RT_TreeMaxDepth++;
  reindexTreeAndMap();
  printf("\n[EDC] Grew depth to %i (est %i MB)", G_RT_TreeMaxDepth, mem_mb_est());
  return 1;
}

void revert_last_grow() {
  pruneSelectiveAtDepth((Node*)Root, G_RT_TreeMaxDepth, 0);
  G_RT_TreeMaxDepth--;
  reindexTreeAndMap();
  printf("\n[EDC] Reverted growth to %i (est %i MB)", G_RT_TreeMaxDepth, mem_mb_est());
}

void edc_runtime() {
  if((Bar % DEPTH_TUNE_BARS) == 0){
    var U0 = util_now();
    var trial = clamp(G_DTreeExp + G_DTreeExpDir*G_DTreeExpStep, 0.8, 2.0);
    var old = G_DTreeExp;
    G_DTreeExp = trial;
    if(util_now() + 0.005 < U0){
      G_DTreeExp = old;
      G_DTreeExpDir = -G_DTreeExpDir;
    }
  }
  int mb = mem_mb_est();
  if(G_TunePending){
    if(Bar - G_TuneStartBar >= TUNE_DELAY_BARS){
      G_UtilAfter = util_now();
      var eps = 0.01;
      if(G_UtilAfter + eps < G_UtilBefore){ revert_last_grow(); }
      else { printf("\n[EDC] Growth kept (U: %.4f -> %.4f)", G_UtilBefore, G_UtilAfter); }
      G_TunePending = 0;
      G_TuneAction = 0;
    }
    return;
  }
  if( (Bar % DEPTH_TUNE_BARS)==0 && mb <= MEM_BUDGET_MB - 2*MEM_HEADROOM_MB && G_RT_TreeMaxDepth < MAX_DEPTH ){
    G_UtilBefore = util_now();
    if(apply_grow_step()){
      G_TunePending = 1;
      G_TuneAction = 1;
      G_TuneStartBar = Bar;
    }
  }
}

// Builds "Log\\Alpha12_eq_###.csv" into outName (must be >=64)
void buildEqFileName(int idx, char* outName /*>=64*/) {
  strcpy(outName, "Log\\Alpha12_eq_");
  string idxs = strf("%03i", idx);
  strcat(outName, idxs);
  strcat(outName, ".csv");
}

// ===== consolidated EQ log =====
void writeEqHeaderOnce() {
  static int done=0; if(done) return; done=1;
  file_append("Log\\Alpha12_eq_all.csv",
    "Bar,Epoch,Ctx,EqCount,i,n1,n2,TreeId,Depth,Rate,Pred,Adv,Prop,Mode,WAdv,WTree,PBull,Entropy,MCState,ExprLen,ExprHash,tanhN,sinN,cosN\n");
}

void appendEqMetaLine(
  int bar, int epoch, int ctx,
  int i, int n1, int n2, int tid, int depth, var rate, var pred, var adv, var prop, int mode,
  var wadv, var wtree, var pbull, var ent, int mcstate, string expr)
{
  if(i >= LOG_EQ_SAMPLE) return;

  // Lightweight expression stats (safe if expr == 0)
  int eLen = 0, eHash = 0, cT = 0, cS = 0, cC = 0;
  if(expr){
    eLen  = (int)strlen(expr);
    eHash = (int)djb2_hash(expr);
    cT    = countSubStr(expr,"tanh(");
    cS    = countSubStr(expr,"sin(");
    cC    = countSubStr(expr,"cos(");
  } else {
    eHash = (int)djb2_hash("");
  }

  // One trimmed CSV line; order matches writeEqHeaderOnce()
  file_append("Log\\Alpha12_eq_all.csv",
    strf("%i,%i,%i,%i,%i,%i,%i,%i,%i,%.4f,%.4f,%.4f,%.4f,%i,%.3f,%.3f,%.4f,%.4f,%i,%i,%i,%i,%i,%i\n",
      bar, epoch, ctx, NET_EQNS, i, n1, n2, tid, depth,
      rate, pred, adv, prop, mode, wadv, wtree, pbull, ent,
      mcstate, eLen, eHash, cT, cS, cC));
}

// --------- allocation ----------
void randomizeRP() {
  int K=G_K,N=G_N,k,j;
  for(k=0;k<K;k++)
    for(j=0;j<N;j++)
      G_RP[k*N+j] = ifelse(random(1) < 0.5, -1.0, 1.0);
}

// === (8/9) Use effective K + per-bar guard ===
int G_ProjBar = -1;
int G_ProjK = -1;

int keffClamped(){
  int K = G_Keff;
  if(K < 0) K = 0;
  if(K > G_K) K = G_K;
  return K;
}

void computeProjection()
{
  if(!G_RP || !G_Z || !G_StateSq) return;

  int K = keffClamped();
  if(G_ProjBar == Bar && G_ProjK == K) return;

  int N = G_N, k, j;
  for(k = 0; k < K; k++){
    var acc = 0;
    for(j = 0; j < N; j++) acc += (var)G_RP[k*N + j] * G_StateSq[j];
    G_Z[k] = (fvar)acc;
  }
  G_ProjBar = Bar;
  G_ProjK   = K;
}
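
// Design note (an interpretation, not from the original comments):
// randomizeRP() fills G_RP with a Rademacher (+/-1) matrix, so G_Z is a
// K-dimensional random projection of the squared-state vector, a
// Johnson-Lindenstrauss style sketch that roughly preserves dot
// products of G_StateSq at a fraction of the memory. G_Keff lets the
// runtime halve the effective K under memory pressure without
// reallocating G_RP or G_Z.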

// D) Compact allocate/free
void allocateNet() {
  int N = G_N, D = G_D, K = G_K;

  // core
  G_State = (var*)malloc(N*sizeof(var));
  G_Prev  = (var*)malloc(N*sizeof(var));
  G_StateSq = (var*)malloc(N*sizeof(var));

  // graph / projection
  G_Adj = (i16*) malloc(N*D*sizeof(i16));
  G_RP  = (fvar*)malloc(K*N*sizeof(fvar));
  G_Z   = (fvar*)malloc(K*sizeof(fvar));
  G_Mode= (i8*)  malloc(N*sizeof(i8));

  // weights & params
  G_WSelf  = (fvar*)malloc(N*sizeof(fvar));
  G_WN1    = (fvar*)malloc(N*sizeof(fvar));
  G_WN2    = (fvar*)malloc(N*sizeof(fvar));
  G_WGlob1 = (fvar*)malloc(N*sizeof(fvar));
  G_WGlob2 = (fvar*)malloc(N*sizeof(fvar));
  G_WMom   = (fvar*)malloc(N*sizeof(fvar));
  G_WTree  = (fvar*)malloc(N*sizeof(fvar));
  G_WAdv   = (fvar*)malloc(N*sizeof(fvar));

  A1x = (fvar*)malloc(N*sizeof(fvar));
  A1lam=(fvar*)malloc(N*sizeof(fvar));
  A1mean=(fvar*)malloc(N*sizeof(fvar));
  A1E=(fvar*)malloc(N*sizeof(fvar));
  A1P=(fvar*)malloc(N*sizeof(fvar));
  A1i=(fvar*)malloc(N*sizeof(fvar));
  A1c=(fvar*)malloc(N*sizeof(fvar));

  A2x = (fvar*)malloc(N*sizeof(fvar));
  A2lam=(fvar*)malloc(N*sizeof(fvar));
  A2mean=(fvar*)malloc(N*sizeof(fvar));
  A2E=(fvar*)malloc(N*sizeof(fvar));
  A2P=(fvar*)malloc(N*sizeof(fvar));
  A2i=(fvar*)malloc(N*sizeof(fvar));
  A2c=(fvar*)malloc(N*sizeof(fvar));

  G1mean=(fvar*)malloc(N*sizeof(fvar));
  G1E   =(fvar*)malloc(N*sizeof(fvar));
  G2P   =(fvar*)malloc(N*sizeof(fvar));
  G2lam =(fvar*)malloc(N*sizeof(fvar));

  TAlpha=(fvar*)malloc(N*sizeof(fvar));
  TBeta =(fvar*)malloc(N*sizeof(fvar));

  G_TreeTerm=(fvar*)malloc(N*sizeof(fvar));

  G_TopEq=(i16*) malloc(N*sizeof(i16));
  G_TopW=(fvar*)malloc(N*sizeof(fvar));

  G_PropRaw=(fvar*)malloc(N*sizeof(fvar));
  G_Prop   =(fvar*)malloc(N*sizeof(fvar));

  if(LOG_EXPR_TEXT) G_Sym = (string*)malloc(N*sizeof(char*));
  else G_Sym = 0;

  // tree index
  G_TreeCap = 128;
  G_TreeIdx = (Node**)malloc(G_TreeCap*sizeof(Node*));
  G_TreeN = 0;
  G_EqTreeId = (i16*)malloc(N*sizeof(i16));

  // initialize adjacency
  { int t; for(t=0; t<N*D; t++) G_Adj[t] = -1; }

  // initialize state and parameters
  {
    int i;
    for(i=0;i<N;i++){
      G_State[i] = random();
      G_Prev[i]  = G_State[i];
      G_StateSq[i]= G_State[i]*G_State[i];
      G_Mode[i] = 0;

      G_WSelf[i]=0.5; G_WN1[i]=0.2; G_WN2[i]=0.2;
      G_WGlob1[i]=0.1; G_WGlob2[i]=0.1; G_WMom[i]=0.05;
      G_WTree[i]=0.15; G_WAdv[i]=0.15;

      A1x[i]=1; A1lam[i]=0.1; A1mean[i]=0; A1E[i]=0; A1P[i]=0; A1i[i]=0; A1c[i]=0;
      A2x[i]=1; A2lam[i]=0.1; A2mean[i]=0; A2E[i]=0; A2P[i]=0; A2i[i]=0; A2c[i]=0;

      G1mean[i]=1.0; G1E[i]=0.001;
      G2P[i]=0.6; G2lam[i]=0.3;

      TAlpha[i]=0.8; TBeta[i]=25.0;

      G_TreeTerm[i]=0;

      G_TopEq[i]=-1; G_TopW[i]=0;

      G_PropRaw[i]=1; G_Prop[i]=1.0/G_N;

      if(LOG_EXPR_TEXT){
        G_Sym[i] = (char*)malloc(EXPR_MAXLEN);
        if(G_Sym[i]) strcpy(G_Sym[i],"");
      }
    }
  }

  // --- Hit-rate state ---
  G_HitEW = (fvar*)malloc(N*sizeof(fvar));
  G_HitN  = (int*) malloc(N*sizeof(int));
  G_AdvPrev = (fvar*)malloc(N*sizeof(fvar));
  { int i; for(i=0;i<N;i++){ G_HitEW[i] = 0.5; G_HitN[i] = 0; G_AdvPrev[i] = 0; } }

  computeMemFixedBytes();

  if(G_PredNode) free(G_PredNode);
  G_PredLen = G_TreeN; if(G_PredLen<=0) G_PredLen=1;
  G_PredNode = (var*)malloc(G_PredLen*sizeof(var));
  G_PredCap  = G_PredLen;
  G_PredCacheBar = -1;
}

void freeNet() {
  int i;
  if(G_State)free(G_State);
  if(G_Prev)free(G_Prev);
  if(G_StateSq)free(G_StateSq);
  if(G_Adj)free(G_Adj);
  if(G_RP)free(G_RP);
  if(G_Z)free(G_Z);
  if(G_Mode)free(G_Mode);

  if(G_WSelf)free(G_WSelf);
  if(G_WN1)free(G_WN1);
  if(G_WN2)free(G_WN2);
  if(G_WGlob1)free(G_WGlob1);
  if(G_WGlob2)free(G_WGlob2);
  if(G_WMom)free(G_WMom);
  if(G_WTree)free(G_WTree);
  if(G_WAdv)free(G_WAdv);

  if(A1x)free(A1x); if(A1lam)free(A1lam); if(A1mean)free(A1mean);
  if(A1E)free(A1E); if(A1P)free(A1P); if(A1i)free(A1i); if(A1c)free(A1c);

  if(A2x)free(A2x); if(A2lam)free(A2lam); if(A2mean)free(A2mean);
  if(A2E)free(A2E); if(A2P)free(A2P); if(A2i)free(A2i); if(A2c)free(A2c);

  if(G1mean)free(G1mean); if(G1E)free(G1E);
  if(G2P)free(G2P); if(G2lam)free(G2lam);
  if(TAlpha)free(TAlpha); if(TBeta)free(TBeta);

  if(G_TreeTerm)free(G_TreeTerm);

  if(G_TopEq)free(G_TopEq);
  if(G_TopW)free(G_TopW);

  if(G_EqTreeId)free(G_EqTreeId);
  if(G_PropRaw)free(G_PropRaw);
  if(G_Prop)free(G_Prop);

  if(G_Sym){
    for(i=0;i<G_N;i++) if(G_Sym[i]) free(G_Sym[i]);
    free(G_Sym);
  }
  if(G_TreeIdx)free(G_TreeIdx);

  if(G_PredNode)free(G_PredNode);
  if(G_EqTheta) free(G_EqTheta); // NEW: free ring angles
}

// --------- DTREE feature builders ----------
var nrm_s(var x) { return sat100(100.*tanh(x)); }
var nrm_scl(var x, var s) { return sat100(100.*tanh(s*x)); }

void buildEqFeatures(int i, var lambda, var mean, var energy, var power, var pred, var* S /*ADV_EQ_NF*/) {
  int tid = safeTreeIndexFromEq(G_EqTreeId[i]);
  Node* t = treeAt(tid);

  // equation-cycle alignment
  var th_i = ifelse(G_EqTheta!=0, G_EqTheta[i], 0);
  var dphi = angDiff(G_CycPh, th_i);
  var alignC = cos(dphi); // +1 aligned, -1 opposite
  var alignS = sin(dphi); // quadrature

  S[0]  = nrm_s(G_State[i]);
  S[1]  = nrm_s(mean);
  S[2]  = nrm_scl(power,0.05);
  S[3]  = nrm_scl(energy,0.01);
  S[4]  = nrm_s(lambda);
  S[5]  = sat100(200.0*(pred-0.5));
  S[6]  = sat100(200.0*((var)t->d/MAX_DEPTH)-100.0);
  S[7]  = sat100(1000.0*t->r);
  S[8]  = nrm_s(G_TreeTerm[i]);
  S[9]  = sat100( (200.0/3.0) * (var)( (int)G_Mode[i] ) - 100.0 );

  // HTF (1H)
  S[10] = sat100(200.0*(G_MCF_PBull-0.5));
  S[11] = sat100(200.0*(G_MCF_Entropy-0.5));

  S[12] = sat100(200.0*((var)G_HitEW[i] - 0.5));
  S[13] = sat100(100.*alignC);
  S[14] = sat100(100.*alignS);

  // NEW: 5M & Relation Markov features
  S[15] = sat100(200.0*(ML_PBullNext - 0.5)); // 5M PBull
  S[16] = sat100(200.0*(ML_Entropy   - 0.5)); // 5M Entropy
  S[17] = sat100(200.0*(MR_PBullNext - 0.5)); // Relation PBull
  S[18] = sat100(200.0*(MR_Entropy   - 0.5)); // Relation Entropy
  sanitize(S,ADV_EQ_NF);
}

void buildPairFeatures(int i,int j, var lambda, var mean, var energy, var power, var* P /*ADV_PAIR_NF*/) {
  int tid_i = safeTreeIndexFromEq(G_EqTreeId[i]);
  int tid_j = safeTreeIndexFromEq(G_EqTreeId[j]);
  Node* ti = treeAt(tid_i);
  Node* tj = treeAt(tid_j);
  var predi = predByTid(tid_i);
  var predj = predByTid(tid_j);

  P[0]=nrm_s(G_State[i]);
  P[1]=nrm_s(G_State[j]);
  P[2]=sat100(200.0*((var)ti->d/MAX_DEPTH)-100.0);
  P[3]=sat100(200.0*((var)tj->d/MAX_DEPTH)-100.0);
  P[4]=sat100(1000.0*ti->r);
  P[5]=sat100(1000.0*tj->r);
  P[6]=sat100(abs(P[2]-P[3]));
  P[7]=sat100(abs(P[4]-P[5]));
  P[8]=sat100(100.0*(predi+predj-1.0));
  P[9]=nrm_s(lambda);
  P[10]=nrm_s(mean);
  P[11]=nrm_scl(power,0.05);
  sanitize(P,ADV_PAIR_NF);
}

// --- Safe neighbor helpers & adjacency sanitizer ---
int adjSafe(int i, int d){
  int N = G_N, D = G_D;
  if(!G_Adj || N <= 1 || D <= 0) return 0;
  if(d < 0) d = 0;
  if(d >= D) d = d % D;
  int v = G_Adj[i*D + d];
  if(v < 0 || v >= N || v == i) v = (i + 1) % N;
  return v;
}

void sanitizeAdjacency(){
  if(!G_Adj) return;
  int N = G_N, D = G_D, i, d;
  for(i=0;i<N;i++){
    for(d=0; d<D; d++){
      i16 *p = &G_Adj[i*D + d];
      if(*p < 0 || *p >= N || *p == i){
        int r = (int)random(N);
        if(r == i) r = (r+1) % N;
        *p = (i16)r;
      }
    }
    if(D >= 2 && G_Adj[i*D+0] == G_Adj[i*D+1]){
      int r2 = (G_Adj[i*D+1] + 1) % N;
      if(r2 == i) r2 = (r2+1) % N;
      G_Adj[i*D+1] = (i16)r2;
    }
  }
}

// --------- advisor helpers (NEW) ----------
var adviseSeed(int i, var lambda, var mean, var energy, var power) {
  static int seedBar = -1;
  static int haveSeed[NET_EQNS];
  static var seedVal[NET_EQNS];

  if(seedBar != Bar){
    int k; for(k=0;k<NET_EQNS;k++) haveSeed[k] = 0;
    seedBar = Bar;
  }
  if(i < 0) i = 0; if(i >= NET_EQNS) i = i % NET_EQNS;

  if(!allowAdvise(i)) return 0;
  if(!haveSeed[i]){
    seedVal[i] = adviseEq(i, lambda, mean, energy, power); // trains (once) in Train mode
    haveSeed[i] = 1;
  }
  return seedVal[i];
}

var mix01(var a, int salt){
  var z = sin(123.456*a + 0.001*salt) + cos(98.765*a + 0.002*salt);
  return tanh(0.75*z);
}
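
// Design note: mix01() is a deterministic "salt" mixer; one DTREE seed
// per equation is stretched into a couple dozen decorrelated values in
// (-1..1) (different salts give near-orthogonal phases), so a single
// advise call can parameterize every coefficient of an equation
// reproducibly.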

// --------- advise wrappers (single-equation only) ----------
var adviseEq(int i, var lambda, var mean, var energy, var power) {
  if(!allowAdvise(i)) return 0;
  if(is(INITRUN)) return 0;

  int tight = (mem_mb_est() >= MEM_BUDGET_MB - MEM_HEADROOM_MB);
  if(tight) return 0;

  if(G_HitN[i] > 32){
    var h = (var)G_HitEW[i];
    var gate = 0.40 + 0.15*(1.0 - MC_Entropy);
    if(h < gate){
      if(random() >= 0.5) return 0;
    }
  }

  int tid = safeTreeIndexFromEq(G_EqTreeId[i]);
  var pred = predByTid(tid);

  var S[ADV_EQ_NF];
  buildEqFeatures(i,lambda,mean,energy,power,pred,S);

  var obj = 0;
  if(Train){
    obj = sat100(100.0*tanh(0.6*lambda + 0.4*mean));
    var prior = 0.75 + 0.5*((var)G_HitEW[i] - 0.5); // 0.5..1.0
    obj *= prior;

    // --- EQC-5: cycle priors (reward aligned & non-stalled rotation)
    { var th_i = ifelse(G_EqTheta!=0, G_EqTheta[i], 0);
      var dphi = angDiff(G_CycPh, th_i);
      var align = 0.90 + 0.20*(0.5*(cos(dphi)+1.0));
      var spdOK = 0.90 + 0.20*clamp(abs(G_CycSpd)/(0.15), 0., 1.);
      obj *= align * spdOK;
    }
  }

  int objI = (int)obj;
  var a = adviseLong(DTREE, objI, S, ADV_EQ_NF);
  return a/100.;
}

var advisePair(int i,int j, var lambda, var mean, var energy, var power) {
  return 0;
}

// --------- heuristic pair scoring ----------
var scorePairSafe(int i, int j, var lambda, var mean, var energy, var power) {
  int ti = safeTreeIndexFromEq(G_EqTreeId[i]);
  int tj = safeTreeIndexFromEq(G_EqTreeId[j]);
  Node *ni = treeAt(ti), *nj = treeAt(tj);

  var simD = 1.0 / (1.0 + abs((var)ni->d - (var)nj->d));
  var dr = 50.0*abs(ni->r - nj->r);
  var simR = 1.0 / (1.0 + dr);

  var predi = predByTid(ti);
  var predj = predByTid(tj);
  var pred  = 0.5*(predi + predj);

  var score = 0.5*pred + 0.3*simD + 0.2*simR;
  return 2.0*score - 1.0;
}
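
// Worked example (illustrative): equal depths give simD = 1; a rate gap
// of 0.02 gives dr = 1 and simR = 0.5; mean predictability 0.6 yields
// score = 0.5*0.6 + 0.3*1 + 0.2*0.5 = 0.7, returned as 2*0.7 - 1 = 0.4.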

// --------- adjacency selection (heuristic only) ----------
void rewireAdjacency_DTREE(var lambda, var mean, var energy, var power) {
  int N=G_N, D=G_D, i, d, c, best, cand;
  for(i=0;i<N;i++){
    for(d=0; d<D; d++){
      var bestScore = -2; best = -1;
      for(c=0;c<G_CandNeigh;c++){
        cand = (int)random(N);
        if(cand==i) continue;

        // avoid duplicate neighbors
        int clash=0, k;
        for(k=0;k<d;k++){
          int prev = G_Adj[i*D+k];
          if(prev>=0 && prev==cand){ clash=1; break; }
        }
        if(clash) continue;

        var s = scorePairSafe(i,cand,lambda,mean,energy,power);
        if(s > bestScore){ bestScore=s; best=cand; }
      }
      if(best<0){
        do{ best = (int)random(N);} while(best==i);
      }
      G_Adj[i*D + d] = (i16)best;
    }
  }
}

// --------- DTREE-created coefficients, modes & proportions ----------
var mapA(var a,var lo,var hi){ return mapUnit(a,lo,hi); }

void synthesizeEquationFromDTREE(int i, var lambda, var mean, var energy, var power) {
  var seed = adviseSeed(i,lambda,mean,energy,power);

  G_Mode[i] = (int)(abs(1000*seed)) & 3;

  G_WSelf[i]  = (fvar)mapA(mix01(seed, 11), 0.15, 0.85);
  G_WN1[i]    = (fvar)mapA(mix01(seed, 12), 0.05, 0.35);
  G_WN2[i]    = (fvar)mapA(mix01(seed, 13), 0.05, 0.35);
  G_WGlob1[i] = (fvar)mapA(mix01(seed, 14), 0.05, 0.30);
  G_WGlob2[i] = (fvar)mapA(mix01(seed, 15), 0.05, 0.30);
  G_WMom[i]   = (fvar)mapA(mix01(seed, 16), 0.02, 0.15);
  G_WTree[i]  = (fvar)mapA(mix01(seed, 17), 0.05, 0.35);
  G_WAdv[i]   = (fvar)mapA(mix01(seed, 18), 0.05, 0.35);

  A1x[i]   = (fvar)(randsign()*mapA(mix01(seed, 21), 0.6, 1.2));
  A1lam[i] = (fvar)(randsign()*mapA(mix01(seed, 22), 0.05,0.35));
  A1mean[i]= (fvar) mapA(mix01(seed, 23),-0.30,0.30);
  A1E[i]   = (fvar) mapA(mix01(seed, 24),-0.0015,0.0015);
  A1P[i]   = (fvar) mapA(mix01(seed, 25),-0.30,0.30);
  A1i[i]   = (fvar) mapA(mix01(seed, 26),-0.02,0.02);
  A1c[i]   = (fvar) mapA(mix01(seed, 27),-0.20,0.20);

  A2x[i]   = (fvar)(randsign()*mapA(mix01(seed, 31), 0.6, 1.2));
  A2lam[i] = (fvar)(randsign()*mapA(mix01(seed, 32), 0.05,0.35));
  A2mean[i]= (fvar) mapA(mix01(seed, 33),-0.30,0.30);
  A2E[i]   = (fvar) mapA(mix01(seed, 34),-0.0015,0.0015);
  A2P[i]   = (fvar) mapA(mix01(seed, 35),-0.30,0.30);
  A2i[i]   = (fvar) mapA(mix01(seed, 36),-0.02,0.02);
  A2c[i]   = (fvar) mapA(mix01(seed, 37),-0.20,0.20);

  G1mean[i] = (fvar) mapA(mix01(seed, 41), 0.4, 1.6);
  G1E[i]    = (fvar) mapA(mix01(seed, 42),-0.004,0.004);
  G2P[i]    = (fvar) mapA(mix01(seed, 43), 0.1, 1.2);
  G2lam[i]  = (fvar) mapA(mix01(seed, 44), 0.05, 0.7);

  TAlpha[i] = (fvar) mapA(mix01(seed, 51), 0.3, 1.5);
  TBeta[i]  = (fvar) mapA(mix01(seed, 52), 6.0, 50.0);

  G_PropRaw[i] = (fvar)(0.01 + 0.99*(0.5*(seed+1.0)));

  { // reliability boost
    var boost = 0.75 + 0.5*(var)G_HitEW[i];
    G_PropRaw[i] = (fvar)((var)G_PropRaw[i] * boost);
  }
}

void normalizeProportions() {
  int N=G_N,i;
  var s=0;
  for(i=0;i<N;i++) s += G_PropRaw[i];
  if(s<=0) {
    for(i=0;i<N;i++) G_Prop[i] = (fvar)(1.0/N);
    return;
  }
  for(i=0;i<N;i++) G_Prop[i] = (fvar)(G_PropRaw[i]/s);
}

// H) dtreeTerm gets predictabilities on demand
var dtreeTerm(int i, int* outTopEq, var* outTopW) {
  int N=G_N,j;

  int tid_i = safeTreeIndexFromEq(G_EqTreeId[i]);
  Node* ti=treeAt(tid_i);
  int di=ti->d; var ri=ti->r;
  var predI = predByTid(tid_i);

  var alpha=TAlpha[i], beta=TBeta[i];

  var sumw=0, acc=0, bestW=-1; int bestJ=-1;

  for(j=0;j<N;j++){
    if(j==i) continue;
    int tid_j = safeTreeIndexFromEq(G_EqTreeId[j]);
    Node* tj=treeAt(tid_j);
    int dj=tj->d; var rj=tj->r;
    var predJ = predByTid(tid_j);

    var w = exp(-alpha*abs(di-dj)) * exp(-beta*abs(ri-rj));
    var predBoost = 0.5 + 0.5*(predI*predJ);
    var propBoost = 0.5 + 0.5*( (G_Prop[i] + G_Prop[j]) );
    w *= predBoost * propBoost;

    var pairAdv = scorePairSafe(i,j,0,0,0,0);
    var pairBoost = 0.75 + 0.25*(0.5*(pairAdv+1.0));
    w *= pairBoost;

    sumw += w;
    acc  += w*G_State[j];
    if(w>bestW){bestW=w; bestJ=j;}
  }

  if(outTopEq) *outTopEq = bestJ;
  if(outTopW)  *outTopW  = ifelse(sumw>0, bestW/sumw, 0);
  if(sumw>0) return acc/sumw;
  return 0;
}
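
// Weighting sketch (using the init values TAlpha = 0.8, TBeta = 25):
// a neighbor one depth level away with a rate gap of 0.02 gets
// w = exp(-0.8)*exp(-0.5) ~ 0.27 before the pred/prop/pair boosts,
// while identical depth and rate gives w = 1; the ensemble term is
// therefore dominated by structurally similar, predictable equations.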

// --------- expression builder (capped & optional) ----------
void buildSymbolicExpr(int i, int n1, int n2) {
  if(LOG_EXPR_TEXT){
    string s = G_Sym[i];
    s[0]=0;

    string a1 = strf("(%.3f*x[%i] + %.3f*lam + %.3f*mean + %.5f*E + %.3f*P + %.3f*i + %.3f)",
                     (var)A1x[i], n1, (var)A1lam[i], (var)A1mean[i], (var)A1E[i], (var)A1P[i], (var)A1i[i], (var)A1c[i]);
    string a2 = strf("(%.3f*x[%i] + %.3f*lam + %.3f*mean + %.5f*E + %.3f*P + %.3f*i + %.3f)",
                     (var)A2x[i], n2, (var)A2lam[i], (var)A2mean[i], (var)A2E[i], (var)A2P[i], (var)A2i[i], (var)A2c[i]);

    strlcat_safe(s, "x[i]_next = ", EXPR_MAXLEN);
    strlcat_safe(s, strf("%.3f*x[i] + ", (var)G_WSelf[i]), EXPR_MAXLEN);

    if(G_Mode[i]==1){
      strlcat_safe(s, strf("%.3f*tanh%s + ", (var)G_WN1[i], a1), EXPR_MAXLEN);
      strlcat_safe(s, strf("%.3f*sin%s + ",  (var)G_WN2[i], a2), EXPR_MAXLEN);
    } else if(G_Mode[i]==2){
      strlcat_safe(s, strf("%.3f*cos%s + ",  (var)G_WN1[i], a1), EXPR_MAXLEN);
      strlcat_safe(s, strf("%.3f*tanh%s + ", (var)G_WN2[i], a2), EXPR_MAXLEN);
    } else {
      strlcat_safe(s, strf("%.3f*sin%s + ",  (var)G_WN1[i], a1), EXPR_MAXLEN);
      strlcat_safe(s, strf("%.3f*cos%s + ",  (var)G_WN2[i], a2), EXPR_MAXLEN);
    }

    strlcat_safe(s, strf("%.3f*tanh(%.3f*mean + %.5f*E) + ",
                         (var)G_WGlob1[i], (var)G1mean[i], (var)G1E[i]), EXPR_MAXLEN);
    strlcat_safe(s, strf("%.3f*sin(%.3f*P + %.3f*lam) + ",
                         (var)G_WGlob2[i], (var)G2P[i], (var)G2lam[i]), EXPR_MAXLEN);
    strlcat_safe(s, strf("%.3f*(x[i]-x_prev[i]) + ", (var)G_WMom[i]), EXPR_MAXLEN);
    strlcat_safe(s, strf("Prop[i]=%.4f; ", (var)G_Prop[i]), EXPR_MAXLEN);
    strlcat_safe(s, strf("%.3f*DT(i) + ", (var)G_WTree[i]), EXPR_MAXLEN);
    strlcat_safe(s, strf("%.3f*DTREE(i)", (var)G_WAdv[i]), EXPR_MAXLEN);
  }
}

// ======================= NEW: Range builders for chunked rewires =======================

// Rewire adjacency for i in [i0..i1), keeps others unchanged
void rewireAdjacency_DTREE_range(int i0,int i1, var lambda, var mean, var energy, var power) {
  int N=G_N, D=G_D, i, d, c, best, cand;
  if(i0<0) i0=0; if(i1>N) i1=N;

  for(i=i0;i<i1;i++){
    for(d=0; d<D; d++){
      var bestScore = -2; best = -1;
      for(c=0;c<G_CandNeigh;c++){
        cand = (int)random(N);
        if(cand==i) continue;

        int clash=0, k;
        for(k=0;k<d;k++){
          int prev = G_Adj[i*D+k];
          if(prev>=0 && prev==cand){ clash=1; break; }
        }
        if(clash) continue;

        var s = scorePairSafe(i,cand,lambda,mean,energy,power);
        if(s > bestScore){ bestScore=s; best=cand; }
      }
      if(best<0){
        do{ best = (int)random(N);} while(best==i);
      }
      G_Adj[i*D + d] = (i16)best;
    }
  }
}

// Synthesize equations only for [i0..i1)
void synthesizeEquation_range(int i0,int i1, var lambda, var mean, var energy, var power) {
  int i; if(i0<0) i0=0; if(i1>G_N) i1=G_N;
  for(i=i0;i<i1;i++) synthesizeEquationFromDTREE(i,lambda,mean,energy,power);
}

// Build expr text only for [i0..i1) — guarded at runtime for lite-C compatibility
void buildSymbolicExpr_range(int i0,int i1) {
  if(!LOG_EXPR_TEXT) return;  // 0 = omit; 1 = build
  int i; if(i0<0) i0=0; if(i1>G_N) i1=G_N;
  for(i=i0;i<i1;i++){
    int n1 = adjSafe(i,0);
    int n2 = ifelse(G_D >= 2, adjSafe(i,1), n1);
    buildSymbolicExpr(i,n1,n2);
  }
}

// ======================= NEW: Rolling rewire cursor state =======================
int G_RewirePos = 0;     // next equation index to process
int G_RewirePasses = 0;  // #completed full passes since start
int G_RewireBatch = REWIRE_BATCH_EQ_5M; // effective batch for this bar

// ======================= NEW: Rolling cursor for heavy per-bar updates =======================
int G_UpdatePos = 0;     // next equation index to do heavy work
int G_UpdatePasses = 0;  // #completed full heavy passes

// ======================= NEW: Chunked rewire orchestrator =======================
// Run part of a rewire: only a slice of equations this bar.
// Returns 1 if a full pass just completed (we can normalize), else 0.
int rewireEpochChunk(var lambda, var mean, var energy, var power, int batch) {
  int N = G_N;
  if(N <= 0) return 0;
  if(batch < REWIRE_MIN_BATCH) batch = REWIRE_MIN_BATCH;

  if(G_RewirePos >= N) G_RewirePos = 0;
  int i0 = G_RewirePos;
  int i1 = i0 + batch; if(i1 > N) i1 = N;

  // Adapt neighbor breadth by entropy (your original heuristic)
  G_CandNeigh = ifelse(MC_Entropy < 0.45, CAND_NEIGH+4, CAND_NEIGH);

  // Rewire only the target slice
  rewireAdjacency_DTREE_range(i0,i1, lambda,mean,energy,power);
  sanitizeAdjacency(); // cheap; can keep global
  synthesizeEquation_range(i0,i1, lambda,mean,energy,power);
  buildSymbolicExpr_range(i0,i1);

  // advance cursor
  G_RewirePos = i1;

  // Full pass finished?
  if(G_RewirePos >= N){
    G_RewirePos = 0;
    G_RewirePasses += 1;
    return 1;
  }
  return 0;
}
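
// Pacing sketch (assuming illustrative values NET_EQNS = 256 and
// REWIRE_BATCH_EQ_5M = 32): a full rewire pass completes every 8 bars
// on 5m bars, or in ~3 slices with the H1 batch of 96. Proportions are
// re-normalized only when a pass completes (see maybeRewireNow below),
// so G_Prop can be up to one pass stale between normalizations.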

// ---------- one-time rewire init (call central reindex) ----------
void rewireInit() {
  randomizeRP();
  computeProjection();
  reindexTreeAndMap(); // ensures G_PredNode sized before any use
}

// ----------------------------------------------------------------------
// I) Trim rewireEpoch -> now used for one-shot/initialization full pass
// ----------------------------------------------------------------------
void rewireEpoch(var lambda, var mean, var energy, var power) {
  if(G_N <= 0) return; // guard: the chunk loop below never completes on an empty net
  // Backward compatibility: do one full pass immediately
  int done = 0;
  while(!done){
    done = rewireEpochChunk(lambda,mean,energy,power, REWIRE_BATCH_EQ_H1);
  }
  // After full pass, normalize proportions once (exact)
  normalizeProportions();

  // Context hash (unchanged)
  {
    int D = G_D, i, total = G_N * D;
    unsigned int h = 2166136261u;
    for(i=0;i<total;i++){
      unsigned int x = (unsigned int)G_Adj[i];
      h ^= x + 0x9e3779b9u + (h<<6) + (h>>2);
    }
    G_CtxID = (int)((h ^ ((unsigned int)G_Epoch<<8)) & 0x7fffffff);
  }
}

// coarse projection-based driver for gamma
var projectNet() {
  int N=G_N,i;
  var sum=0,sumsq=0,cross=0;
  for(i=0;i<N;i++){
    sum+=G_State[i];
    sumsq+=G_State[i]*G_State[i];
    if(i+1<N) cross+=G_State[i]*G_State[i+1];
  }
  var mean=sum/N, corr=cross/(N-1);
  return 0.6*tanh(mean + 0.001*sumsq) + 0.4*sin(corr);
}


Consensus Gate Orchestrator (continue) [Re: TipmyPip] #488925
09/27/25 10:05
09/27/25 10:05
Joined: Sep 2017
Posts: 164
TipmyPip Online OP
Member
TipmyPip  Online OP
Member

Joined: Sep 2017
Posts: 164
continuation... cool

Code
// ----------------------------------------------------------------------
// J) Heavy per-bar update slice (uses rolling G_UpdatePos cursor)
// ----------------------------------------------------------------------
var f_affine(var x, var lam, var mean, var E, var P, var i, var c){
  return x + lam*mean + E + P + i + c; // small helper used inside nonlins
}

var nonlin1(int i, int n1, var lam, var mean, var E, var P){
  var x   = G_State[n1];
  var arg = (var)A1x[i]*x + (var)A1lam[i]*lam + (var)A1mean[i]*mean + (var)A1E[i]*E + (var)A1P[i]*P + (var)A1i[i]*i + (var)A1c[i];
  return arg;
}
var nonlin2(int i, int n2, var lam, var mean, var E, var P){
  var x   = G_State[n2];
  var arg = (var)A2x[i]*x + (var)A2lam[i]*lam + (var)A2mean[i]*mean + (var)A2E[i]*E + (var)A2P[i]*P + (var)A2i[i]*i + (var)A2c[i];
  return arg;
}

// returns 1 if a full heavy-update pass finishes, else 0
int heavyUpdateChunk(var lambda, var mean, var energy, var power, int batch){
  int N = G_N;
  if(N <= 0) return 0;
  if(batch < UPDATE_MIN_BATCH) batch = UPDATE_MIN_BATCH;

  if(G_UpdatePos >= N) G_UpdatePos = 0;
  int i0 = G_UpdatePos;
  int i1 = i0 + batch; if(i1 > N) i1 = N;

  // projection may be reused by multiple chunks within the same bar
  computeProjection();

  int i;
  for(i=i0;i<i1;i++){
    // --- neighbors (safe) ---
    int n1 = adjSafe(i,0);
    int n2 = ifelse(G_D>=2, adjSafe(i,1), n1);

    // --- DTREE ensemble term (also returns top meta) ---
    int topEq = -1; var topW = 0;
    var treeT = dtreeTerm(i, &topEq, &topW);
    G_TreeTerm[i] = (fvar)treeT;

    G_TopEq[i] = (i16)topEq;
    G_TopW[i]  = (fvar)topW;

    // --- advisor (data-driven) ---
    var adv = adviseEq(i, lambda, mean, energy, power);

    // --- nonlinear pair terms controlled by Mode ---
    var a1 = nonlin1(i,n1,lambda,mean,energy,power);
    var a2 = nonlin2(i,n2,lambda,mean,energy,power);
    var t1, t2;
    if(G_Mode[i]==1){        t1 = tanh(a1); t2 = sin(a2);
    } else if(G_Mode[i]==2){ t1 = cos(a1);  t2 = tanh(a2);
    } else {                 t1 = sin(a1);  t2 = cos(a2); }

    // --- global couplings & momentum ---
    var glob1 = tanh( (var)G1mean[i]*mean + (var)G1E[i]*energy );
    var glob2 = sin ( (var)G2P[i]*power + (var)G2lam[i]*lambda );
    var mom   = (G_State[i] - G_Prev[i]);

    // --- next state synthesis ---
    var xnext =
        (var)G_WSelf[i]*G_State[i]
      + (var)G_WN1[i]*t1
      + (var)G_WN2[i]*t2
      + (var)G_WGlob1[i]*glob1
      + (var)G_WGlob2[i]*glob2
      + (var)G_WMom[i]*mom
      + (var)G_WTree[i]*treeT
      + (var)G_WAdv[i]*adv;

    // --- stability clamp & book-keeping ---
    xnext = clamp(xnext, -10, 10);
    G_Prev[i] = G_State[i];
    G_State[i]= xnext;
    G_StateSq[i] = xnext*xnext;

    // --- keep last advisor output for hit-rate scoring next bar ---
    G_AdvPrev[i] = (fvar)adv;

    // --- lightweight per-eq meta logging (sampled) ---
    if(!G_LogsOff && (Bar % LOG_EVERY)==0 && (i < LOG_EQ_SAMPLE)){
      int tid = safeTreeIndexFromEq(G_EqTreeId[i]);
      Node* tnode = treeAt(tid);
      int nodeDepth = 0;
      if(tnode) nodeDepth = tnode->d;
      var rate = (var)TBeta[i]; // any per-eq scalar to inspect quickly
      var pred = predByTid(tid);

      // last parameter must be a string (avoid ternary; lite-C friendly)
      string expr = 0;
      if(LOG_EXPR_TEXT){
        if(G_Sym) expr = G_Sym[i];
        else expr = 0;
      }

      appendEqMetaLine(Bar, G_Epoch, G_CtxID,
        i, n1, n2, tid, nodeDepth, rate, pred, adv, G_Prop[i], (int)G_Mode[i],
        (var)G_WAdv[i], (var)G_WTree[i], G_MCF_PBull, G_MCF_Entropy, (int)G_MCF_State,
        expr);
    }
  }

  // advance rolling cursor
  G_UpdatePos = i1;

  // full pass completed?
  if(G_UpdatePos >= N){
    G_UpdatePos = 0;
    G_UpdatePasses += 1;
    return 1;
  }
  return 0;
}

// ----------------------------------------------------------------------
// K) Cycle tracker: pick leader eq on ring and update phase/speed
// ----------------------------------------------------------------------
void updateEquationCycle() {
  if(!G_EqTheta){ G_CycPh = wrapPi(G_CycPh); return; }

  // Leader = argmax Prop[i]
  int i, bestI = 0; var bestP = -1;
  for(i=0;i<G_N;i++){
    var p = (var)G_Prop[i];
    if(p > bestP){ bestP = p; bestI = i; }
  }
  var th = ifelse(G_EqTheta != 0, G_EqTheta[bestI], 0);

  // angular speed (wrapped diff)
  var d = angDiff(G_LeadTh, th);
  // EW smoothing for speed
  G_CycSpd = 0.9*G_CycSpd + 0.1*d;

  // integrate phase, keep wrapped
  G_CycPh = wrapPi( G_CycPh + G_CycSpd );

  G_LeadEq = bestI;
  G_LeadTh = th;
}
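
// The tracker assumes wrapPi() maps any angle into (-PI..PI] and that
// angDiff(a,b) returns the wrapped difference b-a, as defined earlier in
// the script. For reference, a minimal sketch of such helpers (kept as
// comments to avoid redefinition):
//   var wrapPi(var a){ while(a >  PI) a -= 2*PI;
//                      while(a < -PI) a += 2*PI; return a; }
//   var angDiff(var a, var b){ return wrapPi(b - a); }
// The 0.9/0.1 smoothing gives G_CycSpd an effective memory of roughly ten
// leader updates.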

// ----------------------------------------------------------------------
// L) Markov orchestration per bar (5m every bar; 1H & Relation on close)
// ----------------------------------------------------------------------
int is_H1_close(){ return (Bar % TF_H1) == 0; }
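
// Assuming TF_H1 is defined earlier as the number of base bars per hour
// (e.g. 12 with a 5-minute BarPeriod), is_H1_close() fires on every 12th
// bar. It keys off the bar counter, not wall-clock time, so it matches true
// hourly closes only when bar numbering is aligned with the hour.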

void updateAllMarkov(){
  // don't touch the Markov chains until all three are allocated
  if(!MC_Count || !MC_RowSum || !ML_Count || !ML_RowSum || !MR_Count || !MR_RowSum)
    return;
  // low TF always
  updateMarkov_5M();

  // on 1H close, refresh HTF & relation
  if(is_H1_close()){
    updateMarkov_1H();
    updateMarkov_REL();
  }

  // expose HTF features to DTREE (the legacy MC_* are HTF via MH_*)
  G_MCF_PBull   = MH_PBullNext;
  G_MCF_Entropy = MH_Entropy;
  G_MCF_State   = MH_Cur;
}

// ----------------------------------------------------------------------
// M) Rewire scheduler (chunked): decide batch and normalize periodically
// ----------------------------------------------------------------------
void maybeRewireNow(var lambda, var mean, var energy, var power){
  int mb = mem_mb_est();

  // Near budget? shrink batch or skip
  if(mb >= UPDATE_MEM_HARD) return;

  // choose batch by bar type
  int batch = ifelse(is_H1_close(), REWIRE_BATCH_EQ_H1, REWIRE_BATCH_EQ_5M);

  // soften by memory
  if(mb >= REWIRE_MEM_SOFT) batch = (batch>>1);
  if(batch < REWIRE_MIN_BATCH) batch = REWIRE_MIN_BATCH;

  int finished = rewireEpochChunk(lambda,mean,energy,power,batch);

  // Normalize proportions after completing a full pass (and every REWIRE_NORM_EVERY passes)
  if(finished && (G_RewirePasses % REWIRE_NORM_EVERY) == 0){
    normalizeProportions();

    // write a header once and roll context id every META_EVERY full passes
    writeEqHeaderOnce();
    if((G_RewirePasses % META_EVERY) == 0){
      // refresh context hash using adjacency (same as in rewireEpoch)
      int D = G_D, i, total = G_N * D;
      unsigned int h = 2166136261u;
      for(i=0;i<total;i++){
        unsigned int x = (unsigned int)G_Adj[i];
        h ^= x + 0x9e3779b9u + (h<<6) + (h>>2);
      }
      G_CtxID = (int)((h ^ ((unsigned int)G_Epoch<<8)) & 0x7fffffff);
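      // Mixing each entry with x + 0x9e3779b9 + (h<<6) + (h>>2), seeded with
      // the FNV-1a offset basis 2166136261, is the classic hash_combine
      // pattern: any change in the adjacency table almost surely produces a
      // fresh context id.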
    }
  }
}

// ----------------------------------------------------------------------
// N) Heavy update scheduler (chunked) for each bar
// ----------------------------------------------------------------------
void runHeavyUpdates(var lambda, var mean, var energy, var power){
  int mb = mem_mb_est();

  // Near hard ceiling? skip heavy work this bar
  if(mb >= UPDATE_MEM_HARD) return;

  int batch = ifelse(is_H1_close(), UPDATE_BATCH_EQ_H1, UPDATE_BATCH_EQ_5M);
  if(mb >= UPDATE_MEM_SOFT) batch = (batch>>1);
  if(batch < UPDATE_MIN_BATCH) batch = UPDATE_MIN_BATCH;

  heavyUpdateChunk(lambda,mean,energy,power,batch);
}

// ----------------------------------------------------------------------
// O) Hit-rate scorer (EW average of 1-bar directional correctness)
// ----------------------------------------------------------------------
void updateHitRates(){
  if(is(INITRUN)) return;
  if(Bar <= LookBack) return;

  int i;
  var r = G_Ret1; // realized 1-bar return provided by outer loop
  var sgnR = sign(r);

  for(i=0;i<G_N;i++){
    var a = (var)G_AdvPrev[i]; // last bar's advisor score (-1..+1)
    var sgnA = ifelse(a > HIT_EPS, 1, ifelse(a < -HIT_EPS, -1, 0));
    var hit = ifelse(sgnR == 0, 0.5, ifelse(sgnA == sgnR, 1.0, 0.0));
    G_HitEW[i] = (fvar)((1.0 - HIT_ALPHA)*(var)G_HitEW[i] + HIT_ALPHA*hit);
    G_HitN[i] += 1;
  }
}
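
// Worked example (illustrative, assuming HIT_ALPHA = 0.05): one correct call
// (hit = 1.0) moves G_HitEW from 0.50 to 0.95*0.50 + 0.05*1.0 = 0.525, and a
// flat bar (sgnR == 0) scores a neutral 0.5, so abstaining is not punished.
// The EW average has an effective memory of about 1/HIT_ALPHA = 20 bars.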

// ----------------------------------------------------------------------
// P) Lambda/Gamma blend & accuracy sentinel
// ----------------------------------------------------------------------
var blendLambdaGamma(var lambda_raw, var gamma_raw){
  // adapt blend weight a bit with entropy: more uncertainty -> lean on gamma
  var w = clamp(G_FB_W + 0.15*(0.5 - G_MCF_Entropy), 0.4, 0.9);
  var x = w*lambda_raw + (1.0 - w)*gamma_raw;
  acc_update(lambda_raw, gamma_raw);
  return x;
}
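
// Worked example (illustrative, assuming G_FB_W = 0.7): at maximum
// uncertainty (G_MCF_Entropy = 1.0) the weight is clamp(0.7 - 0.075, 0.4,
// 0.9) = 0.625, so gamma contributes 37.5%; with a decisive chain
// (entropy = 0.0) it rises to clamp(0.775, 0.4, 0.9) = 0.775.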

// ----------------------------------------------------------------------
// Q) Per-bar orchestrator (no orders here; main run() will call this)
// Hardened for warmup & init guards (safe if called before LookBack)
// ----------------------------------------------------------------------
void alpha12_step(var ret1_now /*1-bar realized return for scoring*/)
{
  // If somehow called before init completes, do nothing
  if(!ALPHA12_READY) return;

  // 1) Markov update & expose HTF features (always safe)
  updateAllMarkov();

  // --- Warmup guard ---------------------------------------------------
  // Some environments (or external callers) may invoke this before LookBack.
  // In warmup we only maintain projections and exit; no rewires/heavy updates.
  if(Bar < LookBack){
    // keep projection cache alive if arrays exist
    computeProjection();
    G_Ret1 = ret1_now;  // harmless bookkeeping for scorer
    // optionally adapt MC threshold very slowly even in warmup
    {
      var h = 0.5;
      int i; var acc = 0;
      for(i=0;i<G_N;i++) acc += (var)G_HitEW[i];
      if(G_N > 0) h = acc/(var)G_N;

      var target = MC_ACT
                 + 0.15*(0.55 - h)
                 + 0.10*(G_MCF_Entropy - 0.5);
      target = clamp(target, 0.20, 0.50);
      G_MC_ACT = 0.95*G_MC_ACT + 0.05*target;
    }
    return; // <-- nothing heavy before LookBack
  }
  // --------------------------------------------------------------------

  // 2) Compute lambda from current projection snapshot
  var lambda = 0.0;
  {
    computeProjection();

    int K = keffClamped();   // clamped effective projection dimension
    int k;
    var e   = 0;
    var pwr = 0;

    for(k = 0; k < K; k++){
      var z = (var)G_Z[k];
      e   += z;
      pwr += z*z;
    }

    var mean   = 0;
    var energy = pwr; // total energy = sum of squares
    var power  = 0;

    if(K > 0){
      mean  = e   / (var)K;
      power = pwr / (var)K;
    }

    // local "lambda" = trend proxy mixing price-like aggregates
    lambda = 0.7*tanh(mean) + 0.3*tanh(0.05*power);

    // 3) Maybe rewire a slice this bar (uses same features)
    maybeRewireNow(lambda, mean, energy, power);

    // 4) Heavy updates for a slice
    runHeavyUpdates(lambda, mean, energy, power);
  }

  // 5) Gamma from coarse network projection (stable, uses whole state)
  var gamma = projectNet();

  // 6) Blend & store accuracy sentinel
  var x = blendLambdaGamma(lambda, gamma);

  // 7) Update ring / equation-cycle tracker
  updateEquationCycle();

  // 8) Score previous advisors against realized 1-bar return
  G_Ret1 = ret1_now;
  updateHitRates();

  // 9) Depth manager & elastic growth controller (memory-aware)
  depth_manager_runtime();
  edc_runtime();

  // 10) Adapt MC acceptance threshold by hit-rate/entropy
  {
    var h = 0.0;
    int i;
    for(i = 0; i < G_N; i++) h += (var)G_HitEW[i];
    if(G_N > 0) h /= (var)G_N; else h = 0.5;

    var target = MC_ACT
               + 0.15*(0.55 - h)
               + 0.10*(G_MCF_Entropy - 0.5);
    target = clamp(target, 0.20, 0.50);
    G_MC_ACT = 0.9*G_MC_ACT + 0.1*target;
  }

  // silence unused warning if trading block is removed
  x = x;
}

// ==================== Part 4/4 — Runtime, Trading, Init/Cleanup ====================

// ---- globals used by Part 4
var G_LastSig = 0;   // blended lambda/gamma used for trading view
int G_LastBarTraded = -1;

// ---- small guards for optional plotting
void plotSafe(string name, var v){
  if(ENABLE_PLOTS && !G_ChartsOff) plot(name, v, NEW|LINE, 0);
}

// ---- lite-C compatible calloc replacement ----
void* xcalloc(int count, int size)  // removed 'static' (lite-C)
{
  int bytes = count*size;
  void* p = malloc(bytes);
  if(p) memset(p,0,bytes);
  else  quit("Alpha12: OOM in xcalloc");
  return p;
}
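
// Usage sketch: the Markov matrices below could equally be obtained with
//   MC_Count = (int*)xcalloc(MC_STATES*MC_STATES, sizeof(int));
// which zeroes the block and aborts on OOM in one step. allocMarkov() keeps
// the explicit malloc/memset pairs; both forms are equivalent.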

// ======================= Markov alloc/free =======================
void allocMarkov()
{
  int NN = MC_STATES*MC_STATES;
  int bytesMat = NN*sizeof(int);
  int bytesRow = MC_STATES*sizeof(int);

  // --- HTF (1H) chain (legacy MC_*) ---
  MC_Count = (int*)malloc(bytesMat);
  MC_RowSum= (int*)malloc(bytesRow);
  if(!MC_Count || !MC_RowSum) quit("Alpha12: OOM in allocMarkov(MC)");
  memset(MC_Count, 0, bytesMat);
  memset(MC_RowSum, 0, bytesRow);

  // --- LTF (5M) chain ---
  ML_Count = (int*)malloc(bytesMat);
  ML_RowSum= (int*)malloc(bytesRow);
  if(!ML_Count || !ML_RowSum) quit("Alpha12: OOM in allocMarkov(ML)");
  memset(ML_Count, 0, bytesMat);
  memset(ML_RowSum, 0, bytesRow);

  // --- Relation chain (links 5M & 1H) ---
  MR_Count = (int*)malloc(bytesMat);
  MR_RowSum= (int*)malloc(MR_STATES*sizeof(int)); // MR_STATES == MC_STATES
  if(!MR_Count || !MR_RowSum) quit("Alpha12: OOM in allocMarkov(MR)");
  memset(MR_Count, 0, bytesMat);
  memset(MR_RowSum, 0, MR_STATES*sizeof(int));

  // --- initial states & defaults ---
  MC_Prev = MH_Prev = -1; MC_Cur = MH_Cur = 0;
  ML_Prev = -1;           ML_Cur = 0;
  MR_Prev = -1;           MR_Cur = 0;

  MC_PBullNext = 0.5; MC_Entropy = 1.0;
  ML_PBullNext = 0.5; ML_Entropy = 1.0;
  MR_PBullNext = 0.5; MR_Entropy = 1.0;
}
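
// Memory footprint (illustrative, assuming MC_STATES = 8): each chain needs
// 8*8*sizeof(int) = 256 bytes of counts plus 32 bytes of row sums, so all
// three chains together stay well under 1 KB.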

void freeMarkov(){
  if(MC_Count) free(MC_Count);
  if(MC_RowSum)free(MC_RowSum);
  if(ML_Count) free(ML_Count);
  if(ML_RowSum)free(ML_RowSum);
  if(MR_Count) free(MR_Count);
  if(MR_RowSum)free(MR_RowSum);
  MC_Count=MC_RowSum=ML_Count=ML_RowSum=MR_Count=MR_RowSum=0;
}

// ======================= Alpha12 init / cleanup =======================
void Alpha12_init()
{
   if(ALPHA12_READY) return;

  // 1) Session context first
  asset(ASSET_SYMBOL);
  BarPeriod = BAR_PERIOD;
  set(PLOTNOW); // plotting gated by ENABLE_PLOTS at call sites

  // 2) Warmup window
  LookBack = max(300, NWIN);

  // 3) Clamp effective projection size and reset projection cache
  if(G_Keff < 1)  G_Keff = 1;
  if(G_Keff > G_K) G_Keff = G_K;
  G_ProjBar = -1;
  G_ProjK   = -1;

  // 4) Core allocations
  allocateNet();
  allocMarkov();

  // 5) Depth LUT + initial tree + indexing
  if(!G_DepthW) G_DepthW = (var*)malloc(DEPTH_LUT_SIZE*sizeof(var));
  if(!Root)     Root = createNode(MAX_DEPTH);
  G_RT_TreeMaxDepth = MAX_DEPTH;
  refreshDepthW();
  reindexTreeAndMap(); // sizes pred cache & ring angles

  // 6) Bootstrap: RP, projection, one full rewire pass (also sets proportions & CtxID)
  rewireInit();
  computeProjection();
  rewireEpoch(0,0,0,0);

  // 7) Logging header once
  writeEqHeaderOnce();

  // 8) Reset rolling cursors / exposed Markov defaults
  G_RewirePos = 0;   G_RewirePasses = 0;
  G_UpdatePos = 0;   G_UpdatePasses = 0;
  G_MCF_PBull = 0.5; G_MCF_Entropy  = 1.0; G_MCF_State = 0;

  // 9) Done
  ALPHA12_READY = 1;
  printf("\n[Alpha12] init done: N=%i D=%i K=%i (Keff=%i) Depth=%i est=%i MB",
         G_N, G_D, G_K, G_Keff, G_RT_TreeMaxDepth, mem_mb_est());
}

void Alpha12_cleanup(){
  freeMarkov();
  if(Root){ freeTree(Root); Root=0; }
  freeNodePool();
  if(G_DepthW){ free(G_DepthW); G_DepthW=0; }
  freeNet();
  ALPHA12_READY = 0;
}

// ======================= Helpers for realized 1-bar return =======================
var realizedRet1(){
  // Basic 1-bar return proxy from close series
  vars C = series(priceClose());
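  // series() is deliberately called before the guard: Zorro series must be
  // created and shifted on every bar, including during the lookback phase.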
  if(Bar <= LookBack) return 0;
  return C[0] - C[1];
}
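
// C[0]-C[1] is a raw price difference, which suffices as a sign proxy for
// the hit-rate scorer. A scale-free variant (a sketch; not used by the code
// below) would divide by the previous close:
var realizedRet1Rel(){
  vars C = series(priceClose());
  if(Bar <= LookBack || C[1] == 0) return 0;
  return (C[0]-C[1])/C[1];
}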

// ======================= Trading gate =======================
// Combines blended network signal with Markov PBull gate.
// Returns signed signal in [-1..1].
var tradeSignal(){
  // --- EARLY GUARDS ---
  if(!ALPHA12_READY) return 0;              // init not completed
  if(!G_RP || !G_Z || !G_StateSq) return 0; // projection buffers not allocated

  // Recompute a lightweight lambda/gamma snapshot for display/decisions.
  // (Alpha12_step already ran heavy ops; this is cheap.)
  computeProjection();

  int Keff = keffClamped();     // clamped effective projection size
  if(Keff <= 0) return 0;       // nothing to project yet; be safe

  int k;
  var e   = 0;
  var pwr = 0;

  for(k = 0; k < Keff; k++){
    var z = (var)G_Z[k];
    e   += z;
    pwr += z*z;
  }

  // --- NO TERNARY: explicit guards for lite-C ---
  var mean  = 0;
  var power = 0;
  if(Keff > 0){
    mean  = e   / (var)Keff;
    power = pwr / (var)Keff;
  }

  var lambda = 0.7*tanh(mean) + 0.3*tanh(0.05*power);
  var gamma  = projectNet();

  var x = blendLambdaGamma(lambda, gamma);
  G_LastSig = x;

  // Markov (HTF) directional gating (no ternaries)
  var gLong  = 0;
  var gShort = 0;
  if(G_MCF_PBull >= PBULL_LONG_TH)  gLong  = 1.0;
  if(G_MCF_PBull <= PBULL_SHORT_TH) gShort = 1.0;

  // Symmetric gate around x (no ternary)
  var s = 0;
  if(x > 0) s = x * gLong;
  else      s = x * gShort;

  // Modulate by relation chain confidence (lower entropy -> stronger)
  var conf = 1.0 - 0.5*(MR_Entropy); // 0.5..1.0 typically
  s *= conf;

  return clamp(s, -1, 1);
}
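
// Worked gating example (illustrative thresholds PBULL_LONG_TH = 0.60,
// PBULL_SHORT_TH = 0.40): with x = +0.5 and G_MCF_PBull = 0.55 neither gate
// opens and s = 0, so the bar is skipped; with G_MCF_PBull = 0.65 the long
// gate opens and, at MR_Entropy = 0.4, s = 0.5 * (1 - 0.5*0.4) = 0.40.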

// ======================= Position sizing & risk =======================
var posSizeFromSignal(var s){
  // Simple linear sizing, capped
  var base = 1;
  var scale = 2.0 * abs(s); // 0..2
  return base * (0.5 + 0.5*scale); // 0.5..1.5 lots (example)
}

void placeOrders(var s){
  // Basic long/short logic with soft handoff
  if(s > 0){
    if(!NumOpenLong)  enterLong(posSizeFromSignal(s));
    if(NumOpenShort)  exitShort();
  } else if(s < 0){
    if(!NumOpenShort) enterShort(posSizeFromSignal(s));
    if(NumOpenLong)   exitLong();
  }
  // if s==0 do nothing (hold)
}
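
// Design note: the soft handoff closes the opposite side in the same bar the
// new side is opened, so at most one direction is ever held; with s == 0 any
// open position simply rides until an opposite signal appears.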

// ======================= Main per-bar runtime =======================
void Alpha12_bar(){
  // 1) Provide last realized return to the engine scorer
  var r1 = realizedRet1();

  // 2) Run the engine step (updates Markov, rewires slices, heavy updates, etc.)
  alpha12_step(r1);

  // 3) Build trading signal & place orders (once per bar)
  var s = tradeSignal();
  placeOrders(s);

  // 4) Plots (guarded)
  plotSafe("PBull(1H)",   100*(G_MCF_PBull-0.5));
  plotSafe("PBull(5M)",   100*(ML_PBullNext-0.5));
  plotSafe("PBull(Rel)",  100*(MR_PBullNext-0.5));
  plotSafe("Entropy(1H)", 100*(G_MCF_Entropy));
  plotSafe("Sig",         100*G_LastSig);
}  

// ---- Zorro hooks (after macros!) ----
function init(){ Alpha12_init(); }

function run()
{
  // keep it lean; do NOT change BarPeriod/asset here anymore
  if(Bar < LookBack){
    updateAllMarkov();
    return;
  }
  Alpha12_bar();
}

function cleanup(){ Alpha12_cleanup(); }

Last edited by TipmyPip; 09/27/25 10:05.