// AutoARIMAopt.c
// Optimized Zorro lite-C version.
// Input convention:
// Close[0] = most recent price
// Close[1] = previous price
// Close[2] = older price
//
// Main optimization:
// - A reusable AUTO_ARIMA_WORK memory block is allocated once.
// - The P/Q optimization loop reuses pointers into that block.
// - aa_arma_fit_with_work() performs no malloc/free inside the model grid.
// - Result->ar and Result->ma point into Work when using auto_arima_forecast_with_work().
// They remain valid until the next forecast call with the same Work, or until Work is freed.
//
// Notes:
// - No #include needed in Zorro lite-C.
// - Uses var/var* instead of double/double*.
// - No ternary operator, no const qualifiers.
#define AUTO_ARIMA_MAX_ITER 1000
#define AUTO_ARIMA_BIG 1e100
// Result of one auto-ARIMA model search plus one-step forecast.
typedef struct AUTO_ARIMA_RESULT
{
int p; // selected AR order; -1 when no model has been fitted
int d; // selected differencing order (0..2)
int q; // selected MA order; -1 when no model has been fitted
int coeffs_owned; // 1 = free_auto_arima_result owns ar/ma; 0 = Work owns ar/ma
var c; // fitted constant term on the differenced scale
var sse; // sum of squared residuals of the best model (AUTO_ARIMA_BIG when unset)
var aicc; // corrected Akaike score of the best model (AUTO_ARIMA_BIG when unset)
var forecast; // one-step-ahead forecast in price space, rounded to tick size
var* ar; // AR coefficients ar[0..p-1]; ownership per coeffs_owned
var* ma; // MA coefficients ma[0..q-1]; ownership per coeffs_owned
} AUTO_ARIMA_RESULT;
// Reusable scratch memory for the ARIMA grid search.
// Allocated once by prepare_auto_arima_work() and reused across calls,
// so the P/Q optimization loop performs no malloc/free.
typedef struct AUTO_ARIMA_WORK
{
int NCap; // capacity: max series length the buffers were sized for
int MaxPCap; // capacity: exclusive upper bound on AR order
int MaxQCap; // capacity: exclusive upper bound on MA order
// differencing and final residual reconstruction
var* Diff; // chronological differenced series (NCap elements)
var* Diff1; // first-difference scratch for d selection
var* Diff2; // second-difference scratch for d selection
var* Errors; // residual sequence of the current/best model
// candidate and best coefficient storage
var* CandAR; // AR coefficients of the model currently being fitted
var* CandMA; // MA coefficients of the model currently being fitted
var* BestAR; // AR coefficients of the best model so far (Result->ar points here)
var* BestMA; // MA coefficients of the best model so far (Result->ma points here)
// reusable ARMA optimizer buffers
var* FitAR; // working AR coefficients inside aa_arma_fit_with_work
var* FitMA; // working MA coefficients inside aa_arma_fit_with_work
var* MAr; // Adam first moment, AR
var* VAr; // Adam second moment, AR
var* MMa; // Adam first moment, MA
var* VMa; // Adam second moment, MA
var* DEdC; // d(error)/d(constant) per time step
var* DEdAR; // d(error)/d(AR[i]) per time step, N*MaxP layout
var* DEdMA; // d(error)/d(MA[j]) per time step, N*MaxQ layout
var* GradAR; // accumulated AR gradient
var* GradMA; // accumulated MA gradient
} AUTO_ARIMA_WORK;
// ---------- small helpers ----------
// Return the larger of two integers.
int aa_max_int(int A,int B)
{
	if(B > A)
		return B;
	return A;
}
// Clear Count var elements at Ptr to zero.
// Silently ignores a null pointer or a non-positive count.
void aa_zero_vars(var* Ptr,int Count)
{
	if(Ptr == 0 || Count <= 0)
		return;
	memset(Ptr,0,Count*sizeof(var));
}
// Copy Count var elements from Src to Dest.
// Silently ignores null pointers or a non-positive count.
// Regions are assumed not to overlap (memcpy semantics).
void aa_copy_vars(var* Dest,vars Src,int Count)
{
	if(Dest == 0 || Src == 0 || Count <= 0)
		return;
	memcpy(Dest,Src,Count*sizeof(var));
}
// ---------- memory helpers ----------
// Allocate a zero-initialized block of Count vars.
// A non-positive count is promoted to 1 so callers always get a valid
// one-element buffer. Returns 0 and prints a message on allocation failure.
var* aa_alloc_vars(int Count)
{
	int Size = Count;
	if(Size <= 0)
		Size = 1;
	var* Block = malloc(Size*sizeof(var));
	if(Block == 0) {
		printf("\nAutoARIMA: memory allocation failed.");
		return 0;
	}
	memset(Block,0,Size*sizeof(var));
	return Block;
}
// Release a buffer obtained from aa_alloc_vars(); safe on a null pointer.
void aa_free_vars(var* Ptr)
{
	if(Ptr == 0)
		return;
	free(Ptr);
}
// Reset a result struct to the "no model fitted" state.
// Does not free anything; call free_auto_arima_result() first if the
// struct might still own coefficient arrays.
void init_auto_arima_result(AUTO_ARIMA_RESULT* R)
{
	if(R == 0)
		return;
	// orders of -1 mark "nothing selected yet"
	R->p = -1;
	R->q = -1;
	R->d = 0;
	// coefficients start out unset and unowned
	R->coeffs_owned = 0;
	R->ar = 0;
	R->ma = 0;
	// scores start at the sentinel so any real fit beats them
	R->sse = AUTO_ARIMA_BIG;
	R->aicc = AUTO_ARIMA_BIG;
	R->c = 0.;
	R->forecast = 0.;
}
// Release the coefficient arrays of a result, but only when the result
// owns them; otherwise they belong to an AUTO_ARIMA_WORK block and must
// be freed through free_auto_arima_work() instead.
void free_auto_arima_result(AUTO_ARIMA_RESULT* R)
{
	if(R == 0)
		return;
	if(R->coeffs_owned != 0) {
		aa_free_vars(R->ar);
		aa_free_vars(R->ma);
	}
	// always detach, so a later free cannot double-release
	R->coeffs_owned = 0;
	R->ar = 0;
	R->ma = 0;
}
// Put a work block into the empty state: all capacities 0, all buffer
// pointers null. Must be called once before the first prepare; does not
// free anything (use free_auto_arima_work() for that).
void init_auto_arima_work(AUTO_ARIMA_WORK* W)
{
	if(W == 0)
		return;
	// every member is an int or a pointer, so clearing the whole struct
	// zeroes all capacities and nulls all buffer pointers at once
	memset(W,0,sizeof(AUTO_ARIMA_WORK));
}
// Releases every buffer owned by the work block and resets it to the
// empty state, so the same struct can be prepared and reused later.
// Safe on a null or already-empty work block (aa_free_vars ignores null).
// Note: this also invalidates any Result->ar/Result->ma pointers that
// were handed out by auto_arima_forecast_with_work().
void free_auto_arima_work(AUTO_ARIMA_WORK* W)
{
if(W == 0)
return;
aa_free_vars(W->Diff);
aa_free_vars(W->Diff1);
aa_free_vars(W->Diff2);
aa_free_vars(W->Errors);
aa_free_vars(W->CandAR);
aa_free_vars(W->CandMA);
aa_free_vars(W->BestAR);
aa_free_vars(W->BestMA);
aa_free_vars(W->FitAR);
aa_free_vars(W->FitMA);
aa_free_vars(W->MAr);
aa_free_vars(W->VAr);
aa_free_vars(W->MMa);
aa_free_vars(W->VMa);
aa_free_vars(W->DEdC);
aa_free_vars(W->DEdAR);
aa_free_vars(W->DEdMA);
aa_free_vars(W->GradAR);
aa_free_vars(W->GradMA);
// clear capacities and pointers so a later prepare starts fresh
init_auto_arima_work(W);
}
// Allocates once, or reallocates only when the requested capacity grows.
// MaxP and MaxQ are exclusive upper bounds: MaxP=5 tests p=0..4.
// Returns 1 when all buffers are available, 0 on invalid arguments or
// allocation failure (in which case the block is left fully freed).
// Growth policy: any request within the current capacities reuses the
// existing buffers untouched; a larger request frees and reallocates
// everything at the new sizes.
int prepare_auto_arima_work(AUTO_ARIMA_WORK* W,int N,int MaxP,int MaxQ)
{
if(W == 0)
return 0;
if(N <= 0)
return 0;
if(MaxP <= 0)
return 0;
if(MaxQ <= 0)
return 0;
// fast path: buffers exist and are big enough for this request
if(W->Diff != 0 && W->NCap >= N && W->MaxPCap >= MaxP && W->MaxQCap >= MaxQ)
return 1;
free_auto_arima_work(W);
// coefficient buffers are at least 1 element so p=0/q=0 models
// still have valid (unused) storage
int PSize = aa_max_int(MaxP,1);
int QSize = aa_max_int(MaxQ,1);
// per-timestep gradient tables: one row of P (or Q) derivatives per sample
int ArGradSize = N * PSize;
int MaGradSize = N * QSize;
W->Diff = aa_alloc_vars(N);
W->Diff1 = aa_alloc_vars(N);
W->Diff2 = aa_alloc_vars(N);
W->Errors = aa_alloc_vars(N);
W->CandAR = aa_alloc_vars(PSize);
W->CandMA = aa_alloc_vars(QSize);
W->BestAR = aa_alloc_vars(PSize);
W->BestMA = aa_alloc_vars(QSize);
W->FitAR = aa_alloc_vars(PSize);
W->FitMA = aa_alloc_vars(QSize);
W->MAr = aa_alloc_vars(PSize);
W->VAr = aa_alloc_vars(PSize);
W->MMa = aa_alloc_vars(QSize);
W->VMa = aa_alloc_vars(QSize);
W->DEdC = aa_alloc_vars(N);
W->DEdAR = aa_alloc_vars(ArGradSize);
W->DEdMA = aa_alloc_vars(MaGradSize);
W->GradAR = aa_alloc_vars(PSize);
W->GradMA = aa_alloc_vars(QSize);
// all-or-nothing: any failed allocation releases the whole block
if(W->Diff == 0 || W->Diff1 == 0 || W->Diff2 == 0 || W->Errors == 0 ||
W->CandAR == 0 || W->CandMA == 0 || W->BestAR == 0 || W->BestMA == 0 ||
W->FitAR == 0 || W->FitMA == 0 || W->MAr == 0 || W->VAr == 0 ||
W->MMa == 0 || W->VMa == 0 || W->DEdC == 0 || W->DEdAR == 0 ||
W->DEdMA == 0 || W->GradAR == 0 || W->GradMA == 0) {
free_auto_arima_work(W);
return 0;
}
// record the new capacities only after every allocation succeeded
W->NCap = N;
W->MaxPCap = MaxP;
W->MaxQCap = MaxQ;
return 1;
}
// ---------- math helpers ----------
// Arithmetic mean of the first N elements of Series.
// Returns 0 for a null series or a non-positive count.
var aa_mean(vars Series,int N)
{
	if(Series == 0 || N <= 0)
		return 0.;
	var Total = 0.;
	int k;
	for(k = 0; k < N; k++)
		Total += Series[k];
	return Total / (var)N;
}
// Population variance (divides by N, not N-1) of the first N elements.
// Returns 0 for a null series or fewer than 2 samples.
var aa_variance(vars Series,int N)
{
	if(Series == 0 || N < 2)
		return 0.;
	var Center = aa_mean(Series,N);
	var SumSq = 0.;
	int k;
	for(k = 0; k < N; k++) {
		var Dev = Series[k] - Center;
		SumSq += Dev * Dev;
	}
	return SumSq / (var)N;
}
// Chooses the differencing order d (0..2) by comparing the variance of
// the raw series against the variances of its first and second differences.
// Series is newest-first (Series[0] = most recent value); Diff1/Diff2 keep
// that orientation, which does not affect the variance comparison.
// Returns 0 on invalid input or when no differencing is needed.
// Fix vs. original: guards against an unprepared or undersized Work block,
// which previously dereferenced null/short Diff1/Diff2 buffers.
int aa_calculate_d_with_work(vars Series,int N,AUTO_ARIMA_WORK* W)
{
	if(Series == 0)
		return 0;
	if(W == 0)
		return 0;
	// Work must have been prepared; otherwise Diff1/Diff2 are null
	if(W->Diff1 == 0 || W->Diff2 == 0)
		return 0;
	if(N < 3)
		return 0;
	// Diff1/Diff2 need N-1 and N-2 elements; require capacity for N
	if(W->NCap < N)
		return 0;
	var Var0 = aa_variance(Series,N);
	int i;
	// first difference of the price series
	for(i = 0; i < N-1; i++)
		W->Diff1[i] = Series[i] - Series[i+1];
	var Var1 = aa_variance(W->Diff1,N-1);
	// differencing barely reduced variance: series is already stationary
	if(Var1 > 0.5 * Var0)
		return 0;
	// strong reduction: one difference is clearly enough
	if(Var1 < 0.1 * Var0)
		return 1;
	// borderline case: check whether a second difference helps further
	for(i = 0; i < N-2; i++)
		W->Diff2[i] = W->Diff1[i] - W->Diff1[i+1];
	var Var2 = aa_variance(W->Diff2,N-2);
	if(Var2 < 0.5 * Var1)
		return 2;
	return 1;
}
// Applies d-order differencing and writes chronological output:
// Diff[0] = oldest transformed value
// Diff[last] = newest transformed value
// Diff must be allocated by the caller with at least N elements.
// This version writes directly and uses no temporary allocation.
// Input Close is newest-first (Close[0] = most recent price); the output
// is reversed to chronological order in the same pass.
// Returns the number of values written (N, N-1, or N-2), or 0 on invalid
// input or an unsupported order (only D = 0, 1, 2 are handled).
int aa_difference_series_fast(int D,vars Close,int N,vars Diff)
{
if(Close == 0)
return 0;
if(Diff == 0)
return 0;
if(N <= D)
return 0;
int i;
if(D == 0) {
// no differencing: just reverse newest-first into chronological order
for(i = 0; i < N; i++)
Diff[i] = Close[N-1-i];
return N;
}
if(D == 1) {
// first difference in chronological order:
// Diff[i] = price[t] - price[t-1], oldest pair first
int M = N-1;
for(i = 0; i < M; i++)
Diff[i] = Close[M-1-i] - Close[M-i];
return M;
}
if(D == 2) {
// second difference: difference of consecutive first differences
int M2 = N-2;
int K;
for(i = 0; i < M2; i++) {
K = M2-1-i;
Diff[i] = (Close[K] - Close[K+1]) - (Close[K+1] - Close[K+2]);
}
return M2;
}
return 0;
}
// Corrected Akaike information criterion for an ARMA(P,Q) fit with
// constant term, computed from the residual sum of squares.
// Returns AUTO_ARIMA_BIG for degenerate cases (too few samples for the
// parameter count, non-positive SSE, or a vanishing residual variance).
var aa_aicc_score(int N,var SSE,int P,int Q)
{
	// K counts all fitted parameters: P AR terms, Q MA terms, constant c
	int K = P + Q + 1;
	if(N - K - 1 <= 0 || SSE <= 0.)
		return AUTO_ARIMA_BIG;
	var Sigma2 = SSE / (var)N;
	if(Sigma2 < 0.000000000000001)
		return AUTO_ARIMA_BIG;
	// AICc = AIC + small-sample correction term
	var Score = (var)N * log(Sigma2) + 2. * (var)K;
	Score += (2. * (var)K * (var)(K + 1)) / (var)(N - K - 1);
	return Score;
}
// Trains ARMA(P,Q) on chronological Series.
// This optimized version reuses Work buffers; it does not allocate memory inside the P/Q grid.
// ArOut must have at least max(1,P) elements.
// MaOut must have at least max(1,Q) elements.
//
// Method: minimizes the sum of squared one-step-ahead residuals with the
// Adam optimizer. Because MA residuals are recursive (each error depends
// on earlier errors), gradients are obtained by backpropagating through
// that recursion rather than by a closed form.
// Outputs on success (return 1):
// *OutC - fitted constant, *OutSSE - final SSE of the fitted model,
// ArOut[0..P-1], MaOut[0..Q-1] - coefficients (entries beyond P/Q zeroed).
// Returns 0 on invalid arguments or an unprepared Work block.
// Work must have been prepared with capacities NCap >= N, MaxPCap >= P+1,
// MaxQCap >= Q+1 (the caller's grid loop guarantees this).
int aa_arma_fit_with_work(int P,int Q,vars Series,int N,var* OutSSE,var* OutC,vars ArOut,vars MaOut,AUTO_ARIMA_WORK* W)
{
if(Series == 0)
return 0;
if(N < 2)
return 0;
if(OutSSE == 0)
return 0;
if(OutC == 0)
return 0;
if(ArOut == 0)
return 0;
if(MaOut == 0)
return 0;
if(W == 0)
return 0;
*OutSSE = 0.;
// initialize the constant at the series mean - a sensible starting point
*OutC = aa_mean(Series,N);
// buffer extents are at least 1 so zeroing below stays valid for p=0/q=0
int PSize = 1;
int QSize = 1;
int ArGradSize = 1;
int MaGradSize = 1;
if(P > 0)
PSize = P;
if(Q > 0)
QSize = Q;
if(P > 0)
ArGradSize = N * P;
if(Q > 0)
MaGradSize = N * Q;
// aliases into the reusable Work block; no allocation in this function
var* Ar = W->FitAR;
var* Ma = W->FitMA;
var* MAr = W->MAr;
var* VAr = W->VAr;
var* MMa = W->MMa;
var* VMa = W->VMa;
var* Errors = W->Errors;
var* DEdC = W->DEdC;
var* DEdAR = W->DEdAR;
var* DEdMA = W->DEdMA;
var* GradAR = W->GradAR;
var* GradMA = W->GradMA;
if(Ar == 0 || Ma == 0 || MAr == 0 || VAr == 0 || MMa == 0 || VMa == 0 ||
Errors == 0 || DEdC == 0 || DEdAR == 0 || DEdMA == 0 || GradAR == 0 || GradMA == 0)
return 0;
// coefficients and Adam moments start at zero for every (P,Q) candidate
aa_zero_vars(Ar,PSize);
aa_zero_vars(Ma,QSize);
aa_zero_vars(MAr,PSize);
aa_zero_vars(VAr,PSize);
aa_zero_vars(MMa,QSize);
aa_zero_vars(VMa,QSize);
var C = *OutC;
// standard Adam hyperparameters
var Beta1 = 0.9;
var Beta2 = 0.999;
var Eps = 0.00000001;
var Eta = 0.001;
var MC = 0.;
var VC = 0.;
var PrevSSE = AUTO_ARIMA_BIG;
// running powers of Beta1/Beta2 for Adam bias correction
var Beta1Pow = 1.;
var Beta2Pow = 1.;
int Iter;
int i;
int j;
int k;
int t;
for(Iter = 1; Iter < AUTO_ARIMA_MAX_ITER; Iter++) {
var SSELocal = 0.;
aa_zero_vars(Errors,N);
aa_zero_vars(DEdC,N);
aa_zero_vars(DEdAR,ArGradSize);
aa_zero_vars(DEdMA,MaGradSize);
aa_zero_vars(GradAR,PSize);
aa_zero_vars(GradMA,QSize);
Beta1Pow *= Beta1;
Beta2Pow *= Beta2;
// Forward pass: predictions and residuals.
// Errors[0] stays 0; the recursion starts at t=1.
for(t = 1; t < N; t++) {
var Pred = C;
for(i = 0; i < P; i++) {
if(t - 1 - i >= 0)
Pred += Ar[i] * Series[t - 1 - i];
}
for(j = 0; j < Q; j++) {
if(t - 1 - j >= 0)
Pred += Ma[j] * Errors[t - 1 - j];
}
Errors[t] = Series[t] - Pred;
SSELocal += Errors[t] * Errors[t];
}
// early stop once SSE no longer changes between iterations
// NOTE(review): relies on Zorro lite-C abs() accepting var arguments;
// standard C abs() truncates to int - confirm when porting
if(Iter > 1) {
if(abs(PrevSSE - SSELocal) < 0.0000000001) {
*OutSSE = SSELocal;
break;
}
}
PrevSSE = SSELocal;
// Backpropagation through the residual recursion.
// DEdC[t], DEdAR[t*P+i], DEdMA[t*Q+j] hold the derivative of
// Errors[t] with respect to C, Ar[i] and Ma[j]; each depends on the
// derivatives of the Q previous errors through the MA terms.
for(t = 1; t < N; t++) {
DEdC[t] = -1.;
for(j = 0; j < Q; j++) {
if(t - 1 - j >= 0)
DEdC[t] -= Ma[j] * DEdC[t - 1 - j];
}
for(i = 0; i < P; i++) {
var DerAR = 0.;
if(t - 1 - i >= 0)
DerAR = -Series[t - 1 - i];
for(j = 0; j < Q; j++) {
if(t - 1 - j >= 0)
DerAR -= Ma[j] * DEdAR[(t - 1 - j) * P + i];
}
DEdAR[t * P + i] = DerAR;
}
for(j = 0; j < Q; j++) {
var DerMA = 0.;
if(t - 1 - j >= 0)
DerMA = -Errors[t - 1 - j];
for(k = 0; k < Q; k++) {
if(t - 1 - k >= 0)
DerMA -= Ma[k] * DEdMA[(t - 1 - k) * Q + j];
}
DEdMA[t * Q + j] = DerMA;
}
}
// accumulate gradients of the SSE: d(e^2)/dθ = 2·e·de/dθ
var GradC = 0.;
for(t = 1; t < N; t++) {
GradC += 2. * Errors[t] * DEdC[t];
for(i = 0; i < P; i++)
GradAR[i] += 2. * Errors[t] * DEdAR[t * P + i];
for(j = 0; j < Q; j++)
GradMA[j] += 2. * Errors[t] * DEdMA[t * Q + j];
}
// normalize by N so the learning rate is independent of series length
GradC /= (var)N;
for(i = 0; i < P; i++)
GradAR[i] /= (var)N;
for(j = 0; j < Q; j++)
GradMA[j] /= (var)N;
// Adam update for C.
MC = Beta1 * MC + (1. - Beta1) * GradC;
VC = Beta2 * VC + (1. - Beta2) * GradC * GradC;
var MCHat = MC / (1. - Beta1Pow);
var VCHat = VC / (1. - Beta2Pow);
C -= Eta * MCHat / (sqrt(VCHat) + Eps);
// Adam update for AR coefficients.
// Coefficients are clamped to (-0.99,0.99) to keep the model stable.
for(i = 0; i < P; i++) {
MAr[i] = Beta1 * MAr[i] + (1. - Beta1) * GradAR[i];
VAr[i] = Beta2 * VAr[i] + (1. - Beta2) * GradAR[i] * GradAR[i];
var MHatAR = MAr[i] / (1. - Beta1Pow);
var VHatAR = VAr[i] / (1. - Beta2Pow);
Ar[i] -= Eta * MHatAR / (sqrt(VHatAR) + Eps);
Ar[i] = clamp(Ar[i],-0.99,0.99);
}
// Adam update for MA coefficients.
for(j = 0; j < Q; j++) {
MMa[j] = Beta1 * MMa[j] + (1. - Beta1) * GradMA[j];
VMa[j] = Beta2 * VMa[j] + (1. - Beta2) * GradMA[j] * GradMA[j];
var MHatMA = MMa[j] / (1. - Beta1Pow);
var VHatMA = VMa[j] / (1. - Beta2Pow);
Ma[j] -= Eta * MHatMA / (sqrt(VHatMA) + Eps);
Ma[j] = clamp(Ma[j],-0.99,0.99);
}
}
// Final SSE with optimized coefficients.
// Recomputed because the last Adam step changed the coefficients after
// the loop's SSE was measured.
aa_zero_vars(Errors,N);
var FinalSSE = 0.;
for(t = 1; t < N; t++) {
var PredFinal = C;
for(i = 0; i < P; i++) {
if(t - 1 - i >= 0)
PredFinal += Ar[i] * Series[t - 1 - i];
}
for(j = 0; j < Q; j++) {
if(t - 1 - j >= 0)
PredFinal += Ma[j] * Errors[t - 1 - j];
}
Errors[t] = Series[t] - PredFinal;
FinalSSE += Errors[t] * Errors[t];
}
*OutSSE = FinalSSE;
*OutC = C;
// copy results out; entries beyond the model order are zeroed
aa_zero_vars(ArOut,PSize);
aa_zero_vars(MaOut,QSize);
for(i = 0; i < P; i++)
ArOut[i] = Ar[i];
for(j = 0; j < Q; j++)
MaOut[j] = Ma[j];
return 1;
}
// Round Price to the nearest multiple of TickSize.
// A non-positive tick size disables rounding and returns Price unchanged.
var aa_round_to_tick_size(var Price,var TickSize)
{
	if(TickSize <= 0.)
		return Price;
	var Ticks = round(Price / TickSize);
	return Ticks * TickSize;
}
// Fast forecast function.
// Work owns the memory and coefficients. Result->ar and Result->ma point into Work.
// Do not call free_auto_arima_result() to free those coefficients; free_auto_arima_work() owns them.
//
// Pipeline: select differencing order d -> difference the series ->
// grid-search ARMA(p,q) for p<MaxP, q<MaxQ by AICc -> reconstruct the
// residuals of the best model -> one-step forecast on the differenced
// scale -> integrate back to price space and round to TickSize.
// Close is newest-first (Close[0] = most recent price).
// Returns 1 on success; on failure Result is reset (orders -1) except
// that Result->forecast may hold the last price as a fallback.
int auto_arima_forecast_with_work(vars Close,int N,var TickSize,int MaxP,int MaxQ,AUTO_ARIMA_WORK* Work,AUTO_ARIMA_RESULT* Result)
{
if(Result == 0)
return 0;
init_auto_arima_result(Result);
if(Close == 0)
return 0;
if(N < 5)
return 0;
if(MaxP <= 0)
return 0;
if(MaxQ <= 0)
return 0;
if(Work == 0)
return 0;
// sizes/reallocates the shared buffers; cheap when capacity suffices
if(!prepare_auto_arima_work(Work,N,MaxP,MaxQ))
return 0;
// fallback forecast: keep the last price if a later step fails
Result->forecast = Close[0];
int D = aa_calculate_d_with_work(Close,N,Work);
Result->d = D;
// NDiff = number of chronological values after d-order differencing
int NDiff = aa_difference_series_fast(D,Close,N,Work->Diff);
if(NDiff < 2)
return 0;
int MaxArSize = aa_max_int(MaxP,1);
int MaxMaSize = aa_max_int(MaxQ,1);
aa_zero_vars(Work->CandAR,MaxArSize);
aa_zero_vars(Work->CandMA,MaxMaSize);
aa_zero_vars(Work->BestAR,MaxArSize);
aa_zero_vars(Work->BestMA,MaxMaSize);
int BestP = -1;
int BestQ = -1;
var BestC = 0.;
var BestSSE = AUTO_ARIMA_BIG;
var BestAICc = AUTO_ARIMA_BIG;
int p;
int q;
int i;
int j;
// exhaustive grid over (p,q); each fit reuses the same Work buffers
for(p = 0; p < MaxP; p++) {
for(q = 0; q < MaxQ; q++) {
var SSE = 0.;
var C = 0.;
aa_zero_vars(Work->CandAR,MaxArSize);
aa_zero_vars(Work->CandMA,MaxMaSize);
if(!aa_arma_fit_with_work(p,q,Work->Diff,NDiff,&SSE,&C,Work->CandAR,Work->CandMA,Work))
continue;
// model selection by corrected AIC (penalizes parameter count)
var Score = aa_aicc_score(NDiff,SSE,p,q);
if(Score < BestAICc) {
BestAICc = Score;
BestP = p;
BestQ = q;
BestC = C;
BestSSE = SSE;
aa_zero_vars(Work->BestAR,MaxArSize);
aa_zero_vars(Work->BestMA,MaxMaSize);
for(i = 0; i < p; i++)
Work->BestAR[i] = Work->CandAR[i];
for(j = 0; j < q; j++)
Work->BestMA[j] = Work->CandMA[j];
}
}
}
// no candidate produced a finite AICc
if(BestP < 0 || BestQ < 0)
return 0;
aa_zero_vars(Work->Errors,NDiff);
// Reconstruct residual sequence with the best ARMA model.
// Errors[0] stays 0; the recursion starts at t=1, matching the fit.
int t;
for(t = 1; t < NDiff; t++) {
var Pred = BestC;
for(i = 0; i < BestP; i++) {
if(t - 1 - i >= 0)
Pred += Work->BestAR[i] * Work->Diff[t - 1 - i];
}
for(j = 0; j < BestQ; j++) {
if(t - 1 - j >= 0)
Pred += Work->BestMA[j] * Work->Errors[t - 1 - j];
}
Work->Errors[t] = Work->Diff[t] - Pred;
}
// One-step forecast on the differenced scale.
var ForecastDiff = BestC;
int LastT = NDiff - 1;
for(i = 0; i < BestP; i++) {
if(LastT - i >= 0)
ForecastDiff += Work->BestAR[i] * Work->Diff[LastT - i];
}
// the MA index check is >= 1 because Errors[0] was never computed
// by the recursion (it is still 0 from the zeroing above)
for(j = 0; j < BestQ; j++) {
if(LastT - j >= 1)
ForecastDiff += Work->BestMA[j] * Work->Errors[LastT - j];
}
// Integrate forecast back to price space.
// d=1: next = last + diff; d=2: next = last + diff + last first-difference
var LastPrice = Close[0];
var ForecastPrice = LastPrice;
if(D == 0)
ForecastPrice = ForecastDiff;
else if(D == 1)
ForecastPrice = LastPrice + ForecastDiff;
else if(D == 2)
ForecastPrice = LastPrice + ForecastDiff + (LastPrice - Close[1]);
ForecastPrice = aa_round_to_tick_size(ForecastPrice,TickSize);
// publish the best model; ar/ma point into Work (coeffs_owned = 0)
Result->p = BestP;
Result->d = D;
Result->q = BestQ;
Result->coeffs_owned = 0;
Result->c = BestC;
Result->sse = BestSSE;
Result->aicc = BestAICc;
Result->forecast = ForecastPrice;
Result->ar = Work->BestAR;
Result->ma = Work->BestMA;
return 1;
}
// Compatibility wrapper.
// This preserves the old "Result owns coefficients" behavior, but it allocates and frees a Work block on every call.
// For speed in an optimization loop or repeated strategy calls, use auto_arima_forecast_with_work().
// Compatibility wrapper around auto_arima_forecast_with_work().
// Allocates a temporary Work block, runs the forecast, then deep-copies
// the winning coefficients into Result-owned arrays so the caller may
// free the Work immediately. On success Result->coeffs_owned = 1 and the
// caller must release the arrays with free_auto_arima_result().
// Returns 1 on success, 0 on failure (Result is reset in that case).
int auto_arima_forecast(vars Close,int N,var TickSize,int MaxP,int MaxQ,AUTO_ARIMA_RESULT* Result)
{
if(Result == 0)
return 0;
AUTO_ARIMA_WORK Work;
init_auto_arima_work(&Work);
if(!auto_arima_forecast_with_work(Close,N,TickSize,MaxP,MaxQ,&Work,Result)) {
free_auto_arima_work(&Work);
return 0;
}
int P = Result->p;
int Q = Result->q;
// deep copies of the coefficients; null when the order is zero
var* OwnedAR = 0;
var* OwnedMA = 0;
if(P > 0) {
OwnedAR = aa_alloc_vars(P);
if(OwnedAR == 0) {
free_auto_arima_work(&Work);
init_auto_arima_result(Result);
return 0;
}
aa_copy_vars(OwnedAR,Work.BestAR,P);
}
if(Q > 0) {
OwnedMA = aa_alloc_vars(Q);
if(OwnedMA == 0) {
// roll back the AR copy so nothing leaks on this failure path
aa_free_vars(OwnedAR);
free_auto_arima_work(&Work);
init_auto_arima_result(Result);
return 0;
}
aa_copy_vars(OwnedMA,Work.BestMA,Q);
}
// transfer ownership to the result, then drop the temporary Work
Result->ar = OwnedAR;
Result->ma = OwnedMA;
Result->coeffs_owned = 1;
free_auto_arima_work(&Work);
return 1;
}
// ---------- Standalone Zorro script demo ----------
// For strategy usage, replace this main() with your run() function.
// Zorro series are newest-first, so vars Close = series(priceClose()) matches the input convention.
// Runs one forecast on a hard-coded 30-bar downtrending price series and
// prints the selected model, its scores and coefficients.
void main()
{
// sample prices, newest-first: Close[0] is the most recent bar
var Close[30];
Close[0] = 101.25;
Close[1] = 101.10;
Close[2] = 100.95;
Close[3] = 100.70;
Close[4] = 100.85;
Close[5] = 100.40;
Close[6] = 100.20;
Close[7] = 100.05;
Close[8] = 99.90;
Close[9] = 100.10;
Close[10] = 99.70;
Close[11] = 99.55;
Close[12] = 99.30;
Close[13] = 99.50;
Close[14] = 99.10;
Close[15] = 98.95;
Close[16] = 99.20;
Close[17] = 98.80;
Close[18] = 98.60;
Close[19] = 98.75;
Close[20] = 98.40;
Close[21] = 98.20;
Close[22] = 98.00;
Close[23] = 97.85;
Close[24] = 97.65;
Close[25] = 97.90;
Close[26] = 97.55;
Close[27] = 97.35;
Close[28] = 97.20;
Close[29] = 97.05;
int N = 30;
var TickSize = 0.01;
// exclusive bounds: tests p=0..4 and q=0..4
int MaxP = 5;
int MaxQ = 5;
AUTO_ARIMA_WORK Work;
AUTO_ARIMA_RESULT R;
init_auto_arima_work(&Work);
init_auto_arima_result(&R);
if(!auto_arima_forecast_with_work(Close,N,TickSize,MaxP,MaxQ,&Work,&R)) {
printf("\nAutoARIMA forecast failed.");
free_auto_arima_work(&Work);
return;
}
printf("\nBest ARIMA(%i,%i,%i) AICc=%.10f SSE=%.10f",
R.p,R.d,R.q,R.aicc,R.sse);
printf("\nLast price: %.10f -> Forecast: %.10f",
Close[0],R.forecast);
printf("\nConstant c: %.10f",R.c);
if(R.p > 0) {
printf("\nAR coefficients:");
int i;
for(i = 0; i < R.p; i++)
printf(" %.10f",R.ar[i]);
}
if(R.q > 0) {
printf("\nMA coefficients:");
int j;
for(j = 0; j < R.q; j++)
printf(" %.10f",R.ma[j]);
}
// R.ar/R.ma point into Work (coeffs_owned = 0), so freeing Work is
// the only cleanup needed; do not call free_auto_arima_result here
free_auto_arima_work(&Work);
}
/*
// Example inside a Zorro strategy with persistent reusable memory:
AUTO_ARIMA_WORK GArimaWork;
int GArimaWorkInit = 0;
function run()
{
BarPeriod = 1440;
LookBack = 200;
if(is(INITRUN)) {
init_auto_arima_work(&GArimaWork);
GArimaWorkInit = 1;
}
if(is(EXITRUN)) {
if(GArimaWorkInit)
free_auto_arima_work(&GArimaWork);
GArimaWorkInit = 0;
return;
}
vars Close = series(priceClose());
if(is(LOOKBACK))
return;
AUTO_ARIMA_RESULT R;
init_auto_arima_result(&R);
// No malloc/free inside the P/Q optimization loop; Work is reused across bars.
if(auto_arima_forecast_with_work(Close,100,PIP,5,5,&GArimaWork,&R)) {
plot("ARIMA forecast",R.forecast,MAIN,BLUE);
printf("\nARIMA(%i,%i,%i) forecast %.5f",R.p,R.d,R.q,R.forecast);
}
// Do not free_auto_arima_result(&R) here; with_work result coefficients are owned by GArimaWork.
}
*/