
@ ===================================================== @
proc dist2(xmat,gam);
    /* Pairwise distance matrix between weighted observations.

       inputs:
         xmat = (nx x nk) matrix of observations
         gam  = (nk x 1) vector of weights, one per variable

       output:
         (nx x nx) matrix whose (i,j) element is the Euclidean distance
         between the weighted i-th and j-th rows of xmat
    */

local j, wcol, nvars, dsq;

nvars = rows(gam);

@ accumulate squared differences one variable at a time @
dsq = 0;
j = 1;
do while j <= nvars;
    @ j-th variable scaled by its weight; (nx x 1) column @
    wcol = xmat[.,j] * gam[j,1];
    @ column minus its transpose broadcasts (C x R) to an (nx x nx)
      matrix of pairwise differences; ^ squares element by element @
    dsq = dsq + (wcol - wcol')^2;
    j = j + 1;
endo;

retp(sqrt(dsq));
endp;



@ ============================================================ @

proc conc(th);
  /*  This proc performs filter, smoother, and evaluates the likelihood
     function based on the concentrated likelihood function.

   Input:
    th = (k+1 x 1) vector of population parameters
         (first k elements scale gam, last element is eta = sqrt(zeta))
   Output:
    if ks = 0 and kc < 3, then proc returns -f0 = negative of log
       likelihood function
    if ks = 0 and kc = 3, then proc returns the vector alphahat | sqrt(sigeps)
    if ks > 0, then proc returns a (ks x 1) vector of smoothed
       estimates of the conditional mean at ks different values for x
   Global variables:
    n = number of observations
    k = number of nonlinear explanatory variables (excluding constant term)
    klin = total number of explanatory variables (including linear and
           constant term)
    y = (n x 1) vector of dependent variables
    x = (n x k) matrix of explanatory variables
    xwhole = (n x klin) matrix of explanatory variables including constant

    ks = number of points at which smoothed inference is sought
         (ks = 0 means no smoothed inference is found, only the likelihood
          function is evaluated)
    xs = (ks x k) matrix whose rows correspond to values of x at which
         smoothed inference is to be evaluated
    xswhole = (ks x klin) matrix whose rows correspond to the full value of
           the explanatory variables at which smoothed inference is to be
           evaluated (cols 2 through 2+k of xswhole are same as cols of xs)
    gamx = (k x 1) vector which gamma weights are proportional to
    kc = output-control flag (2 prints estimates, 3 returns parameters)

    Local variables:
    alphahat = (klin x 1) vector of constant and linear drift terms
                 prior is E(y|xwhole) = xwhole*alphahat
    gam = (k x 1) vector of scales associated with each explanatory
            variable (g)
    sigeps = population parameter for variance of y minus true E(y|x)
    zeta = squared ratio of lambda to sigma
    P0 = (n x n) matrix of correlations whose row i, col j element is
             correlation between m(gam .* x_i) and m(gam .*x_j)
    q = (ks x n) matrix whose row i gives prior covariance between the
        mean evaluated at the value of x represented by the ith row of xs
        and the values of x represented by observations 1 through n
    xq = (n x k) matrix which is the hadamard product of x with gamma
    epshat = estimated residuals
     */
local gam,sigeps,p0,f0,q,zeta,xq,xx,
      f1, f2,xjunk,alphahat,epshat,stil;

@ ======================================================@
@ read in parameters @

gam = th[1:k,1];
gam = gam .* gamx;
@ last element of th is eta; zeta = eta^2 >= 0 by construction @
zeta = th[rows(th),1]^2;



@ =====================================================@
@ set initial values @

@ xq holds pairwise distances under weights gam; covary (external proc)
  converts them to the prior correlation matrix P0 @
xq = dist2(x,gam);
p0 = covary(k,xq);

f0 = 0;

@==================================================@
@ calculate log likelihood @

@ use alternative algorithms based on size of zeta for numerical stability @

@ NOTE: detl is the GAUSS global holding the log-determinant information
  from the most recent decomposition (here, the invpd call on the line
  above each use).  ln(detl) must therefore stay immediately after its
  invpd; do not reorder these statements. @
if zeta < 1;
     f1 = invpd(zeta*p0+eye(n));
     f2 = ln(detl);
else;
    @ factor zeta out so the matrix passed to invpd stays well scaled;
      det(zeta*p0 + I) = zeta^n * det(p0 + (1/zeta)*I), hence the
      n*ln(zeta) correction to f2 @
    f1 = (1/zeta)*invpd(p0 + (1/zeta)*eye(n) );
    f2 = ln(detl) + n*ln(zeta);
endif;


@ GLS estimates of the constant/linear terms using weight matrix f1 @
xx = xwhole'*f1*xwhole;
alphahat = invpd(xx)*(xwhole'*f1*y);
epshat = y - xwhole*alphahat;
stil = (epshat'*f1*epshat);
@ concentrated-out residual variance @
sigeps = stil/n;
@ concentrated log likelihood @
f0 = -(1/2)*f2 - (n/2)*ln(2*pi*sigeps) - (n/2);
@ kc = 3: return parameter estimates instead of the likelihood; the
  negation here cancels the retp(-f0) below, so callers receive the
  positive vector alphahat | sqrt(sigeps) @
if kc == 3;
     f0 = alphahat | sqrt(sigeps) ;
     f0 = -f0;
endif;

@ kc = 2: print the estimates and diagnostics @
if kc == 2;
   "constant term and linear coefficients";;alphahat';
   "scaling parameters for explanatory variables (g)";;gam';
   "variance of residual";;sigeps;
   "eta squared";;zeta;

   "";"variance-covariance matrix for constant term and drift terms";;
   xjunk = sigeps*invpd(xx);
   xjunk;
   "standard errors";;sqrt(diag(xjunk) )';
   ""; "log likelihood:";;f0;
endif;


@ =========================================================@
@ this section finds smoothed inferences, if desired @


@ ks > 0: smoothed inference.  Distances are computed for the stacked
  matrix xs|x so that q (rows 1..ks, cols ks+1..n+ks) is the prior
  covariance between the evaluation points and the observations.  The
  f0 = -f0 negation again cancels retp(-f0), returning the smoothed
  means with their true sign. @
if ks > 0;
      xjunk = dist2(xs|x,gam);
      xjunk = zeta*covary(k,xjunk);
      q = xjunk[1:ks,ks+1:n+ks];
   f0 = xswhole*alphahat + q*f1*(y - xwhole*alphahat);
   f0 = -f0;
endif;

retp(-f0);
endp;




@ ============================================================= @
proc lm2(epshat,xe,xl);
    /* Bias-corrected LM test of the null hypothesis of linearity.

       Input:
         epshat = (n0 x 1) vector of OLS residuals from the regression
                  of y on a constant and xe
         xe = (n0 x ke) matrix of explanatory variables used in the
              regression from which epshat was formed (constant excluded)
         xl = (n0 x kl) matrix of explanatory variables on which the
              nonlinearity may depend (constant excluded)
       Output:
         scalar LM test statistic
       Globals read:
         kc = print flag; results are displayed when kc > 1
    */

local n0,ke,kl,xewhole,xsig,sigeps,sig0,xq,ht,m0,a0,tra0,b0,denom,stat;

n0 = rows(xl);
ke = cols(xe);
kl = cols(xl);
xewhole = ones(n0,1)~xe;

@ default bandwidth weights: (kl x 1) vector of column standard
  deviations of xl, scaled by sqrt(kl)/2 @
xsig = meanc(xl);
xsig = sqrt(meanc((xl - xsig')^2));
xsig = sqrt(kl)*xsig/2;

@ OLS residual variance (degrees-of-freedom corrected) @
sigeps = epshat'*epshat/(n0 - ke - 1);
sig0 = sigeps;

@ prior correlation matrix evaluated at weights 1/xsig @
xq = dist2(xl,1./xsig);
ht = covary(kl,xq);

@ m0 = annihilator matrix of the linear regressors @
m0 = xewhole*invpd(xewhole'*xewhole)*xewhole';
m0 = eye(n0) - m0;

a0 = m0*ht*m0;
tra0 = sumc(diag(a0));

@ bias-corrected quadratic form and the std deviation of the statistic @
b0 = a0 - m0*tra0/(n0 - ke - 1);
denom = sqrt(2*sumc(diag(b0*b0)));
stat = (epshat'*ht*epshat - sigeps*tra0)/(sig0*denom);

if kc > 1;
    "LM test statistic is";; stat;
    "chi-square form of test";;stat^2;
    "p-value is";;cdfchic(stat^2,1);

endif;

retp(stat);
endp;

