Appendix

 

Chapter 1

%MinRisk Macro: Minimum Risk Test for Stratified Binary Data

%macro MinRisk(dataset);
/*
Inputs:

DATASET = Input data set with event rates observed in each stratum. The input data set must include variables named EVENT1 (number of events of interest in Treatment group 1), EVENT2 (number of events of interest in Treatment group 2), NOEVENT1 (number of non-events in Treatment group 1), NOEVENT2 (number of non-events in Treatment group 2) with one record per stratum.

*/

proc iml;
      use &dataset;
      read all var {event1 noevent1 event2 noevent2} into data;
      m=nrow(data);
      p=j(m,2,0); n=j(m,2,0);
      n[,1]=data[,1]+data[,2];
      n[,2]=data[,3]+data[,4];
      total=sum(n);
      p[,1]=data[,1]/n[,1];
      p[,2]=data[,3]/n[,2];
      delta=p[,1]-p[,2];
      v=p[,1]#(1-p[,1])/n[,1]+p[,2]#(1-p[,2])/n[,2];
      pave=(p[,1]#n[,1]+p[,2]#n[,2])/(n[,1]+n[,2]);
      v0=pave#(1-pave)#(1/n[,1]+1/n[,2]);
      alpha=delta*sum(1/v)-sum(delta/v);
      c=1+alpha*sum((n[,1]+n[,2])#delta/total);
      h=diag(v*sum(1/v))+alpha*delta`;
      wmr=inv(h)*c;
      dmr=wmr`*delta;
      zmr1=abs(dmr)-3/(16*sum(n[,1]#n[,2]/(n[,1]+n[,2])));
      zmr2=sqrt(sum(wmr#wmr#v0));
      zmr=zmr1/zmr2; pmr=2*(1-probnorm(zmr));
      title={"Estimate", "Statistic", "P-value"};
      minrisk=dmr||zmr||pmr;
      print minrisk [colname=title format=best6.];
      win=(1/v)/sum(1/v);
      din=win`*delta;
      zin=abs(din)/sqrt(sum(win#win#v0));
      pin=2*(1-probnorm(zin));
      invar=din||zin||pin;
      print invar [colname=title format=best6.];
      wss=(n[,1]#n[,2]/(n[,1]+n[,2]))/sum(n[,1]#n[,2]/(n[,1]+n[,2]));
      dss=wss`*delta;
      zss=abs(dss)/sqrt(sum(wss#wss#v0));
      pss=2*(1-probnorm(zss));
      ssize=dss||zss||pss;
      print ssize [colname=title format=best6.];
      quit;
%mend MinRisk;
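
For illustration, a call to %MinRisk might look as follows. The STRATA data set below is hypothetical and serves only to show the required input structure (one record per stratum).

* Hypothetical input data for illustration only;
data strata;
    input event1 noevent1 event2 noevent2 @@;
    datalines;
33 45 18 56 40 40 25 62 22 60 24 57
;
%MinRisk(strata);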

%GailSimon Macro: Gail-Simon Test for Qualitative Interaction

 

%macro GailSimon(dataset,est,stderr,testtype);
/*
Inputs:

DATASET = Data set with test statistics and associated standard errors for each stratum.
EST = Name of the variable containing the test statistics.
STDERR = Name of the variable containing the standard errors.
TESTTYPE = "P", "N" or "T" to carry out the one-sided Gail-Simon test for positive differences, the one-sided test for negative differences or the two-sided test, respectively. The value must be passed in quotes because the macro compares it to quoted string constants.

*/

data pvalue;
      set &dataset nobs=m;
      format stat 6.3 p 6.4;
      retain qminus 0 qplus 0;
      qminus=qminus+(&est>0)*(&est/&stderr)**2;
      qplus=qplus+(&est<0)*(&est/&stderr)**2;
      if _n_=m then do;
         if upcase(&testtype)="P" then do; stat=qplus; df=m+1; end;
         if upcase(&testtype)="N" then do; stat=qminus; df=m+1; end;
         if upcase(&testtype)="T" then do; stat=min(qminus,qplus); df=m; end;
         p=0;
         do i=1 to df-1;
                p=p+pdf("binomial",i,0.5,df-1)*(1-probchi(stat,i));
         end;
       end;
       label stat="Test statistic" p="P-value";
       if _n_=m;
       keep stat p;
proc print data=pvalue noobs label;
       %if %upcase(&testtype)="P" %then %do;
           title "One-sided Gail-Simon test for positive differences"; %end;
       %if %upcase(&testtype)="N" %then %do;
           title "One-sided Gail-Simon test for negative differences"; %end;
       %if %upcase(&testtype)="T" %then %do;
           title "Two-sided Gail-Simon test"; %end;
       run;
%mend GailSimon;
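
For illustration, a hypothetical call might look like this. The DIFFS data set (one treatment difference and standard error per stratum) is made up, and the TESTTYPE value is passed in quotes.

* Hypothetical input data for illustration only;
data diffs;
    input diff se @@;
    datalines;
-0.8 0.5 1.2 0.6 0.4 0.5
;
%GailSimon(diffs,diff,se,"P");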

%Pushback Macro: Pushback Test for Qualitative Interaction

%macro pushback(dataset,est,stderr,n,testtype,outdata);
/*
Inputs:

DATASET = Data set with test statistics, associated standard errors and numbers of patients for each stratum.
EST = Name of the variable containing the test statistics.
STDERR = Name of the variable containing the standard errors.
N = Name of the variable containing the numbers of patients.
TESTTYPE = "N" or "T" to carry out the pushback test using the order statistics from a normal or t distribution, respectively (the value must be passed in quotes).

*/

proc univariate data=&dataset noprint;
    var &est;
    weight &n;
    output out=med median=med;
data stand;
    set &dataset;
    if _n_=1 then set med;
    tau=(&est-med)/&stderr;
proc sort data=stand;
    by tau;
data &outdata;
    set stand nobs=m;
    ordstr=_n_;
    if upcase(&testtype)="N" then do;
      t=probit((3*ordstr-1)/(3*m+1));
    end;
    if upcase(&testtype)="T" then do;
        if ordstr<=m/2 then t=tinv(betainv(0.1,ordstr,m-ordstr+1),&n-2);
        if ordstr>m/2 then t=tinv(betainv(0.9,ordstr,m-ordstr+1),&n-2);
        if ordstr=(m+1)/2 then t=0;
    end;
    if tau*(tau-t)>0 then rho=tau-t;
    if tau*(tau-t)<=0 then rho=0;
    dstar=&stderr*rho+med;
    run;
 %mend pushback;
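
A hypothetical call; the DIFFS data set below, with a made-up NPAT variable holding the stratum sample sizes, is illustrative only.

* Hypothetical input data for illustration only;
data diffs;
    input diff se npat @@;
    datalines;
-0.8 0.5 60 1.2 0.6 55 0.4 0.5 58
;
%pushback(diffs,diff,se,npat,"N",pbout);
proc print data=pbout;
    run;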

Chapter 2

%GlobTest Macro: Global Tests for Multiple Endpoints

%macro GlobTest(dataset,group,ngroups,varlist,test);
/*
Inputs:

DATASET = Data set to be analyzed
GROUP = Name of the group variable in the data set
NGROUPS = Number of groups in the data set
VARLIST = List of variable names corresponding to multiple endpoints
TEST = "OLS", "GLS", "MGLS" or "RS" for the OLS test, GLS test, modified GLS test or rank-sum test, respectively (the value must be passed in quotes)

*/
%if &test="RS" %then %do;

proc rank data=&dataset out=ranks;
    var &varlist;
data comp;
    set ranks;
    array endp{*} &varlist;
    comp=0;
    do i=1 to dim(endp);
    comp=comp+endp{i};
    end;
proc mixed data=comp;
    class &group;
    model comp=&group;
    ods output tests3=pval;
data pval;
    set pval;
    format fvalue 5.2 ndf 3.0 ddf 3.0 adjp 6.4;
    ndf=numdf;
    ddf=dendf;
    adjp=probf;
    label fvalue="F-value" adjp="Global p-value";
    keep ndf ddf fvalue adjp;
%end;
%else %do;
%let m=1;
%let word=%scan(&varlist,&m);
%do %while (&word ne);
    %let m=%eval(&m+1);
    %let word=%scan(&varlist,&m);
%end;
%let m=%eval(&m-1);
data stand;
%do i=1 %to &m;
    %let var=%scan(&varlist,&i);
    proc glm data=&dataset;
      class &group;
      model &var=&group;
      ods output FitStatistics=est(keep=rootmse depmean);
    data stand;
      set stand est;
%end;
data stand;
    set stand;
    if rootmse^=.;
data _null_;
    set &dataset nobs=m;
    call symput("n",trim(put(m,5.0)));
proc corr data=&dataset outp=corr(where=(_type_="CORR"))
    noprint;
    var &varlist;
proc iml;
    use &dataset var{&varlist};
    read all into data;
    use stand var{depmean};

    read all into mean;
    meanmat=j(nrow(data),ncol(data),1)*diag(mean);
    use stand var{rootmse};
    read all into pooledsd;
    sdmat=inv(diag(pooledsd));
    use corr var{&varlist};
    read all into r;
    stand=(data-meanmat)*sdmat;
    rinv=inv(r);
    if &test="OLS" then comp=stand*j(&m,1);
    if &test="GLS" then comp=stand*rinv*j(&m,1);
    if &test="MGLS" then comp=stand*sqrt(diag(rinv))*j(&m,1);
    create comp from comp;
    append from comp;
data comp;
    merge comp &dataset;
    comp=col1;
    drop col1;
proc mixed data=comp;
    class &group;
    model comp=&group;
    ods output tests3=pval;
data pval;
    set pval;
    format fvalue 5.2 ndf 3.0 ddf 3.0 adjp 6.4;
    ndf=&ngroups-1;
    ddf=&n-&m*&ngroups;
    adjp=1-probf(fvalue,ndf,ddf);
    label fvalue="F-value"
    adjp="Global p-value";
    keep ndf ddf fvalue adjp;
%end;
proc print data=pval noobs label;
    run;
%mend GlobTest;
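
For illustration, a hypothetical call with two treatment groups and three endpoints (the DOSEDATA data set is made up):

* Hypothetical input data for illustration only;
data dosedata;
    input group $ endp1 endp2 endp3 @@;
    datalines;
P 1.2 0.8 1.5 P 0.9 1.1 1.3 P 1.4 0.7 1.1 P 1.0 0.9 1.2
D 0.4 0.6 0.7 D 0.5 0.2 0.9 D 0.3 0.5 0.6 D 0.6 0.4 0.8
;
%GlobTest(dosedata,group,2,endp1 endp2 endp3,"OLS");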

%GateKeeper Macro: Gatekeeping Procedures Based on Marginal Multiple Tests

%macro GateKeeper(dataset,test,outdata);
/*
Inputs:

DATASET = Data set with information about sequential families of hypotheses (testing type, weights, relative importance of gatekeeper hypotheses and raw p-values).
TEST = "B", "MB" or "S" for the Bonferroni, modified Bonferroni or Simes gatekeeping procedure, respectively (the value must be passed in quotes).
OUTDATA = Output data set with adjusted p-values.

*/

proc iml;
    use &dataset;
    read all var {family serial weight relimp raw_p} into data;
    data=t(data);
    nhyps=ncol(data); nfams=data[1,ncol(data)]; nints=2**nhyps-1;
    h=j(nints,nhyps,0);

    do i=1 to nhyps;
      do j=0 to nints-1;
         k=floor(j/2**(nhyps-i));
         if k/2=floor(k/2) then h[j+1,i]=1;
      end;
    end;
    v=j(nints,nhyps,0); modv=j(nints,nhyps,0);
    hyp=j(nints,nhyps,0); adjp=j(nhyps,1,0);
    do i=1 to nints;
      r=1; tempv=j(1,nhyps,0); tempmodv=j(1,nhyps,0);
      do j=1 to nfams;
         window=(data[1,]=j);
         sumw=sum(window#data[3,]#h[i,]);
         serial=sum(window#data[2,]);
         if (serial=0 & j<nfams) & sumw>0 then do;
              tempv=r#window#data[3,]#h[i,]#
                      ((1-data[4,])+data[4,]/sumw);
              if sum(h[i,]#(data[1,]>j))=0 then
              tempmodv=r#window#data[3,]#h[i,]/sumw;
              else tempmodv=tempv;
         end;
         if (serial>0 |j=nfams) & sumw>0 then do;
              tempv=r#window#data[3,]#h[i,]/sumw;
              tempmodv=tempv;
         end;
         if sumw>0 then do;
              r=r-sum(tempv); v[i,]=v[i,]+tempv;
              modv[i,]=modv[i,]+tempmodv;
         end;
     end;
     if &test="B" then hyp[i,]=h[i,]*
       min(data[5,loc(v[i,])]/v[i,loc(v[i,])]);
     if &test="MB" then hyp[i,]=h[i,]*
       min(data[5,loc(modv[i,])]/modv[i,loc(modv[i,])]);
     if &test="S" then do;
         comb=data[5,loc(modv[i,])]//modv[i,loc(modv[i,])];
         temp=comb;
         comb[,rank(data[5,loc(modv[i,])])]=temp;
         hyp[i,]=h[i,]*min(comb[1,]/cusum(comb[2,]));
     end;
     end;
     do i=1 to nhyps; adjp[i]=max(hyp[,i]); end;
     create adjp from adjp[colname={adjp}];
     append from adjp;
     quit;
data &outdata;
     merge &dataset adjp;
     run;
%mend GateKeeper;
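
A hypothetical call for two sequential families of two hypotheses each. The HYPS data set is made up; FAMILY, SERIAL, WEIGHT, RELIMP and RAW_P are the variable names the macro expects.

* Hypothetical input data for illustration only;
data hyps;
    input family serial weight relimp raw_p @@;
    datalines;
1 0 0.5 0 0.011 1 0 0.5 0 0.038 2 1 0.5 0 0.042 2 1 0.5 0 0.009
;
%GateKeeper(hyps,"B",adjusted);
proc print data=adjusted;
    run;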

%ResamGate Macro: Resampling-Based Gatekeeping Procedures

%macro ResamGate(dataset,resp,test,outdata);
/*
Inputs:

DATASET = Data set with information about sequential families of hypotheses (testing type, weights, relative importance of gatekeeper hypotheses and raw p-values).
RESP = Data set with p-values obtained via resampling.
TEST = "B", "MB" or "S" for the Bonferroni, modified Bonferroni or Simes gatekeeping procedure, respectively (the value must be passed in quotes).
OUTDATA = Output data set with adjusted p-values.

*/

proc iml;
    use &dataset;
    read all var {family serial weight relimp raw_p} into data;
    data=t(data);
    use &resp;
    read all var _all_ into resp;
    nhyps=ncol(data); nfams=data[1,ncol(data)];
    nints=2**nhyps-1; nsims=nrow(resp);
    h=j(nints,nhyps,0);
    do i=1 to nhyps;
      do j=0 to nints-1;
         k=floor(j/2**(nhyps-i));
         if k/2=floor(k/2) then h[j+1,i]=1;
      end;
    end;
    v=j(nints,nhyps,0); modv=j(nints,nhyps,0);
    hyp=j(nints,nhyps,0); adjp=j(nhyps,1,0);
    start bonf(p,w);
      bonfp=min(p[loc(w)]/w[loc(w)]);
      return(bonfp);
    finish bonf;
    start simes(p,w);
      comb=t(p[loc(w)])//t(w[loc(w)]); temp=comb;
      comb[,rank(p[loc(w)])]=temp;
      simesp=min(comb[1,]/cusum(comb[2,]));
      return(simesp);
    finish simes;
    do i=1 to nints;
      r=1; tempv=j(1,nhyps,0); tempmodv=j(1,nhyps,0);
      do j=1 to nfams;
         window=(data[1,]=j);
         sumw=sum(window#data[3,]#h[i,]);
         serial=sum(window#data[2,]);
         if (serial=0 & j<nfams) & sumw>0 then do;
              tempv=r#window#data[3,]#h[i,]#
                     ((1-data[4,])+data[4,]/sumw);
               if sum(h[i,]#(data[1,]>j))=0 then
               tempmodv=r#window#data[3,]#h[i,]/sumw;
               else tempmodv=tempv;
               end;
               if (serial>0 |j=nfams) & sumw>0 then do;
                   tempv=r#window#data[3,]#h[i,]/sumw;
                   tempmodv=tempv;
               end;
               if sumw>0 then do;
                   r=r-sum(tempv);
                   v[i,]=v[i,]+tempv;
                   modv[i,]=modv[i,]+tempmodv;
               end;
              end;
              if &test="B" then samp=bonf(data[5,],v[i,]);
              if &test="MB" then samp=bonf(data[5,],modv[i,]);
              if &test="S" then samp=simes(data[5,],modv[i,]);
              do j=1 to nsims;
                 if &test="B" then resamp=bonf(resp[j,],v[i,]);
                 if &test="MB" then resamp=bonf(resp[j,],modv[i,]);
                 if &test="S" then resamp=simes(resp[j,],modv[i,]);
                 hyp[i,]=hyp[i,]+h[i,]*(resamp<samp)/nsims;
              end;
        end;
        do i=1 to nhyps; adjp[i]=max(hyp[,i]); end;
        create adjp from adjp[colname={adjp}];
        append from adjp;
        quit;
     data &outdata;
        merge &dataset adjp;
        run;
     %mend ResamGate;
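
The call mirrors %GateKeeper except that a second data set of resampled p-values (one column per hypothesis, one row per resample) must be supplied. A sketch, assuming HYPS is defined as in the %GateKeeper example above and RESP holds resampling-based p-values generated elsewhere:

* Illustrative call only (RESP is assumed to be created by a separate resampling step);
%ResamGate(hyps,resp,"S",adjusted);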

Chapter 3

QTCCONC Data Set (Change in the QTc Interval and Plasma Drug Concentration)

data qtcconc;
    input qtcchange conc @@;
    datalines;
-11 0.0 -12 0.0 0 0.0 -14 0.0 -11 0.0 -30 0.0 -8 0.0 -14 0.0 3 0.0
11 0.0 -25 0.0 8 0.0 10 0.0 -29 0.1 10 0.1 -2 0.1 10 0.2 4 0.2
0 0.2 12 0.2 -10 0.3 -11 0.3 -3 0.3 -4 0.3 2 0.3 -4 0.4 11 0.4
1 0.4 -12 0.4 1 0.4 3 0.4 -8 0.4 24 0.5 -3 0.5 8 0.5 -1 0.5
-1 0.6 -18 0.6 9 0.6 -5 0.6 -18 0.6 5 0.6 -8 0.6 11 0.6 -29 0.7
21 0.7 -6 0.7 -23 0.7 11 0.8 -25 0.8 3 0.8 0 0.8 7 0.8 10 0.8
-13 0.9 8 0.9 -30 0.9 5 0.9 3 1.0 0 1.0 6 1.0 -8 1.0 3 1.0
10 1.0 0 1.0 5 1.0 -15 1.0 6 1.0 -3 1.0 12 1.1 -7 1.1 12 1.2
-4 1.2 11 1.2 -11 1.2 9 1.2 5 1.2 8 1.2 -1 1.2 1 1.2 2 1.2
10 1.2 12 1.2 6 1.2 -16 1.2 16 1.3 7 1.3 10 1.3 2 1.3 3 1.3
1 1.3 -4 1.4 8 1.4 10 1.5 10 1.5 -6 1.5 14 1.5 15 1.5 0 1.5
-18 1.5 -3 1.6 9 1.6 -25 1.6 5 1.6 -10 1.6 -20 1.6 0 1.6 5 1.6
11 1.7 -28 1.7 9 1.7 -8 1.7 -18 1.8 11 1.8 14 1.8 16 1.8 12 1.8
-2 1.8 6 1.9 7 1.9 -14 1.9 -22 1.9 -25 1.9 -6 1.9 -5 1.9 -6 1.9
30 1.9 0 1.9 -25 2.0 -21 2.0 4 2.0 -5 2.0 10 2.0 3 2.0 -9 2.0
-25 2.0 6 2.0 1 2.0 -17 2.1 -9 2.1 20 2.1 7 2.1 2 2.1 21 2.1
-16 2.1 8 2.1 9 2.2 16 2.2 22 2.2 8 2.2 -4 2.2 -6 2.2 12 2.2
-14 2.2 12 2.2 0 2.3 21 2.3 -5 2.3 11 2.3 -8 2.3 -9 2.4 -13 2.4
1 2.4 -11 2.4 -20 2.4 -12 2.5 -11 2.5 2 2.5 15 2.5 9 2.5 -27 2.5
1 2.6 3 2.6 1 2.7 -3 2.7 -11 2.7 8 2.7 11 2.7 1 2.8 -4 2.8
-8 2.8 -10 2.8 18 2.8 7 2.8 20 2.9 -5 3.0 24 3.0 -2 3.0 0 3.0
-11 3.1 -11 3.1 14 3.1 5 3.2 17 3.3 22 3.3 -11 3.3 34 3.4 5 3.4
-11 3.5 -1 3.5 10 3.5 2 3.7 -11 3.8 7 3.9 -10 3.9 -6 4.0 -11 4.0
-11 4.0 17 4.0 0 4.0 -12 4.0 -24 4.0 -10 4.1 -3 4.1 18 4.1 -9 4.1
-2 4.2 0 4.3 16 4.3 -12 4.3 -8 4.3 8 4.3 -6 4.4 -6 4.4 14 4.5
11 4.5 -7 4.6 -1 4.6 5 4.6 22 4.7 18 4.7 -11 4.7 6 4.7 4 4.7
2 4.8 22 4.8 12 4.8 12 4.8 -23 4.8 2 4.9 30 4.9 -7 4.9 8 4.9
-11 4.9 15 4.9 0 4.9 3 4.9 13 5.0 -6 5.0 -8 5.0 -21 5.0 -9 5.1
12 5.1 -12 5.1 -5 5.1 -2 5.1 -5 5.2 -15 5.2 -3 5.2 -4 5.2 19 5.2
-9 5.3 8 5.4 0 5.4 18 5.4 0 5.4 9 5.4            
;  
RACOUNT Data Set (Patient Pain Assessment, Tender and Swollen Joints in a Rheumatoid Arthritis Trial)

data racount;
    input ptpain tjcount sjcount @@;
    datalines;
68 25 23 66 23 23 50 16 18 75 23 20 67 26 25 50 22 18 39 18 21
50 18 10 41 8 7 29 11 9 60 20 21 55 17 12 18 7 3 46 13 16
37 9 17 59 11 13 68 18 24 96 27 27 47 16 12 64 24 13 72 18 17
51 10 8 43 14 12 55 12 20 51 14 12 80 13 19 77 24 18 80 23 9
42 12 12 11 12 14 42 8 9 26 16 24 60 8 8 77 12 6 45 16 10
16 7 6 58 9 4 53 12 10 90 10 10 63 12 14 57 18 15 69 27 19
78 13 12 76 14 17 18 8 10 49 12 15 17 8 10 70 23 22 84 16 20
46 16 19 46 7 13 50 7 11 58 9 13 18 8 10 73 14 10 75 15 13
69 20 13 75 21 19 61 16 17 64 12 15 16 2 20 37 9 11 58 11 12
68 23 24 60 8 11 34 12 20 54 19 18 70 12 17 94 10 5 46 17 16
49 23 22 82 13 16 55 10 13 67 22 20 56 17 13 38 11 8 68 12 10
48 15 12 47 11 8 81 16 15 34 12 7 52 14 19 58 24 22 69 16 10
51 5 6 64 19 21 15 15 12 22 10 11 45 10 11 49 19 10 83 18 20
70 17 9 33 10 11 15 10 4 17 13 9 52 17 17 48 7 5 47 5 7
47 16 12 20 9 9 38 10 9 39 12 12 45 14 12 52 11 8 37 7 12
60 13 13 69 20 7 34 12 20 82 15 15 60 8 7 15 8 6 69 12 16
59 10 10 70 13 5 35 12 16 32 8 14 59 5 15 88 23 10 67 28 25
75 12 21 85 13 21 53 16 12 83 12 17 64 11 15 36 7 10 100 26 21
91 13 24 55 12 11 64 17 14 56 14 13 49 8 10 71 10 13 65 21 19
75 18 17 85 26 21 79 14 18 50 10 14 74 26 21 46 15 13 65 19 15
50 17 11 34 9 6 30 14 9 55 23 12 61 19 14 66 9 6 82 20 12
70 16 11 83 28 20 17 14 13 59 11 12 58 9 6 54 12 8 59 7 6
55 9 4 83 26 12 18 12 11 51 10 12 65 22 11 23 24 11 51 22 15
50 26 15 49 13 12 49 8 6 50 9 5 29 9 9 39 15 11 26 10 19
65 14 14 36 12 11 88 25 25 42 8 15 66 20 18 73 16 16 80 24 24
53 25 12 82 21 21 73 16 13 89 25 14 86 24 13 37 20 16 39 10 9
31 6 4 50 11 6 25 6 4 18 9 7 18 12 14 46 13 5 39 8 14
60 19 19 58 21 15 68 12 16 56 6 6 50 16 20 56 10 5 31 12 9
70 16 14 77 12 15 54 11 7 53 24 20 68 16 9 77 18 14 42 16 12
67 26 25 76 26 14 51 16 10 20 26 24 92 14 7 60 18 11 48 25 23
71 17 13 66 13 11 74 9 11 75 14 13 66 18 14 44 13 12 41 10 9
34 23 18 50 24 21 41 9 12 57 18 10 64 15 13 67 15 14 47 17 15
61 19 17 64 24 18 58 11 11 42 14 12 16 7 6 39 23 20 23 11 10
64 18 6 85 12 7 68 9 9 51 15 5 9 10 7 41 17 8 80 22 18
78 18 15 69 8 6 19 13 9 60 23 16 47 9 4 38 14 14 77 19 10
68 14 11 87 24 17 60 22 21 82 24 23 31 21 21 77 7 8      
;  
%GlobalQSmooth Macro: A Polynomial Approximation to the Conditional Quantile Function

/*************************************

Inputs:
DATASET = Data set to be analyzed
GRID = Data set with grid points (X variable)
GAMMA = Quantile of the conditional distribution function
P = Degree of the polynomial approximation
OUTDATA = Output data set containing the polynomial (POLY variable), its first derivative (FIRSTDER variable) and second derivative (SECDER variable) evaluated at the grid points

*************************************/

%macro GlobalQSmooth(dataset,grid,gamma,p,outdata);
data variable;
    set &dataset;
    array cov{*} cov1-cov&p;
    do i=1 to &p;
    cov{i}=x**i; end;
proc mixed data=variable;
    model y=cov1-cov&p/s;
    ods output SolutionF=param;
data equation;
    set param nobs=m;
    retain c0-c&p;
    if effect="Intercept" then c0=estimate;
    call symput("c0",compress(put(c0,best8.)));
    %do i=1 %to &p;
       if effect="cov&i" then c&i=estimate;
       call symput("c&i",compress(put(c&i,best8.)));
    %end;
    if _n_=m;
    run;
proc nlin data=variable nohalve maxiter=500 converge=0.0005;
    parms c0=&c0 %do i=1 %to &p; c&i=&&c&i %end;;
    model y=c0 %do i=1 %to &p; +c&i*cov&i %end;;
    der.c0=1;
       %do i=1 %to &p; der.c&i=cov&i; %end;
    resid=y-model.y;
    if resid>0 then _weight_=&gamma/resid;
    if resid<0 then _weight_=(&gamma-1)/resid;
    if resid=0 then _weight_=0;
    ods output ParameterEstimates=est;
data coef;
    set est nobs=m;
    retain d0-d&p;
    %do i=0 %to &p;
     if parameter="c&i" then d&i=estimate;
    %end;
    if _n_=m;
data &outdata;
    set &grid;
    if _n_=1 then set coef;
    array d{*} d1-d&p;
    poly=d0; firstder=d1;
    if &p>=2 then secder=2*d2; else secder=0;
    do i=1 to &p;
      poly=poly+d{i}*x**i;
      if i>=2 then firstder=firstder+i*d{i}*x**(i-1);
      if i>=3 then secder=secder+(i-1)*i*d{i}*x**(i-2);
    end;
    run;
%mend GlobalQSmooth;
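
For illustration, the macro could be applied to the QTCCONC data set above. It expects the analysis variables to be named X and Y, so the variables are renamed first; the grid and the choice of the 90th percentile are arbitrary.

* Illustrative setup using the QTCCONC data set above;
data qtc;
    set qtcconc;
    x=conc; y=qtcchange;
data grid;
    do x=0 to 5.4 by 0.1;
       output;
    end;
    run;
%GlobalQSmooth(qtc,grid,0.9,5,smooth90);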

%LocalQSmooth Macro: A Local Linear Approximation to the Conditional Quantile Function

/*************************************

Inputs:    
DATASET = Data set to be analyzed
GAMMA = Quantile of the conditional distribution function
INITIAL = Data set with the grid points (X variable), initial values of the intercept and slope of the local linear model (POLY and FIRSTDER variables) and the bandwidth parameter (H variable)
OUTDATA = Output data set containing the local estimates of the quantile function evaluated at the grid points (ESTIMATE variable)

*************************************/

%macro LocalQSmooth(dataset,gamma,initial,outdata);
data quantile;
data _null_;
    set &initial nobs=m;
    call symput("n",m);
run;
%do i=1 %to &n;
data _null_;
    set &initial;
    if _n_=&i then do;
     call symput("x",x);
     call symput("inita",poly);
     call symput("initb",firstder);
     call symput("h",h);
    end;
    run;

proc nlin data=&dataset nohalve noitprint maxiter=300 converge=0.001;
    parms a=&inita b=&initb;
    model y=a+b*(x-&x);
    der.a=1;
    der.b=x-&x; resid=y-model.y;
    if resid>0 then w1=&gamma/resid;
    if resid<0 then w1=(&gamma-1)/resid;
    if resid=0 then w1=0;
    w2=pdf("normal",(x-&x)/&h)/&h; _weight_=w1*w2;
    ods output ParameterEstimates=est(where=(parameter="a"));
data result;
    set est;
    x=&x;
data quantile;
    set quantile result;
    keep x estimate;
%end;
data &outdata;
    set quantile;
    where x^=.;
    run;
%mend LocalQSmooth;
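
Continuing the sketch from the %GlobalQSmooth example, its output can serve as the initial values once a bandwidth is attached (the value H=0.5 is arbitrary):

* Illustrative call reusing the SMOOTH90 data set created above;
data initial;
    set smooth90;
    h=0.5;
%LocalQSmooth(qtc,0.9,initial,local90);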

%TolLimit Macro: Computation of Two-Sided and One-Sided Tolerance Intervals

/*************************************

Inputs:
DATASET = Data set to be analyzed
VAR = Variable for which tolerance limits will be computed
GAMMA = Content of the tolerance interval
BETA = Confidence of the tolerance interval
OUTDATA = Data set with one-sided and two-sided tolerance limits

*************************************/

%macro TolLimit(dataset,var,gamma,beta,outdata);
data _null_;
    set &dataset nobs=m;
    call symput("n",compress(put(m,6.0)));
    run;
data _null_;
    prev1=probbeta(&gamma,&n,1);
    do s=2 to &n;
    next1=probbeta(&gamma,&n-s+1,s);
    if prev1<=1-&beta and next1>1-&beta then
         call symput("rank1",compress(put(s-1,6.0)));
    prev1=next1;
    end;
    prev2=probbeta(&gamma,&n-1,2);
    do s=2 to &n/2;
    next2=probbeta(&gamma,&n-2*s+1,2*s);
    if prev2<=1-&beta and next2>1-&beta then
         call symput("rank2",compress(put(s-1,6.0)));
    prev2=next2;
    end;
    run;
proc rank data=&dataset out=ranks;
    var &var;
    ranks ranky;
proc sort data=ranks;
    by ranky;
data upper1;
    set ranks;
    if ranky>&n-&rank1+1 then delete;
data _null_;
    set upper1 nobs=m;
    if _n_=m then call symput("upper1",compress(put(&var,best8.)));
data upper2;
    set ranks;
    if ranky>&n-&rank2+1 then delete;
data _null_;
    set upper2 nobs=m;
    if _n_=m then call symput("upper2",compress(put(&var,best8.)));
data lower2;
    set ranks;
    if ranky>&rank2 then delete;
data _null_;
    set lower2 nobs=m;
    if _n_=m then call symput("lower2",compress(put(&var,best8.)));
    run;
data &outdata;
    upper1=&upper1; lower2=&lower2; upper2=&upper2;
    label upper1="Upper one-sided tolerance limit"
          upper2="Upper two-sided tolerance limit"
          lower2="Lower two-sided tolerance limit";
    run;
%mend TolLimit;
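
A hypothetical call computing tolerance limits with 90% content and 95% confidence for the QTc change variable in the QTCCONC data set:

* Illustrative call using the QTCCONC data set above;
%TolLimit(qtcconc,qtcchange,0.9,0.95,tollim);
proc print data=tollim noobs label;
    run;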

%VarPlot Macro: Examination of Trends and Variability in Data Sets with Bivariate Measurements

/******************************

Inputs:
 
DATASET = Data set to be analyzed
GRID = Data set with the grid points (X variable)
Q1, Q2, Q3 = Low, middle and upper quantiles to be estimated
H = Bandwidth parameter for local smoothing
OUTDATA = Output data set containing the local estimates of the three quantile functions evaluated at the grid points (ESTIMATE variable) as well as the raw data points

******************************/

%macro VarPlot(dataset,grid,q1,q2,q3,h,outdata);
%GlobalQSmooth(dataset=&dataset,grid=&grid,gamma=&q1,p=5,outdata=upper);
%GlobalQSmooth(dataset=&dataset,grid=&grid,gamma=&q2,p=5,outdata=mid);
%GlobalQSmooth(dataset=&dataset,grid=&grid,gamma=&q3,p=5,outdata=lower);
data initial;
      set upper; h=&h;
%LocalQSmooth(dataset=&dataset,gamma=&q1,initial=initial,outdata=q1);
data initial;
      set mid; h=&h;
%LocalQSmooth(dataset=&dataset,gamma=&q2,initial=initial,outdata=q2);
data initial;
      set lower; h=&h;
%LocalQSmooth(dataset=&dataset,gamma=&q3,initial=initial,outdata=q3);
data q1;
    set q1;
    quantile=1;
data q2;
    set q2;
    quantile=2;
data q3;
    set q3;
    quantile=3;
data rawdata;
    set &dataset;
    quantile=0;
    estimate=y;
data &outdata;
    set q1 q2 q3 rawdata;
%mend VarPlot;
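
A sketch of a call estimating the 10th, 50th and 90th percentile curves with an arbitrary bandwidth of 0.5, reusing the QTC and GRID data sets created in the %GlobalQSmooth example:

* Illustrative call reusing the QTC and GRID data sets defined above;
%VarPlot(qtc,grid,0.1,0.5,0.9,0.5,plotdata);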

Chapter 4

%EffDesign Macro: Design of Group Sequential Trials for Efficacy Testing

%macro EffDesign(fraction, effsize, power, alpha, rho, boundary, sizepower);

/*******************************

Inputs:
FRACTION = Input data set that contains fractions of the total sample size accrued at successive analyses
EFFSIZE = True effect size
POWER = Power
ALPHA = One-sided Type I error probability
RHO = Shape parameter of stopping boundary (0.5 if Pocock boundary and 0 if O’Brien-Fleming boundary)
BOUNDARY = Output data set that contains stopping probabilities at scheduled looks
SIZEPOWER = Output data set that contains average sample number and power for selected effect sizes

*******************************/

proc iml;
    start DriftSearch(d) global(m,critvalue,stfract,inc,infinity);
      upper=critvalue*stfract##&rho;
      adjustment=d*stfract;
      boundary=infinity//(upper-adjustment);
      call seq(p,boundary) eps=1e-8 tscale=inc;
      diff=abs(1-&power-(p[2,]-p[1,])[m]);
      return(diff);
      finish;
      use &fraction;
      read all var _all_ into fraction;
      m=nrow(fraction);
      fract=t(fraction);
      stfract=fract/fract[1];
      inc=j(1,m-1,0);
      do i=1 to m-1;
      inc[i]=(fract[i+1]-fract[i])/fract[1];
      end;
      infinity=repeat(.m,1,m);
      upper=stfract##&rho;
      boundary=infinity//upper;
      call seqscale(prob,critvalue,boundary,1-&alpha) eps=1e-8 tscale=inc;
      upper=critvalue*stfract##&rho;
      boundary=infinity//upper;
      stopz=critvalue*stfract##(&rho-0.5);
      stopp=1-probnorm(stopz);
      call seq(prob0,boundary) eps=1e-8 tscale=inc;
      nfixed=2*((probit(&power)+probit(1-&alpha))/&effsize)**2;
      start=&effsize*sqrt(nfixed*fract[1]/2);
      tc=repeat(.,1,12);
      tc[1]=100;
      tc[3]=1e-5;
      call nlpdd(rc,drift,"DriftSearch",start) tc=tc;
      max=2*(drift/&effsize)*(drift/&effsize)/fract[1];
      upper=critvalue*stfract##&rho;
      adjustment=drift*stfract;
      boundary=infinity//(upper-adjustment);
      call seq(prob1,boundary) eps=1e-8 tscale=inc;
      &boundary=j(m,8,0);
      &boundary[,1]=t(1:m);
      &boundary[,2]=ceil(fraction*max);
      &boundary[,3]=t(stopz);
      &boundary[,4]=t(stopp);
      &boundary[,5]=t(prob0[3,]-prob0[2,]+prob0[1,]);
      &boundary[,6]=t(cusum(prob0[3,]-prob0[2,]+prob0[1,]));
      &boundary[,7]=t(prob1[3,]-prob1[2,]+prob1[1,]);
      &boundary[,8]=t(cusum(prob1[3,]-prob1[2,]+prob1[1,]));
      varnames={"Analysis", "Size", "TestStBoundary", "PValBoundary", "ProbH0", "CumProbH0", "ProbH1", "CumProbH1"};
      create &boundary from &boundary[colname=varnames];
      append from &boundary;
      &sizepower=j(21,3,0);
      do i=0 to 20;
          upper=critvalue*stfract##&rho;
          adjustment=i*&effsize*sqrt(max*fract[1]/2)*stfract/10;
          boundary=infinity//(upper-adjustment);
          call seq(prob2,boundary) eps=1e-8 tscale=inc;
          stop=prob2[3,]-prob2[2,]+prob2[1,];
          &sizepower[i+1,1]=i*&effsize/10;
          &sizepower[i+1,2]=ceil(max*(1-(1-fract)*stop`));
          &sizepower[i+1,3]=1-(prob2[2,]-prob2[1,])[m];
      end;
      varnames={"EffSize", "AveSize", "Power"};
      create &sizepower from &sizepower[colname=varnames];
      append from &sizepower;
      summary=j(1,4,0);
      summary[1]=ceil(max); summary[2]=&sizepower[1,2];
      summary[3]=&sizepower[11,2];
      summary[4]=ceil(nfixed);
      create summary from summary; append from summary;
      quit;
data summary;
      set summary;
      format value best6.;
      length par $50;
      par="One-sided Type I error probability";
        value=&alpha; output;
      par="Power"; value=&power; output;
      par="True effect size"; value=&effsize; output;
      par="Stopping boundary parameter"; value=&rho; output;
      par="Maximum sample size per group"; value=col1; output;
      par="Average sample size per group under H0"; value=col2; output;
      par="Average sample size per group under H1"; value=col3; output;
      par="Fixed sample size per group"; value=col4; output;
      label par="Summary" value="Value";
      keep par value;
proc print data=summary noobs label;
      var par value;
data &boundary;
      set &boundary;
      format TestStBoundary PValBoundary ProbH0 CumProbH0 ProbH1 CumProbH1 6.4
          Analysis Size 4.0;
        label Analysis="Analysis"
          Size="Sample size per group"
          TestStBoundary="Stopping boundary (test statistic scale)"
          PValBoundary="Stopping boundary (p-value scale)"
          ProbH0="Stopping probability under H0"
          CumProbH0="Cumulative stopping probability under H0"
          ProbH1="Stopping probability under H1"
          CumProbH1="Cumulative stopping probability under H1";
data &sizepower;
    set &sizepower;
    format EffSize best6. AveSize 5.0;
    label EffSize="True effect size"
          AveSize="Average sample size per group"
          Power="Power";
    run;
%mend EffDesign;
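
For illustration, a design with five equally spaced looks, an O'Brien-Fleming boundary (RHO=0), a true effect size of 0.25, 90% power and a one-sided 0.025 Type I error rate might be set up as follows. The LOOKS data set, which lists the cumulative sample size fractions, is hypothetical.

* Hypothetical sample size fractions for illustration only;
data looks;
    input fraction @@;
    datalines;
0.2 0.4 0.6 0.8 1
;
%EffDesign(looks,0.25,0.9,0.025,0,obfbound,obfpower);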

%EffFutDesign Macro: Design of Group Sequential Trials for Simultaneous Efficacy and Futility Testing

%macro EffFutDesign(fraction, effsize, power, alpha, rhoeff, rhofut, boundary, sizepower);

/**************************

Inputs:
FRACTION = Input data set that contains fractions of the total sample size accrued at successive analyses
EFFSIZE = True effect size
POWER = Power
ALPHA = One-sided Type I error probability
RHOEFF = Shape parameter of upper (efficacy) stopping boundary (0.5 if Pocock boundary and 0 if O’Brien-Fleming boundary)
RHOFUT = Shape parameter of lower (futility) stopping boundary (0.5 if Pocock boundary and 0 if O’Brien-Fleming boundary)
BOUNDARY = Output data set that contains stopping probabilities at scheduled looks
SIZEPOWER = Output data set that contains average sample number and power for selected effect sizes

**************************/

proc iml;
    start ParSearch(c) global(m,lastlook,stfract,inc);
        drift=(c[1]*lastlook##&rhoeff+c[2]*lastlook##&rhofut)/lastlook;
        upper=c[1]*stfract##&rhoeff;
        lower=drift*stfract-c[2]*stfract##&rhofut;
        lower[m]=lower[m]-1e-5;
        boundary=lower//upper;
        call seq(p,boundary) eps=1e-8 tscale=inc;
        crossh0=sum(p[3,]-p[2,])-&alpha;
        adjustment=drift*stfract;
        boundary=(lower-adjustment)//(upper-adjustment);
        call seq(p,boundary) eps=1e-8 tscale=inc;
        crossh1=sum(p[3,]-p[2,])-&power;
        diff=abs(crossh0)+abs(crossh1);
        return(diff);
    finish;
    use &fraction;
    read all var _all_ into fraction;
    m=nrow(fraction);
    fract=t(fraction);
    stfract=fract/fract[1];
    inc=j(1,m-1,0);
    do i=1 to m-1;
      inc[i]=(fract[i+1]-fract[i])/fract[1];
    end;
    lastlook=stfract[m];
    nfixed=2*((probit(&power)+probit(1-&alpha))/&effsize)**2;
    start={1 1};
    tc=repeat(.,1,12);
    tc[1]=100;
    tc[3]=1e-5;
    call nlpdd(rc,c,"ParSearch",start) tc=tc;
    drift=(c[1]*lastlook##&rhoeff+c[2]*lastlook##&rhofut)/lastlook;
    upper=c[1]*stfract##&rhoeff;
    lower=drift*stfract-c[2]*stfract##&rhofut;
    lower[m]=lower[m]-1e-5; boundary=lower//upper;
    call seq(prob0,boundary) eps=1e-8 tscale=inc;
    adjustment=drift*stfract;
    boundary=(lower-adjustment)//(upper-adjustment);
    call seq(prob1,boundary) eps=1e-8 tscale=inc;
    upperz=(stfract##(-0.5))#upper;
    lowerz=(stfract##(-0.5))#lower;
    upperp=1-probnorm(upperz);
    lowerp=1-probnorm(lowerz);
    max=2*(drift/&effsize)*(drift/&effsize)/fract[1];
    boundary=j(m,10,0);
    boundary[,1]=t(1:m);
    boundary[,2]=ceil(fraction*max);
    boundary[,3]=t(lowerz);
    boundary[,4]=t(upperz);
    boundary[,5]=t(lowerp);
    boundary[,6]=t(upperp);
    boundary[,7]=t(prob0[3,]-prob0[2,]+prob0[1,]);
    boundary[,8]=t(cusum(prob0[3,]-prob0[2,]+prob0[1,]));
    boundary[,9]=t(prob1[3,]-prob1[2,]+prob1[1,]);
    boundary[,10]=t(cusum(prob1[3,]-prob1[2,]+prob1[1,]));
    varnames={"Analysis", "Size", "LowerTestStBoundary", "UpperTestStBoundary",
        "LowerPValBoundary", "UpperPValBoundary",
        "ProbH0", "CumProbH0", "ProbH1", "CumProbH1"};
    create &boundary from boundary[colname=varnames];
    append from boundary;
    sizepower=j(21,3,0);
    do i=0 to 20;
        adjustment=i*drift*stfract/10;
        boundary=(lower-adjustment)//(upper-adjustment);
        call seq(prob2,boundary) eps=1e-8 tscale=inc;
        stop=prob2[3,]-prob2[2,]+prob2[1,];
        sizepower[i+1,1]=i*&effsize/10;
        sizepower[i+1,2]=ceil(max*(1-(1-fract)*stop`));
        sizepower[i+1,3]=sum(prob2[3,]-prob2[2,]);
    end;
    varnames={"EffSize", "AveSize", "Power"};
    create &sizepower from sizepower[colname=varnames];
    append from sizepower; summary=j(1,4,0);
    summary[1]=ceil(max);
    summary[2]=sizepower[1,2];
    summary[3]=sizepower[11,2];
    summary[4]=ceil(nfixed);
    create summary from summary; append from summary;
    quit;
data summary;
    set summary;
    format value best6.;
    length par $50;
    par="One-sided Type I error probability"; value=&alpha; output;
    par="Power"; value=&power; output; par="True effect size"; value=&effsize; output;
    par="Shape parameter of upper boundary"; value=&rhoeff; output; par="Shape parameter of lower boundary"; value=&rhofut; output;
    par="Maximum sample size per group"; value=col1; output;
    par="Average sample size per group under H0"; value=col2; output;
    par="Average sample size per group under H1"; value=col3; output;
    par="Fixed sample size per group"; value=col4; output;
    label par="Summary" value="Value";
    keep par value;
proc print data=summary noobs label;
    var par value;
data &boundary;
    set &boundary;
    format LowerTestStBoundary UpperTestStBoundary LowerPValBoundary UpperPValBoundary ProbH0 CumProbH0 ProbH1 CumProbH1 6.4 Analysis Size 4.0;
    label Analysis="Analysis"
          Size="Sample size per group"
          LowerTestStBoundary="Lower stopping boundary (test statistic scale)"
          UpperTestStBoundary="Upper stopping boundary (test statistic scale)"
          LowerPValBoundary="Lower stopping boundary (p-value scale)"
          UpperPValBoundary="Upper stopping boundary (p-value scale)"
          ProbH0="Stopping probability under H0"
          CumProbH0="Cumulative stopping probability under H0" ProbH1="Stopping probability under H1"
          CumProbH1="Cumulative stopping probability under H1";
data &sizepower;
    set &sizepower;
    format EffSize best6. AveSize 5.0;
    label EffSize="True effect size"
          AveSize="Average sample size per group"
          Power="Power";
    run;
%mend EffFutDesign;
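
A parallel sketch for simultaneous efficacy and futility testing, with O'Brien-Fleming-shaped boundaries on both sides (RHOEFF=RHOFUT=0), reusing the hypothetical LOOKS data set from the %EffDesign example:

* Illustrative call reusing the LOOKS data set defined above;
%EffFutDesign(looks,0.25,0.9,0.025,0,0,effutbound,effutpower);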

%EffMonitor Macro: Efficacy Monitoring of Group Sequential Trials

%macro EffMonitor(fraction, data, effsize, power, alpha, rho, spfunction, sprho, decision, inference);

/****************************

Inputs:
FRACTION = Input data set that contains fractions of the total sample size accrued at successive analyses.
DATA = Input data set containing summary statistics computed at each analysis. The data set must include the following two variables: N is the number of patients in each treatment group (or the average of the numbers of patients if they are not the same) and STAT is the value of a normally distributed test statistic.
EFFSIZE = True effect size.
POWER = Power.
ALPHA = One-sided Type I error probability.
RHO = Shape parameter of stopping boundary (0.5 if Pocock boundary and 0 if O’Brien-Fleming boundary).
SPFUNCTION = Error spending function code (1, design-based function; 2, ten-look function; 3, a function from the Lan-DeMets family; 4, a function from the Jennison-Turnbull family; 5, a function from the Hwang-Shih-DeCani family).
SPRHO = Shape parameter of the error spending function.
DECISION = Output data set that contains stopping boundaries and probabilities as well as one-sided repeated confidence intervals for treatment difference at each interim look.
INFERENCE = Output data set containing bias-adjusted estimate of treatment effect with a one-sided confidence interval computed at the last look.

****************************/

proc iml;
    start DriftSearch(d) global(m,critvalue,stfract,inc,infinity);
        upper=critvalue*stfract##&rho;
        adjustment=d*stfract; boundary=infinity//(upper-adjustment);
        call seq(p,boundary) eps=1e-8 tscale=inc;
        diff=abs(1-&power-(p[2,]-p[1,])[m]);
        return(diff);
    finish;
    start BoundSearch(new) global(stage,alpha,adjbound,sinf,infinc);
        alphainc=alpha[stage]-alpha[stage-1];
        tempb=t(adjbound[1:stage]#sqrt(sinf[1:stage]));
        tempb[stage]=new*sqrt(sinf[stage]);
        tempinf=repeat(.m,1,stage);
        tempinc=t(infinc[1:stage-1]);
        boundary=tempinf//tempb;
        call seq(p,boundary) eps=1e-8 tscale=tempinc;
        diff=abs(p[3,stage]-p[2,stage]-alphainc);
        return(diff);
    finish;
    start AdjQuant(est) global(quantile,sall,observed,tempinc,infinity);
        adjustment=est*sall;
        upper=observed#(sall##0.5);
        boundary=infinity//(upper-adjustment);
        call seq(prob,boundary) eps=1e-8 tscale=tempinc;
        sss=sum(prob[3,]-prob[2,]);
        diff=abs(quantile-sss);
        return(diff);
    finish;
    use &fraction;
    read all var _all_ into fraction;
    m=nrow(fraction);
    fract=t(fraction);
    stfract=fract/fract[1];
    inc=j(1,m-1,0);
    do i=1 to m-1;
      inc[i]=(fract[i+1]-fract[i])/fract[1];
    end;
    infinity=repeat(.m,1,m);
    upper=stfract##&rho;
    boundary=infinity//upper;
    call seqscale(prob,critvalue,boundary,1-&alpha) eps=1e-8 tscale=inc;
    upper=critvalue*stfract##&rho;
    boundary=infinity//upper;
    call seq(prob1,boundary) eps=1e-8 tscale=inc;
    spend1=cusum(prob1[3,]-prob1[2,]+prob1[1,]);
    beta=1-&power;
    nfixed=2*((probit(&power)+probit(1-&alpha))/&effsize)**2;
    start=&effsize*sqrt(nfixed*fract[1]/2);
    tc=repeat(.,1,12);
    tc[1]=100;
    tc[3]=1e-5;
    call nlpdd(rc,drift,"DriftSearch",start) tc=tc;
    max=2*(drift/&effsize)*(drift/&effsize)/fract[1];
    use &data;
    read all var {n stat} into mon;
    n=nrow(mon);
    ss=mon[,1];
    infofrac=ss/max;
    statistic=mon[,2];
    pvalue=1-probnorm(mon[,2]);
    sinf=infofrac/infofrac[1];
    if n>1 then do;
        infinc=j(1,n-1,0);
        do i=1 to n-1;
            infinc[i]=sinf[i+1]-sinf[i];
        end;
    end;
    else infinc=j(1,1,1);
    alpha=j(n,1,0);
    level=j(n,1,0);
    adjbound=j(n,1,0);
    tc=repeat(.,1,12);
    tc[1]=200;
    tc[3]=1e-6;
    * Design-based error spending function;
    if &spfunction=1 then
    do;
        t=0 || fract;
        s=0 || spend1;
        do stage=1 to n;
            x=infofrac[stage];
            do i=1 to m;
                if t[i]<=x & x<t[i+1] then
                    alpha[stage]=(s[i+1]*(x-t[i])+s[i]*(t[i+1]-x))/(t[i+1]-t[i]);
            end;
            if x>=1 then alpha[stage]=&alpha;
        end;
    end;
    * Ten-look error spending function;
    if &spfunction=2 then
    do;
      k=10;
      infinity=repeat(.m,1,k);
      upper=(1:k)##&rho;
      boundary=infinity//upper;
      call seqscale(prob,critvalue,boundary,1-&alpha) eps=1e-8;
      upper=critvalue*(1:k)##&rho;
      boundary=infinity//upper;
      call seq(prob0,boundary) eps=1e-8;
      spend=t(cusum(prob0[3,]-prob0[2,]+prob0[1,]));
      do stage=1 to n;
         x=infofrac[stage];
         if x<1 then do;
              l=floor(k*x); u=floor(k*x)+1;
              alpha[stage]=spend[l]+(k*x-l)*(spend[u]-spend[l])/(u-l);
         end;
         if x>=1 then alpha[stage]=&alpha;
      end;
    end;
    * Lan-DeMets error spending function;
    if &spfunction=3 then
    do;
      do stage=1 to n;
         x=infofrac[stage];
         if x<1 & &sprho=0 then alpha[stage]=2-
              2*probnorm(probit(1-&alpha/2)/sqrt(x));
         if x<1 & &sprho=0.5 then alpha[stage]=&alpha*log(1+(exp(1)-1)*x);
         if x>=1 then alpha[stage]=&alpha;
      end;
    end;
    * Jennison-Turnbull error spending function;
    if &spfunction=4 then
    do;
      do stage=1 to n;
         x=infofrac[stage]; if x<1 then alpha[stage]=&alpha*(x**&sprho);
         if x>=1 then alpha[stage]=&alpha;
      end;
    end;
    * Hwang-Shih-DeCani error spending function;
    if &spfunction=5 then
    do;
      do stage=1 to n;
         x=infofrac[stage];
         if x<1 & &sprho^=0 then alpha[stage]=&alpha*(1-exp(-&sprho*x))/(1-exp(-&sprho));
         if x<1 & &sprho=0 then alpha[stage]=&alpha*x;
         if x>=1 then alpha[stage]=&alpha;
      end;
    end;
    do stage=1 to n;
      if stage=1 then do;
         adjbound[1]=probit(1-alpha[1]);
         level[1]=alpha[1];
      end;
      if stage>1 then do;
         new=probit(1-alpha[stage]);
         call nlpdd(rc,adj,"BoundSearch",new) tc=tc;
         adjbound[stage]=adj;
         level[stage]=1-probnorm(adj);
      end;
    end;
    lowercl=(statistic-adjbound)#sqrt(2/ss);
    reject=(statistic>adjbound);
    stop=0;
    do i=1 to n;
      if reject[i]=1 & stop=0 then stop=i;
    end;
    if stop=0 then last=n; else last=stop;
    observed=t(adjbound[1:last]);
    observed[last]=statistic[last];
    tall=t(ss[1:last]);
    k=ncol(tall);
    tall=tall/tall[k];
    sall=tall/tall[1];
    tempinc=j(1,k-1,0);
    do i=1 to k-1;
      tempinc[i]=sall[i+1]-sall[i];
    end;
    infinity=repeat(.m,1,k);
    tc=repeat(.,1,12);
    tc[1]=100;
    tc[3]=1e-5;
    inference=j(2,1,.);
    quantile=0.5; est=statistic[last];
    call nlpdd(rc,qest,"AdjQuant",est) tc=tc;
    qest=&effsize*qest/drift;
    inference[1]=qest;
    quantile=&alpha;
    est=((statistic-probit(1-&alpha))#sqrt(2/ss))[last];
    est=est*drift/&effsize;
    call nlpdd(rc,qest,"AdjQuant",est) tc=tc;
    qest=&effsize*qest/drift;
    inference[2]=qest;
    create &inference from inference;
    append from inference;
    &decision=j(last,9,0);
    &decision[,1]=t(1:last);
    &decision[,2]=ss[1:last];
    &decision[,3]=infofrac[1:last];
    &decision[,4]=statistic[1:last];
    &decision[,5]=pvalue[1:last];
    &decision[,6]=adjbound[1:last];
    &decision[,7]=level[1:last];
    &decision[,8]=lowercl[1:last];
    &decision[,9]=reject[1:last];
      varnames={"Analysis", "Size", "Fraction", "TestStatistic", "PValue", "TestStBoundary", "PValBoundary", "LowerLimit", "Reject"};
    create &decision from &decision[colname=varnames];
    append from &decision;
    quit;
%let conf=%sysevalf(100*(1-&alpha));
data &inference;
    set &inference;
    length par $50;
    format value best6.;
    if _n_=1 then par="Median unbiased estimate";
    if _n_=2 then par="Lower &conf.% confidence limit";
    value=col1;
    label par="Parameter" value="Value";
    keep par value;
data &decision;
    set &decision;
    format TestStatistic PValue TestStBoundary PValBoundary LowerLimit
      Fraction 6.4 Analysis Size 4.0;
    length Decision $10.;
    label Analysis="Analysis"
      Size="Sample size per group"
      Fraction="Sample size fraction"
      TestStatistic="Test statistic"
      PValue="P-value"
      TestStBoundary="Stopping boundary (test statistic scale)"
      PValBoundary="Stopping boundary (p-value scale)"
      LowerLimit="Lower &conf.% repeated confidence limit"
      Decision="Decision";
    if reject=0 then decision="Continue"; else decision="Reject H0";
    drop reject;
    run;
%mend EffMonitor;
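
A hypothetical monitoring call using the design-based error spending function (SPFUNCTION=1; SPRHO is ignored in that case). The TESTSTATS data set of interim sample sizes and test statistics is made up, and the LOOKS data set from the %EffDesign example supplies the design.

* Hypothetical interim results for illustration only;
data teststats;
    input n stat @@;
    datalines;
55 1.32 101 2.11
;
%EffMonitor(looks,teststats,0.25,0.9,0.025,0,1,0,decision,inference);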

%EffFutMonitor Macro: Efficacy and Futility Monitoring of Group Sequential Trials

%macro EffFutMonitor(fraction,data,effsize,power,alpha,rhoeff,rhofut, spfunction,sprho,decision,inference);

/*

Inputs:
FRACTION = Input data set that contains fractions of the total sample size accrued at successive analyses.
DATA = Input data set containing summary statistics computed at each analysis. The data set must include the following two variables: N is the number of patients in each treatment group (or the average of the numbers of patients if they are not the same) and STAT is the value of a normally distributed test statistic.
EFFSIZE = True effect size.
POWER = Power.
ALPHA = One-sided Type I error probability.
RHOEFF = Shape parameter of upper (efficacy) stopping boundary (0.5 if Pocock boundary and 0 if O’Brien-Fleming boundary)
RHOFUT = Shape parameter of lower (futility) stopping boundary (0.5 if Pocock boundary and 0 if O’Brien-Fleming boundary)
SPFUNCTION = Error spending function code (1, design-based function; 2, ten-look function; 3, a function from the Lan-DeMets family; 4, a function from the Jennison-Turnbull family; 5, a function from the Hwang-Shih-DeCani family).
SPRHO = Shape parameter of the error spending function.
DECISION = Output data set that contains stopping boundaries and probabilities as well as one-sided repeated confidence intervals for treatment difference at each interim look.
INFERENCE = Output data set containing bias-adjusted estimate of treatment effect with a one-sided confidence interval computed at the last look.

*/

proc iml;
    start ParSearch(c) global(lastlook,scf,scale);
      drift=(c[1]*lastlook##&rhoeff+c[2]*lastlook##&rhofut)/lastlook;
      upper=c[1]*scf##&rhoeff;
      lower=drift*scf-c[2]*scf##&rhofut;
      length=ncol(lower);
      lower[length]=lower[length]-1e-5;
      boundary=lower//upper;
      call seq(p,boundary) eps=1e-8 tscale=scale;
      crossh0=sum(p[3,]-p[2,])-&alpha;
      adjustment=drift*scf;
      boundary=(lower-adjustment)//(upper-adjustment);
      call seq(p,boundary) eps=1e-8 tscale=scale;
      crossh1=sum(p[3,]-p[2,])-&power;
      diff=abs(crossh0)+abs(crossh1);
      return(diff);
    finish;
    start BoundSearch(guess) global(stage,alpha,beta,adjlower,adjupper,
      ss,sinf,infinc);
      if guess[1]>guess[2] then do;
         alphainc=alpha[stage]-alpha[stage-1];
         betainc=beta[stage]-beta[stage-1];
         tempupp=t(adjupper[1:stage]#sqrt(sinf[1:stage]));
         templow=t(adjlower[1:stage]#sqrt(sinf[1:stage]));
         tempinc=t(infinc[1:stage-1]);
         tempupp[stage]=guess[1]*sqrt(sinf[stage]);
         templow[stage]=guess[2]*sqrt(sinf[stage]);
         boundary=templow//tempupp;
         call seq(p,boundary) eps=1e-8 tscale=tempinc;
         crossh0=p[3,stage]-p[2,stage]-alphainc;
         adjustment=&effsize*t(sqrt(ss[1:stage]#sinf[1:stage]/2));
         boundary=(templow-adjustment)//(tempupp-adjustment);
         call seq(p,boundary) eps=1e-8 tscale=tempinc;
         crossh1=p[1,stage]-betainc;
         diff=abs(crossh0)+abs(crossh1);
         return(diff);
      end;
      if guess[1]<=guess[2] then do;
         diff=1;
         return(diff);
      end;
    finish;
    start AdjQuant(est) global(quantile,sall,observed,tempinc,infinity);
        adjustment=est*sall;
        upper=observed#(sall##0.5);
        boundary=infinity//(upper-adjustment);
        call seq(prob,boundary) eps=1e-8 tscale=tempinc;
        sss=sum(prob[3,]-prob[2,]);
        diff=abs(quantile-sss);
        return(diff);
    finish;
    use &fraction;
    read all var _all_ into fraction;
    m=nrow(fraction);
    fract=t(fraction);
    stfract=fract/fract[1];
    inc=j(1,m-1,0);
    do i=1 to m-1;
      inc[i]=(fract[i+1]-fract[i])/fract[1];
    end;
    nfixed=2*((probit(&power)+probit(1-&alpha))/&effsize)**2;
    start={1 1}; tc=repeat(.,1,12);
    tc[1]=100; tc[3]=1e-5; lastlook=stfract[m];
    scf=stfract;
    scale=inc;
    call nlpdd(rc,c,"ParSearch",start) tc=tc;
    drift=(c[1]*lastlook##&rhoeff+c[2]*lastlook##&rhofut)/lastlook;
    max=2*(drift/&effsize)*(drift/&effsize)/fract[1];
    upper=c[1]*stfract##&rhoeff;
    lower=drift*stfract-c[2]*stfract##&rhofut;
    lower[m]=lower[m]-1e-5;
    boundary=lower//upper;
    call seq(prob0,boundary) eps=1e-8 tscale=inc;
    alspend=cusum(prob0[3,]-prob0[2,]);
    adjustment=drift*stfract;
    boundary=(lower-adjustment)//(upper-adjustment);
    call seq(prob1,boundary) eps=1e-8 tscale=inc;
    bespend=cusum(prob1[1,]);
    use &data;
    read all var {n stat} into mon;
    n=nrow(mon);
    ss=mon[,1];
    infofrac=ss/max;
    statistic=mon[,2];
    pvalue=1-probnorm(statistic);
    sinf=infofrac/infofrac[1];
    infinc=j(1,n-1,0);
    do i=1 to n-1;
      infinc[i]=sinf[i+1]-sinf[i];
    end;
    alpha=j(n,1,0);
    beta=j(n,1,0);
    * Design-based error spending function;
    if &spfunction=1 then
    do;
      t=0 || fract;
      a=0 || alspend;
      b=0 || bespend;
      do stage=1 to n;
         x=infofrac[stage];
         do i=1 to m;
                if t[i]<=x & x<t[i+1] then do;
                    alpha[stage]=(a[i+1]*(x-t[i])+a[i]*(t[i+1]-x))/(t[i+1]-t[i]);
                    beta[stage]=(b[i+1]*(x-t[i])+b[i]*(t[i+1]-x))/(t[i+1]-t[i]);
                end;
         end;
         if x>=1 then do;
                alpha[stage]=&alpha;
                beta[stage]=1-&power;
         end;
      end;
    end;
    * Ten-look error spending function;
    if &spfunction=2 then
    do;
      k=10;
      t=repeat(0.1,1,10);
      cf=cusum(t);
      scf=cf/cf[1];
      in=j(1,k-1,0);
      do i=1 to k-1;
         in[i]=t[i+1]/t[1];
      end;
      start={1 1};
      tc=repeat(.,1,12);
      tc[1]=100;
      tc[3]=1e-5;
      lastlook=k;
      scale=in;
      call nlpdd(rc,c,"ParSearch",start) tc=tc;
      drift=(c[1]*lastlook##&rhoeff+c[2]*lastlook##&rhofut)/lastlook;
      upper=c[1]*scf##&rhoeff;
      lower=drift*scf-c[2]*scf##&rhofut;
      boundary=lower//upper;
      call seq(prob0,boundary) eps=1e-8 tscale=in;
      als=cusum(prob0[3,]-prob0[2,]);
      adjustment=drift*scf; boundary=(lower-adjustment)//(upper-adjustment);
      call seq(prob1,boundary) eps=1e-8 tscale=in;
      bes=cusum(prob1[1,]);
      do stage=1 to n;
         x=infofrac[stage];
         if x<1 then do;
                l=floor(k*x); u=floor(k*x)+1;
                alpha[stage]=als[l]+(k*x-l)*(als[u]-als[l])/(u-l);
                beta[stage]=bes[l]+(k*x-l)*(bes[u]-bes[l])/(u-l);
         end;
         if x>=1 then do;
                alpha[stage]=&alpha;
                beta[stage]=1-&power;
         end;
      end;
    end;
    * Lan-DeMets error spending function;
    if &spfunction=3 then
    do;
      do stage=1 to n;
         x=infofrac[stage];
         if x<1 & &sprho=0 then alpha[stage]=2-
                2*probnorm(probit(1-&alpha/2)/sqrt(x));
         if x<1 & &sprho=0.5 then alpha[stage]=&alpha*log(1+(exp(1)-1)*x);
         if x>=1 then alpha[stage]=&alpha;
      end;
    end;
    * Jennison-Turnbull error spending function;
    if &spfunction=4 then
    do;
      do stage=1 to n;
         x=infofrac[stage];
         if x<1 then alpha[stage]=&alpha*(x**&sprho);
         if x>=1 then alpha[stage]=&alpha;
      end;
    end;
    * Hwang-Shih-DeCani error spending function;
    if &spfunction=5 then
    do;
      do stage=1 to n;
         x=infofrac[stage];
         if x<1 & &sprho^=0 then alpha[stage]=
                &alpha*(1-exp(-&sprho*x))/(1-exp(-&sprho));
         if x<1 & &sprho=0 then alpha[stage]=&alpha*x;
         if x>=1 then alpha[stage]=&alpha;
      end;
    end;
    adjlower=j(n,1,0);
    adjupper=j(n,1,0);
    adjlowp=j(n,1,0);
    adjuppp=j(n,1,0);
    reject=j(n,1,0);
    guess=j(1,2,0);
    tc=repeat(.,1,12);
    tc[1]=100;
    tc[3]=1e-5;
    do stage=1 to n;
      if stage=1 then do;
         adjupper[1]=probit(1-alpha[1]);
         stdelta=&effsize*sqrt(ss[1]/2);
         adjlower[1]=stdelta+probit(beta[1]);
         adjuppp[1]=alpha[1];
         adjlowp[1]=1-probnorm(adjlower[1]);
      end;
      if stage>1 then do;
         guess[1]=probit(1-alpha[stage]);
         stdelta=&effsize*sqrt(ss[stage]/2);
         guess[2]=stdelta+probit(beta[stage]);
         call nlpdd(rc,adj,"BoundSearch",guess) tc=tc;
         adjupper[stage]=adj[1];
         adjlower[stage]=adj[2];
         adjuppp[stage]=1-probnorm(adj[1]);
         adjlowp[stage]=1-probnorm(adj[2]);
      end;
    end;
    lowercl=(statistic-adjupper)#sqrt(2/ss);
    reject=(statistic>adjupper)-(statistic<adjlower);
    stop=0;
    do i=1 to n;
      if reject[i]^=0 & stop=0 then stop=i;
    end;
    if stop=0 then last=n; else last=stop;
    observed=t(adjupper[1:last]);
    observed[last]=statistic[last];
    tall=t(ss[1:last]);
    k=ncol(tall);
    tall=tall/tall[k];
    sall=tall/tall[1];
    tempinc=j(1,k-1,0);
    do i=1 to k-1;
      tempinc[i]=sall[i+1]-sall[i];
    end;
    infinity=repeat(.m,1,k);
    tc=repeat(.,1,12);
    tc[1]=100;
    tc[3]=1e-5;
    inference=j(2,1,.);
    quantile=0.5;
    est=statistic[last];
    call nlpdd(rc,qest,"AdjQuant",est) tc=tc;
    qest=&effsize*qest/drift;
    inference[1]=qest;
    quantile=&alpha;
    est=((statistic-probit(1-&alpha))#sqrt(2/ss))[last];
    est=est*drift/&effsize;
    call nlpdd(rc,qest,"AdjQuant",est) tc=tc;
    qest=&effsize*qest/drift;
    inference[2]=qest;
    create &inference from inference;
    append from inference;
    &decision=j(last,11,0);
    &decision[,1]=t(1:last);
    &decision[,2]=ss[1:last];
    &decision[,3]=infofrac[1:last];
    &decision[,4]=statistic[1:last];
    &decision[,5]=pvalue[1:last];
    &decision[,6]=adjupper[1:last]; &decision[,7]=adjlower[1:last];
    &decision[,8]=adjuppp[1:last];
    &decision[,9]=adjlowp[1:last];
    &decision[,10]=lowercl[1:last];
    &decision[,11]=reject[1:last];
    varnames={"Analysis", "Size", "Fraction", "TestStatistic", "PValue",
         "UpperTestStBoundary", "LowerTestStBoundary", "UpperPValBoundary",
         "LowerPValBoundary", "LowerLimit", "Reject"};
    create &decision from &decision[colname=varnames];
    append from &decision;
    quit;
%let conf=%sysevalf(100*(1-&alpha));
data &inference;
    set &inference;
    length par $50;
    format value best6.;
    if _n_=1 then par="Median unbiased estimate";
    if _n_=2 then par="Lower &conf.% confidence limit";
    value=col1;
    label par="Parameter" value="Value";
    keep par value;
data &decision;
    set &decision;
    format TestStatistic PValue UpperTestStBoundary LowerTestStBoundary UpperPValBoundary LowerPValBoundary LowerLimit Fraction 6.4 Analysis Size 4.0;
    length Decision $10.;
    label Analysis="Analysis"
          Size="Sample size per group"
          Fraction="Sample size fraction"
          TestStatistic="Test statistic"
          PValue="P-value"
          UpperTestStBoundary="Upper stopping boundary (test statistic scale)"
          LowerTestStBoundary="Lower stopping boundary (test statistic scale)"
          UpperPValBoundary="Upper stopping boundary (p-value scale)"
          LowerPValBoundary="Lower stopping boundary (p-value scale)"
          LowerLimit="Lower &conf.% repeated confidence limit"
          Decision="Decision";
    if reject=0 then decision="Continue";
    if reject=1 then decision="Reject H0";
    if reject=-1 then decision="Reject H1";
    drop reject;
    run;
%mend EffFutMonitor;
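
A matching sketch for simultaneous efficacy and futility monitoring, reusing the made-up LOOKS and TESTSTATS data sets with the design-based spending function:

* Illustrative call reusing the LOOKS and TESTSTATS data sets defined above;
%EffFutMonitor(looks,teststats,0.25,0.9,0.025,0,0,1,0,decision,inference);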

%CondPowerLSH Macro: Lan-Simon-Halperin Conditional Power Test

%macro CondPowerLSH(data,effsize,alpha,gamma,nn,prob,boundary);

/*******************************

Inputs:
DATA = Data set to be analyzed (includes number of patients per group and test statistic at each interim look)
EFFSIZE = Hypothesized effect size
ALPHA = One-sided Type I error probability of the significance test carried out at the end of the trial
GAMMA = Futility index
NN = Projected number of patients per treatment group
PROB = Name of an output data set containing conditional power at each interim look
BOUNDARY = Name of an output data set containing stopping boundary of the conditional power test

*******************************/

proc iml;
    use &data;
    read all var {n teststat} into data;
    m=nrow(data);
    n=data[,1];
    teststat=data[,2];
    prob=j(m,4,0);
    prob[,1]=t(1:m);
    prob[,2]=n/&nn;
    prob[,3]=teststat;
    prob[,4]=1-probnorm((sqrt(&nn)*probit(1-&alpha)-sqrt(n)#teststat-(&nn-n)*&effsize/sqrt(2))/sqrt(&nn-n));
    varnames={"Analysis" "Fraction" "TestStat" "CondPower"};
    create &prob from prob[colname=varnames];
    append from prob;
    bound=j(50,2,0);
    frac=(1:50)*&nn/50;
    bound[,1]=t(frac/&nn);
    bound[,2]=t(sqrt(&nn/frac)*probit(1-&alpha)+sqrt((&nn-frac)/frac)
         *probit(1-&gamma)-&effsize*(&nn-frac)/sqrt(2*frac));
    varnames={"Fraction" "StopBoundary"};
    create &boundary from bound[colname=varnames];
    append from bound;
    quit;
data &prob;
    set &prob;
    format Fraction 4.2 TestStat 7.4 CondPower 6.4;
    label Fraction="Fraction of total sample size"
      TestStat="Test statistic"
      CondPower="Conditional power";
%mend CondPowerLSH;
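
A hypothetical call with a futility index of 0.8 and a projected 180 patients per group; the INTERIM data set (patients per group and test statistic at each look) is made up:

* Hypothetical interim results for illustration only;
data interim;
    input n teststat @@;
    datalines;
60 0.9 120 1.2
;
%CondPowerLSH(interim,0.25,0.025,0.8,180,lshprob,lshbound);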

%CondPowerPAB Macro: Pepe-Anderson-Betensky Conditional Power Test

%macro CondPowerPAB(data,alpha,gamma,c,nn,prob,boundary);

/****************************

Inputs:
DATA = Data set to be analyzed (includes number of patients per group and test statistic at each interim look)
ALPHA = One-sided Type I error probability of the significance test carried out at the end of the trial
GAMMA = Futility index
C = Parameter determining the shape of the stopping boundary (Pepe and Anderson (1992) set C to 1 and Betensky (1997) recommended to set C to 2.326)
NN = Projected number of patients per treatment group
PROB = Name of an output data set containing conditional power at each interim look
BOUNDARY = Name of an output data set containing stopping boundary of the conditional power test

******************************/

proc iml;
    use &data;
    read all var {n teststat} into data;
    m=nrow(data);
    n=data[,1];
    teststat=data[,2];
    prob=j(m,4,0);
    prob[,1]=t(1:m);
    prob[,2]=n/&nn;
    prob[,3]=teststat;
    prob[,4]=1-probnorm((sqrt(&nn)*probit(1-&alpha)-sqrt(n)#teststat-(&nn-n)#(teststat+&c)/sqrt(n))/sqrt(&nn-n));
    varnames={"Analysis" "Fraction" "TestStat" "CondPower"};
    create &prob from prob[colname=varnames];
    append from prob;
    bound=j(50,2,0);
    frac=(1:50)*&nn/50;
    bound[,1]=t(frac/&nn);
    bound[,2]=t(sqrt(frac/&nn)*probit(1-&alpha)+sqrt(frac#(&nn-frac)/(&nn*&nn))*probit(1-&gamma)-&c*(&nn-frac)/&nn);
    varnames={"Fraction" "StopBoundary"};
    create &boundary from bound[colname=varnames];
    append from bound;
    quit;
data &prob;
    set &prob;
    format Fraction 4.2 TestStat 7.4 CondPower 6.4;
    label Fraction="Fraction of total sample size"
      TestStat="Test statistic"
      CondPower="Conditional power";
%mend CondPowerPAB;
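
The analogous call for the Pepe-Anderson-Betensky test, reusing the made-up INTERIM data set and setting C to 1 as in Pepe and Anderson (1992):

* Illustrative call reusing the INTERIM data set defined above;
%CondPowerPAB(interim,0.025,0.8,1,180,pabprob,pabbound);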

%BayesFutilityCont Macro: Predictive Power and Predictive Probability Tests for Continuous, Normally Distributed Endpoints

%macro BayesFutilityCont(data,par,delta,eta,alpha,prob);

/******************************

Inputs:    
DATA = Data set to be analyzed (includes number of patients, estimated mean treatment effects and sample standard deviations in two treatment groups at each interim look)
PAR = Name of an input data set with projected sample size in each treatment group and parameters of prior distributions
DELTA = Clinically significant difference (required by Bayesian predictive probability method and ignored by predictive power method)
ETA = Confidence level of Bayesian predictive probability method (required by Bayesian predictive probability method and ignored by predictive power method)
ALPHA = One-sided Type I error probability of the significance test carried out at the end of the trial (required by predictive power method and ignored by Bayesian predictive probability method)
PROB = Name of an output data set containing predictive power and predictive probability at each interim look

*******************************/

proc iml;
    use &data;
    read all var {n1 mean1 sd1 n2 mean2 sd2} into data;
    n=(data[,1]+data[,4])/2;
    s=sqrt(((data[,1]-1)#data[,3]#data[,3]+(data[,4]-1)
    #data[,6]#data[,6])/(data[,1]+data[,4]-2));
    z=(data[,2]-data[,5])/(s#sqrt(1/data[,1]+1/data[,4]));
    m=nrow(data);
    use &par;
    read all var {nn1 nn2 mu1 mu2 sigma} into par;
    nn=(par[,1]+par[,2])/2;
    sigma=par[,5];
    a1=(n-nn)/nn+(nn-n)/(nn#(1+(s/sigma)#(s/sigma)/n));
    b1=sqrt(n/(2*nn))#(nn-n)#(par[,3]-par[,4])/(1+n#(sigma/s)
     #(sigma/s));
    c1=1/(1+n#(sigma/s)#(sigma/s));
    output=j(m,4,0);
    output[,1]=t(1:m);
    output[,2]=n/nn;
    num1=sqrt(nn)#z#(1+a1)+b1/s-sqrt(n)*probit(1-&alpha);
    den1=sqrt((nn-n)#((nn-n)#(1-c1)+n)/nn);
    output[,3]=probnorm(num1/den1);
    an=1/(1+n#(sigma/s)#(sigma/s));
    ann=1/(1+nn#(sigma/s)#(sigma/s));
    b2=sqrt(n#nn/2)#(par[,3]-par[,4])/(1+n#(sigma/s)#(sigma/s));
    c2=1-1/sqrt(1+(s/sigma)#(s/sigma)/nn);
    num2=sqrt(nn)#z#(1-an)+b2/s-(&delta/s)#sqrt(n#nn/2)-sqrt(n)
      #(1-c2)*probit(&eta);
    den2=sqrt((1-ann)#(1-ann)#(nn-n)#((nn-n)#(1-an)+n)/nn);
    output[,4]=probnorm(num2/den2);
    varnames={"Analysis", "Fraction", "PredPower", "PredProb"};
    create &prob from output[colname=varnames];
    append from output;
    quit;
    data &prob;
    set &prob;
    format Fraction 4.2 PredPower PredProb 6.4;
    label Fraction="Fraction of total sample size"
     PredPower="Predictive power"
     PredProb="Predictive probability";
%mend BayesFutilityCont;
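
A hypothetical call. SUMMARIES holds made-up interim summary statistics, and PRIORS holds a made-up projected sample size and prior parameters in the variables the macro expects (NN1, NN2, MU1, MU2, SIGMA):

* Hypothetical input data for illustration only;
data summaries;
    input n1 mean1 sd1 n2 mean2 sd2 @@;
    datalines;
55 0.30 1.01 55 0.41 1.12
;
data priors;
    input nn1 nn2 mu1 mu2 sigma;
    datalines;
125 125 0.4 0.4 1
;
%BayesFutilityCont(summaries,priors,0.2,0.9,0.025,futprob);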

%BayesFutilityBin Macro: Predictive Power and Predictive Probability Tests for Binary Endpoints

%macro BayesFutilityBin(data,par,delta,eta,alpha,prob);

/*******************************

Inputs:    
DATA = Data set to be analyzed (includes number of patients and observed event counts in two treatment groups at each interim look)
PAR = Name of an input data set with projected sample size in each treatment group and parameters of prior distributions
DELTA = Clinically significant difference (required by Bayesian predictive probability method and ignored by predictive power method)
ETA = Confidence level of Bayesian predictive probability method (required by Bayesian predictive probability method and ignored by predictive power method)
ALPHA = One-sided Type I error probability of the significance test carried out at the end of the trial (required by predictive power method and ignored by Bayesian predictive probability method)
PROB = Name of an output data set containing predictive power and predictive probability at each interim look

*****************************/

proc iml;
    start integral(p) global(ast,bst);
      i=p**(ast[1]-1)*(1-p)**(bst[1]-1)*
          probbeta(p-&delta,ast[2],bst[2]);
      return(i);
    finish;
    start beta(a,b);
      beta=exp(lgamma(a)+lgamma(b)-lgamma(a+b));
      return(beta);
    finish;
    use &data;
    read all var {n1 count1 n2 count2} into data;
    n=data[,1]||data[,3];
    s=data[,2]||data[,4];
    m=nrow(n);
    use &par;
    read all var {nn1 nn2 alpha1 alpha2 beta1 beta2} into par;
    nn=t(par[1:2]);
    a=t(par[3:4]);
    b=t(par[5:6]);
    t=j(1,2,0);
    output=j(m,4,0);
    range=j(1,2,0);
    range[1]=&delta; range[2]=1;
    do i=1 to m;
    output[i,1]=i;
    output[i,2]=(n[i,1]+n[i,2])/(nn[1]+nn[2]);
    do t1=0 to nn[1]-n[i,1];
    do t2=0 to nn[2]-n[i,2];
      t[1]=t1; t[2]=t2;
      ast=s[i,]+t+a;
      bst=nn-s[i,]-t+b;
      b1=beta(ast,bst);
      b2=exp(lgamma(nn-n[i,]+1)-lgamma(nn-n[i,]-t+1)-lgamma(t+1));
      b3=beta(s[i,]+a,n[i,]-s[i,]+b);
      pr=(b1#b2)/b3;
      mult=pr[1]*pr[2];
      p=(s[i,]+t)/nn;
      ave=(p[1]+p[2])/2;
      aven=(nn[1]+nn[2])/2;
      teststat=(p[1]-p[2])/sqrt(2*ave*(1-ave)/aven);
      output[i,3]=output[i,3]+(teststat>probit(1-&alpha))*mult;
      call quad(value,"integral",range) eps=1e-8;
      output[i,4]=output[i,4]+(value/beta(ast[1],bst[1])>&eta)*mult;
    end;
    end;
    end;
    varnames={"Analysis", "Fraction", "PredPower", "PredProb"};
    create &prob from output[colname=varnames];
    append from output;
    quit;
data &prob;
    set &prob;
    format Fraction 4.2 PredPower PredProb 6.4;
    label Fraction="Fraction of total sample size"
     PredPower="Predictive power"
     PredProb="Predictive probability";
%mend BayesFutilityBin;
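
A parallel sketch for a binary endpoint; the COUNTS and PRIORS data sets are made up, with ALPHA1, ALPHA2, BETA1 and BETA2 specifying uniform beta priors:

* Hypothetical input data for illustration only;
data counts;
    input n1 count1 n2 count2 @@;
    datalines;
50 14 50 9
;
data priors;
    input nn1 nn2 alpha1 alpha2 beta1 beta2;
    datalines;
150 150 1 1 1 1
;
%BayesFutilityBin(counts,priors,0.1,0.9,0.025,futprob);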
