+/* cobyla : constrained optimization by linear approximation */
+ * Copyright (c) 1992, Michael J. D. Powell (M.J.D.Powell@damtp.cam.ac.uk)
+ * Copyright (c) 2004, Jean-Sebastien Roy (js@jeannot.org)
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * This software is a C version of COBYLA2, a constrained optimization by linear
+ * approximation package developed by Michael J. D. Powell in Fortran.
+ * The original source code can be found at :
+ * http://plato.la.asu.edu/topics/problems/nlores.html
+static char const rcsid[] =
+ "@(#) $Jeannot: cobyla.c,v 1.11 2004/04/18 09:51:36 js Exp $";
+#define min(a,b) ((a) <= (b) ? (a) : (b))
+#define max(a,b) ((a) >= (b) ? (a) : (b))
+#define abs(x) ((x) >= 0 ? (x) : -(x))
+char *cobyla_rc_string[6] =
+ "Memory allocation failed",
+ "Normal return from cobyla",
+ "Maximum number of function evaluations reached",
+ "Rounding errors are becoming damaging",
+ "User requested end of minimization"
+static int cobylb(int *n, int *m, int *mpp, double *x, double *rhobeg,
+ double *rhoend, int *iprint, int *maxfun, double *con, double *sim,
+ double *simi, double *datmat, double *a, double *vsig, double *veta,
+ double *sigbar, double *dx, double *w, int *iact, cobyla_function *calcfc,
+static int trstlp(int *n, int *m, double *a, double *b, double *rho,
+ double *dx, int *ifull, int *iact, double *z__, double *zdota, double *vmultc,
+ double *sdirn, double *dxnew, double *vmultd);
+/* ------------------------------------------------------------------------ */
+int cobyla(int n, int m, double *x, double rhobeg, double rhoend, int iprint,
+ int *maxfun, cobyla_function *calcfc, void *state)
+ int icon, isim, isigb, idatm, iveta, isimi, ivsig, iwork, ia, idx, mpp, rc;
+ * This subroutine minimizes an objective function F(X) subject to M
+ * inequality constraints on X, where X is a vector of variables that has
+ * N components. The algorithm employs linear approximations to the
+ * objective and constraint functions, the approximations being formed by
+ * linear interpolation at N+1 points in the space of the variables.
+ * We regard these interpolation points as vertices of a simplex. The
+ * parameter RHO controls the size of the simplex and it is reduced
+ * automatically from RHOBEG to RHOEND. For each RHO the subroutine tries
+ * to achieve a good vector of variables for the current size, and then
+ * RHO is reduced until the value RHOEND is reached. Therefore RHOBEG and
+ * RHOEND should be set to reasonable initial changes to and the required
+ * accuracy in the variables respectively, but this accuracy should be
+ * viewed as a subject for experimentation because it is not guaranteed.
+ * The subroutine has an advantage over many of its competitors, however,
+ * which is that it treats each constraint individually when calculating
+ * a change to the variables, instead of lumping the constraints together
+ * into a single penalty function. The name of the subroutine is derived
+ * from the phrase Constrained Optimization BY Linear Approximations.
+ * The user must set the values of N, M, RHOBEG and RHOEND, and must
+ * provide an initial vector of variables in X. Further, the value of
+ * IPRINT should be set to 0, 1, 2 or 3, which controls the amount of
+ * printing during the calculation. Specifically, there is no output if
+ * IPRINT=0 and there is output only at the end of the calculation if
+ * IPRINT=1. Otherwise each new value of RHO and SIGMA is printed.
+ * Further, the vector of variables and some function information are
+ * given either when RHO is reduced or when each new value of F(X) is
+ * computed in the cases IPRINT=2 or IPRINT=3 respectively. Here SIGMA
+ * is a penalty parameter, it being assumed that a change to X is an
+ * improvement if it reduces the merit function
+ * F(X)+SIGMA*MAX(0.0,-C1(X),-C2(X),...,-CM(X)),
+ * where C1,C2,...,CM denote the constraint functions that should become
+ * nonnegative eventually, at least to the precision of RHOEND. In the
+ * printed output the displayed term that is multiplied by SIGMA is
+ * called MAXCV, which stands for 'MAXimum Constraint Violation'. The
+ * argument MAXFUN is an int variable that must be set by the user to a
+ * limit on the number of calls of CALCFC, the purpose of this routine being
+ * given below. The value of MAXFUN will be altered to the number of calls
+ * of CALCFC that are made. The arguments W and IACT provide real and
+ * int arrays that are used as working space. Their lengths must be at
+ * least N*(3*N+2*M+11)+4*M+6 and M+1 respectively.
+ * In order to define the objective and constraint functions, we require
+ * a subroutine that has the name and arguments
+ * SUBROUTINE CALCFC (N,M,X,F,CON)
+ * DIMENSION X(*),CON(*) .
+ * The values of N and M are fixed and have been defined already, while
+ * X is now the current vector of variables. The subroutine should return
+ * the objective and constraint functions at X in F and CON(1),CON(2),
+ * ...,CON(M). Note that we are trying to adjust X so that F(X) is as
+ * small as possible subject to the constraint functions being nonnegative.
+ * Partition the working space array W to provide the storage that is needed
+ * for the main calculation.
+ if (iprint>=1) fprintf(stderr, "cobyla: N==0.\n");
+ if (iprint>=1) fprintf(stderr, "cobyla: N<0 or M<0.\n");
+ /* workspace allocation */
+ w = malloc((n*(3*n+2*m+11)+4*m+6)*sizeof(*w));
+ if (iprint>=1) fprintf(stderr, "cobyla: memory allocation error.\n");
+ iact = malloc((m+1)*sizeof(*iact));
+ if (iprint>=1) fprintf(stderr, "cobyla: memory allocation error.\n");
+ /* Parameter adjustments */
+ isimi = isim + n * n + n;
+ ia = idatm + n * mpp + mpp;
+ ivsig = ia + m * n + n;
+ rc = cobylb(&n, &m, &mpp, &x[1], &rhobeg, &rhoend, &iprint, maxfun,
+ &w[icon], &w[isim], &w[isimi], &w[idatm], &w[ia], &w[ivsig], &w[iveta],
+ &w[isigb], &w[idx], &w[iwork], &iact[1], calcfc, state);
+ /* Parameter adjustments (reverse) */
+/* ------------------------------------------------------------------------- */
+int cobylb(int *n, int *m, int *mpp, double
+ *x, double *rhobeg, double *rhoend, int *iprint, int *
+ maxfun, double *con, double *sim, double *simi,
+ double *datmat, double *a, double *vsig, double *veta,
+ double *sigbar, double *dx, double *w, int *iact, cobyla_function *calcfc,
+ /* System generated locals */
+ int sim_dim1, sim_offset, simi_dim1, simi_offset, datmat_dim1,
+ datmat_offset, a_dim1, a_offset, i__1, i__2, i__3;
+ double alpha, delta, denom, tempa, barmu;
+ double beta, cmin = 0.0, cmax = 0.0;
+ double cvmaxm, dxsign, prerem = 0.0;
+ double edgmax, pareta, prerec = 0.0, phimin, parsig = 0.0;
+ double phi, rho, sum = 0.0;
+ double ratio, vmold, parmu, error, vmnew;
+ int isdirn, nfvals, izdota;
+ int mp, np, iz, ibrnch;
+ int nbest, ifull, iptem, jdrop;
+/* Set the initial values of some parameters. The last column of SIM holds */
+/* the optimal vertex of the current simplex, and the preceding N columns */
+/* hold the displacements from the optimal vertex to the other vertices. */
+/* Further, SIMI holds the inverse of the matrix that is contained in the */
+/* first N columns of SIM. */
+ /* Parameter adjustments */
+ a_offset = 1 + a_dim1 * 1;
+ simi_offset = 1 + simi_dim1 * 1;
+ sim_offset = 1 + sim_dim1 * 1;
+ datmat_offset = 1 + datmat_dim1 * 1;
+ datmat -= datmat_offset;
+ "cobyla: the initial value of RHO is %12.6E and PARMU is set to zero.\n",
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ sim[i__ + np * sim_dim1] = x[i__];
+ for (j = 1; j <= i__2; ++j) {
+ sim[i__ + j * sim_dim1] = 0.;
+ simi[i__ + j * simi_dim1] = 0.;
+ sim[i__ + i__ * sim_dim1] = rho;
+ simi[i__ + i__ * simi_dim1] = temp;
+/* Make the next call of the user-supplied subroutine CALCFC. These */
+/* instructions are also used for calling CALCFC during the iterations of */
+ if (nfvals >= *maxfun && nfvals > 0) {
+ "cobyla: maximum number of function evaluations reach.\n");
+ if (calcfc(*n, *m, &x[1], &f, &con[1], state))
+ fprintf(stderr, "cobyla: user requested end of minimization.\n");
+ for (k = 1; k <= i__1; ++k) {
+ d__1 = resmax, d__2 = -con[k];
+ resmax = max(d__1,d__2);
+ if (nfvals == *iprint - 1 || *iprint == 3) {
+ fprintf(stderr, "cobyla: NFVALS = %4d, F =%13.6E, MAXCV =%13.6E\n",
+ fprintf(stderr, "cobyla: X =");
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (i__>1) fprintf(stderr, " ");
+ fprintf(stderr, "%13.6E", x[i__]);
+ for (i__ = iptemp; i__ <= i__1; ++i__) {
+ if (!((i__-1) % 4)) fprintf(stderr, "\ncobyla: ");
+ fprintf(stderr, "%15.6E", x[i__]);
+/* Set the recently calculated function values in a column of DATMAT. This */
+/* array has a column for each vertex of the current simplex, the entries of */
+/* each column being the values of the constraint functions (if any) */
+/* followed by the objective function and the greatest constraint violation */
+ for (k = 1; k <= i__1; ++k) {
+ datmat[k + jdrop * datmat_dim1] = con[k];
+/* Exchange the new vertex of the initial simplex with the optimal vertex if */
+/* necessary. Then, if the initial simplex is not complete, pick its next */
+/* vertex and calculate the function values there. */
+ if (datmat[mp + np * datmat_dim1] <= f) {
+ x[jdrop] = sim[jdrop + np * sim_dim1];
+ sim[jdrop + np * sim_dim1] = x[jdrop];
+ for (k = 1; k <= i__1; ++k) {
+ datmat[k + jdrop * datmat_dim1] = datmat[k + np * datmat_dim1]
+ datmat[k + np * datmat_dim1] = con[k];
+ for (k = 1; k <= i__1; ++k) {
+ sim[jdrop + k * sim_dim1] = -rho;
+ for (i__ = k; i__ <= i__2; ++i__) {
+ temp -= simi[i__ + k * simi_dim1];
+ simi[jdrop + k * simi_dim1] = temp;
+/* Identify the optimal vertex of the current simplex. */
+ phimin = datmat[mp + np * datmat_dim1] + parmu * datmat[*mpp + np *
+ for (j = 1; j <= i__1; ++j) {
+ temp = datmat[mp + j * datmat_dim1] + parmu * datmat[*mpp + j *
+ } else if (temp == phimin && parmu == 0.) {
+ if (datmat[*mpp + j * datmat_dim1] < datmat[*mpp + nbest *
+/* Switch the best vertex into pole position if it is not there already, */
+/* and also update SIM, SIMI and DATMAT. */
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ temp = datmat[i__ + np * datmat_dim1];
+ datmat[i__ + np * datmat_dim1] = datmat[i__ + nbest * datmat_dim1]
+ datmat[i__ + nbest * datmat_dim1] = temp;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ temp = sim[i__ + nbest * sim_dim1];
+ sim[i__ + nbest * sim_dim1] = 0.;
+ sim[i__ + np * sim_dim1] += temp;
+ for (k = 1; k <= i__2; ++k) {
+ sim[i__ + k * sim_dim1] -= temp;
+ tempa -= simi[k + i__ * simi_dim1];
+ simi[nbest + i__ * simi_dim1] = tempa;
+/* Make an error return if SIGI is a poor approximation to the inverse of */
+/* the leading N by N submatrix of SIG. */
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ for (j = 1; j <= i__2; ++j) {
+ for (k = 1; k <= i__3; ++k) {
+ temp += simi[i__ + k * simi_dim1] * sim[k + j * sim_dim1];
+ d__1 = error, d__2 = abs(temp);
+ error = max(d__1,d__2);
+ fprintf(stderr, "cobyla: rounding errors are becoming damaging.\n");
+/* Calculate the coefficients of the linear approximations to the objective */
+/* and constraint functions, placing minus the objective function gradient */
+/* after the constraint gradients in the array A. The vector W is used for */
+ for (k = 1; k <= i__2; ++k) {
+ con[k] = -datmat[k + np * datmat_dim1];
+ for (j = 1; j <= i__1; ++j) {
+ w[j] = datmat[k + j * datmat_dim1] + con[k];
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ for (j = 1; j <= i__3; ++j) {
+ temp += w[j] * simi[j + i__ * simi_dim1];
+ a[i__ + k * a_dim1] = temp;
+/* Calculate the values of sigma and eta, and set IFLAG=0 if the current */
+/* simplex is not acceptable. */
+ for (j = 1; j <= i__1; ++j) {
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ d__1 = simi[j + i__ * simi_dim1];
+ d__1 = sim[i__ + j * sim_dim1];
+ vsig[j] = 1. / sqrt(wsig);
+ if (vsig[j] < parsig || veta[j] > pareta) {
+/* If a new vertex is needed to improve acceptability, then decide which */
+/* vertex to drop from the simplex. */
+ if (ibrnch == 1 || iflag == 1) {
+ for (j = 1; j <= i__1; ++j) {
+ for (j = 1; j <= i__1; ++j) {
+/* Calculate the step to the new vertex and its sign. */
+ temp = gamma * rho * vsig[jdrop];
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dx[i__] = temp * simi[jdrop + i__ * simi_dim1];
+ for (k = 1; k <= i__1; ++k) {
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ sum += a[i__ + k * a_dim1] * dx[i__];
+ temp = datmat[k + np * datmat_dim1];
+ d__1 = cvmaxp, d__2 = -sum - temp;
+ cvmaxp = max(d__1,d__2);
+ d__1 = cvmaxm, d__2 = sum - temp;
+ cvmaxm = max(d__1,d__2);
+ if (parmu * (cvmaxp - cvmaxm) > sum + sum) {
+/* Update the elements of SIM and SIMI, and set the next X. */
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dx[i__] = dxsign * dx[i__];
+ sim[i__ + jdrop * sim_dim1] = dx[i__];
+ temp += simi[jdrop + i__ * simi_dim1] * dx[i__];
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ simi[jdrop + i__ * simi_dim1] /= temp;
+ for (j = 1; j <= i__1; ++j) {
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp += simi[j + i__ * simi_dim1] * dx[i__];
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ simi[j + i__ * simi_dim1] -= temp * simi[jdrop + i__ *
+ x[j] = sim[j + np * sim_dim1] + dx[j];
+/* Calculate DX=x(*)-x(0). Branch if the length of DX is less than 0.5*RHO. */
+ trstlp(n, m, &a[a_offset], &con[1], &rho, &dx[1], &ifull, &iact[1], &w[
+ iz], &w[izdota], &w[ivmc], &w[isdirn], &w[idxnew], &w[ivmd]);
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (temp < rho * .25 * rho) {
+/* Predict the change to F and the new maximum constraint violation if the */
+/* variables are altered from x(0) to x(0)+DX. */
+ for (k = 1; k <= i__1; ++k) {
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ sum -= a[i__ + k * a_dim1] * dx[i__];
+ resnew = max(resnew,sum);
+/* Increase PARMU if necessary and branch back if this change alters the */
+/* optimal vertex. Otherwise PREREM and PREREC will be set to the predicted */
+/* reductions in the merit function and the maximum constraint violation */
+ prerec = datmat[*mpp + np * datmat_dim1] - resnew;
+ if (parmu < barmu * 1.5) {
+ fprintf(stderr, "cobyla: increase in PARMU to %12.6E\n", parmu);
+ phi = datmat[mp + np * datmat_dim1] + parmu * datmat[*mpp + np *
+ for (j = 1; j <= i__1; ++j) {
+ temp = datmat[mp + j * datmat_dim1] + parmu * datmat[*mpp + j *
+ if (temp == phi && parmu == 0.f) {
+ if (datmat[*mpp + j * datmat_dim1] < datmat[*mpp + np *
+ prerem = parmu * prerec - sum;
+/* Calculate the constraint and objective functions at x(*). Then find the */
+/* actual reduction in the merit function. */
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ x[i__] = sim[i__ + np * sim_dim1] + dx[i__];
+ vmold = datmat[mp + np * datmat_dim1] + parmu * datmat[*mpp + np *
+ vmnew = f + parmu * resmax;
+ trured = vmold - vmnew;
+ if (parmu == 0. && f == datmat[mp + np * datmat_dim1]) {
+ trured = datmat[*mpp + np * datmat_dim1] - resmax;
+/* Begin the operations that decide whether x(*) should replace one of the */
+/* vertices of the current simplex, the change being mandatory if TRURED is */
+/* positive. Firstly, JDROP is set to the index of the vertex that is to be */
+ for (j = 1; j <= i__1; ++j) {
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp += simi[j + i__ * simi_dim1] * dx[i__];
+ sigbar[j] = temp * vsig[j];
+/* Calculate the value of ell. */
+ for (j = 1; j <= i__1; ++j) {
+ if (sigbar[j] >= parsig || sigbar[j] >= vsig[j]) {
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ d__1 = dx[i__] - sim[i__ + j * sim_dim1];
+/* Revise the simplex by updating the elements of SIM, SIMI and DATMAT. */
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ sim[i__ + jdrop * sim_dim1] = dx[i__];
+ temp += simi[jdrop + i__ * simi_dim1] * dx[i__];
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ simi[jdrop + i__ * simi_dim1] /= temp;
+ for (j = 1; j <= i__1; ++j) {
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp += simi[j + i__ * simi_dim1] * dx[i__];
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ simi[j + i__ * simi_dim1] -= temp * simi[jdrop + i__ *
+ for (k = 1; k <= i__1; ++k) {
+ datmat[k + jdrop * datmat_dim1] = con[k];
+/* Branch back for further iterations with the current RHO. */
+ if (trured > 0. && trured >= prerem * .1) {
+/* Otherwise reduce RHO if it is not at its least value and reset PARMU. */
+ if (rho <= *rhoend * 1.5) {
+ for (k = 1; k <= i__1; ++k) {
+ cmin = datmat[k + np * datmat_dim1];
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ d__1 = cmin, d__2 = datmat[k + i__ * datmat_dim1];
+ d__1 = cmax, d__2 = datmat[k + i__ * datmat_dim1];
+ if (k <= *m && cmin < cmax * .5) {
+ temp = max(cmax,0.) - cmin;
+ denom = min(denom,temp);
+ } else if (cmax - cmin < parmu * denom) {
+ parmu = (cmax - cmin) / denom;
+ fprintf(stderr, "cobyla: reduction in RHO to %12.6E and PARMU =%13.6E\n",
+ fprintf(stderr, "cobyla: NFVALS = %4d, F =%13.6E, MAXCV =%13.6E\n",
+ nfvals, datmat[mp + np * datmat_dim1], datmat[*mpp + np * datmat_dim1]);
+ fprintf(stderr, "cobyla: X =");
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (i__>1) fprintf(stderr, " ");
+ fprintf(stderr, "%13.6E", sim[i__ + np * sim_dim1]);
+ for (i__ = iptemp; i__ <= i__1; ++i__) {
+ if (!((i__-1) % 4)) fprintf(stderr, "\ncobyla: ");
+ fprintf(stderr, "%15.6E", x[i__]);
+/* Return the best calculated values of the variables. */
+ fprintf(stderr, "cobyla: normal return.\n");
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ x[i__] = sim[i__ + np * sim_dim1];
+ f = datmat[mp + np * datmat_dim1];
+ resmax = datmat[*mpp + np * datmat_dim1];
+ fprintf(stderr, "cobyla: NFVALS = %4d, F =%13.6E, MAXCV =%13.6E\n",
+ fprintf(stderr, "cobyla: X =");
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (i__>1) fprintf(stderr, " ");
+ fprintf(stderr, "%13.6E", x[i__]);
+ for (i__ = iptemp; i__ <= i__1; ++i__) {
+ if (!((i__-1) % 4)) fprintf(stderr, "\ncobyla: ");
+ fprintf(stderr, "%15.6E", x[i__]);
+/* ------------------------------------------------------------------------- */
+int trstlp(int *n, int *m, double *a,
+ double *b, double *rho, double *dx, int *ifull,
+ int *iact, double *z__, double *zdota, double *vmultc,
+ double *sdirn, double *dxnew, double *vmultd)
+ /* System generated locals */
+ int a_dim1, a_offset, z_dim1, z_offset, i__1, i__2;
+ double optnew, stpful, sum, tot, acca, accb;
+ double ratio, vsave, zdotv, zdotw, dd;
+ double sp, ss, resold = 0.0, zdvabs, zdwabs, sumabs, resmax, optold;
+ int nact, icon = 0, mcon;
+/* This subroutine calculates an N-component vector DX by applying the */
+/* following two stages. In the first stage, DX is set to the shortest */
+/* vector that minimizes the greatest violation of the constraints */
+/* A(1,K)*DX(1)+A(2,K)*DX(2)+...+A(N,K)*DX(N) .GE. B(K), K=2,3,...,M, */
+/* subject to the Euclidean length of DX being at most RHO. If its length is */
+/* strictly less than RHO, then we use the resultant freedom in DX to */
+/* minimize the objective function */
+/* -A(1,M+1)*DX(1)-A(2,M+1)*DX(2)-...-A(N,M+1)*DX(N) */
+/* subject to no increase in any greatest constraint violation. This */
+/* notation allows the gradient of the objective function to be regarded as */
+/* the gradient of a constraint. Therefore the two stages are distinguished */
+/* by MCON .EQ. M and MCON .GT. M respectively. It is possible that a */
+/* degeneracy may prevent DX from attaining the target length RHO. Then the */
+/* value IFULL=0 would be set, but usually IFULL=1 on return. */
+/* In general NACT is the number of constraints in the active set and */
+/* IACT(1),...,IACT(NACT) are their indices, while the remainder of IACT */
+/* contains a permutation of the remaining constraint indices. Further, Z is */
+/* an orthogonal matrix whose first NACT columns can be regarded as the */
+/* result of Gram-Schmidt applied to the active constraint gradients. For */
+/* J=1,2,...,NACT, the number ZDOTA(J) is the scalar product of the J-th */
+/* column of Z with the gradient of the J-th active constraint. DX is the */
+/* current vector of variables and here the residuals of the active */
+/* constraints should be zero. Further, the active constraints have */
+/* nonnegative Lagrange multipliers that are held at the beginning of */
+/* VMULTC. The remainder of this vector holds the residuals of the inactive */
+/* constraints at DX, the ordering of the components of VMULTC being in */
+/* agreement with the permutation of the indices of the constraints that is */
+/* in IACT. All these residuals are nonnegative, which is achieved by the */
+/* shift RESMAX that makes the least residual zero. */
+/* Initialize Z and some other variables. The value of RESMAX will be */
+/* appropriate to DX=0, while ICON will be the index of a most violated */
+/* constraint if RESMAX is positive. Usually during the first stage the */
+/* vector SDIRN gives a search direction that reduces all the active */
+/* constraint violations by one simultaneously. */
+ /* Parameter adjustments */
+ z_offset = 1 + z_dim1 * 1;
+ a_offset = 1 + a_dim1 * 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ for (j = 1; j <= i__2; ++j) {
+ z__[i__ + j * z_dim1] = 0.;
+ z__[i__ + i__ * z_dim1] = 1.;
+ for (k = 1; k <= i__1; ++k) {
+ for (k = 1; k <= i__1; ++k) {
+ vmultc[k] = resmax - b[k];
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* End the current stage of the calculation if 3 consecutive iterations */
+/* have either failed to reduce the best calculated value of the objective */
+/* function or to increase the number of active constraints since the best */
+/* value was calculated. This strategy prevents cycling, but there is a */
+/* remote possibility that it will cause premature termination. */
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ optnew -= dx[i__] * a[i__ + mcon * a_dim1];
+ if (icount == 0 || optnew < optold) {
+ } else if (nact > nactx) {
+/* If ICON exceeds NACT, then we add the constraint with index IACT(ICON) to */
+/* the active set. Apply Givens rotations so that the last N-NACT-1 columns */
+/* of Z are orthogonal to the gradient of the new constraint, a scalar */
+/* product being set to zero if its nonzero value could be due to computer */
+/* rounding errors. The array DXNEW is used for working space. */
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dxnew[i__] = a[i__ + kk * a_dim1];
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ temp = z__[i__ + k * z_dim1] * dxnew[i__];
+ acca = spabs + abs(sp) * .1;
+ accb = spabs + abs(sp) * .2;
+ if (spabs >= acca || acca >= accb) {
+ temp = sqrt(sp * sp + tot * tot);
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ temp = alpha * z__[i__ + k * z_dim1] + beta * z__[i__ + kp *
+ z__[i__ + kp * z_dim1] = alpha * z__[i__ + kp * z_dim1] -
+ beta * z__[i__ + k * z_dim1];
+ z__[i__ + k * z_dim1] = temp;
+/* Add the new constraint if this can be done without a deletion from the */
+ vmultc[icon] = vmultc[nact];
+/* The next instruction is reached if a deletion has to be made from the */
+/* active set in order to make room for the new active constraint, because */
+/* the new constraint gradient is a linear combination of the gradients of */
+/* the old active constraints. Set the elements of VMULTD to the multipliers */
+/* of the linear combination. Further, set IOUT to the index of the */
+/* constraint to be deleted, but branch if no suitable index can be found. */
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ temp = z__[i__ + k * z_dim1] * dxnew[i__];
+ acca = zdvabs + abs(zdotv) * .1;
+ accb = zdvabs + abs(zdotv) * .2;
+ if (zdvabs < acca && acca < accb) {
+ temp = zdotv / zdota[k];
+ if (temp > 0. && iact[k] <= *m) {
+ tempa = vmultc[k] / temp;
+ if (ratio < 0. || tempa < ratio) {
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dxnew[i__] -= temp * a[i__ + kw * a_dim1];
+/* Revise the Lagrange multipliers and reorder the active constraints so */
+/* that the one to be replaced is at the end of the list. Also calculate the */
+/* new value of ZDOTA(NACT) and branch if it is not acceptable. */
+ for (k = 1; k <= i__1; ++k) {
+ d__1 = 0., d__2 = vmultc[k] - ratio * vmultd[k];
+ vmultc[k] = max(d__1,d__2);
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ sp += z__[i__ + k * z_dim1] * a[i__ + kw * a_dim1];
+ temp = sqrt(sp * sp + d__1 * d__1);
+ alpha = zdota[kp] / temp;
+ zdota[kp] = alpha * zdota[k];
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ temp = alpha * z__[i__ + kp * z_dim1] + beta * z__[i__ + k *
+ z__[i__ + kp * z_dim1] = alpha * z__[i__ + k * z_dim1] - beta *
+ z__[i__ + kp * z_dim1];
+ z__[i__ + k * z_dim1] = temp;
+ vmultc[k] = vmultc[kp];
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ temp += z__[i__ + nact * z_dim1] * a[i__ + kk * a_dim1];
+/* Update IACT and ensure that the objective function continues to be */
+/* treated as the last active constraint when MCON>M. */
+ iact[icon] = iact[nact];
+ if (mcon > *m && kk != mcon) {
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ sp += z__[i__ + k * z_dim1] * a[i__ + kk * a_dim1];
+ temp = sqrt(sp * sp + d__1 * d__1);
+ alpha = zdota[nact] / temp;
+ zdota[nact] = alpha * zdota[k];
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ temp = alpha * z__[i__ + nact * z_dim1] + beta * z__[i__ + k *
+ z__[i__ + nact * z_dim1] = alpha * z__[i__ + k * z_dim1] - beta *
+ z__[i__ + nact * z_dim1];
+ z__[i__ + k * z_dim1] = temp;
+ vmultc[k] = vmultc[nact];
+/* If stage one is in progress, then set SDIRN to the direction of the next */
+/* change to the current vector of variables. */
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ temp += sdirn[i__] * a[i__ + kk * a_dim1];
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ sdirn[i__] -= temp * z__[i__ + nact * z_dim1];
+/* Delete the constraint that has the index IACT(ICON) from the active set. */
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ sp += z__[i__ + k * z_dim1] * a[i__ + kk * a_dim1];
+ temp = sqrt(sp * sp + d__1 * d__1);
+ alpha = zdota[kp] / temp;
+ zdota[kp] = alpha * zdota[k];
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ temp = alpha * z__[i__ + kp * z_dim1] + beta * z__[i__ + k *
+ z__[i__ + kp * z_dim1] = alpha * z__[i__ + k * z_dim1] - beta *
+ z__[i__ + kp * z_dim1];
+ z__[i__ + k * z_dim1] = temp;
+ vmultc[k] = vmultc[kp];
+/* If stage one is in progress, then set SDIRN to the direction of the next */
+/* change to the current vector of variables. */
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ temp += sdirn[i__] * z__[i__ + (nact + 1) * z_dim1];
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ sdirn[i__] -= temp * z__[i__ + (nact + 1) * z_dim1];
+/* Pick the next search direction of stage two. */
+ temp = 1. / zdota[nact];
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ sdirn[i__] = temp * z__[i__ + nact * z_dim1];
+/* Calculate the step to the boundary of the trust region or take the step */
+/* that reduces RESMAX to zero. The two statements below that include the */
+/* factor 1.0E-6 prevent some harmless underflows that occurred in a test */
+/* calculation. Further, we skip the step if it could be zero within a */
+/* reasonable tolerance for computer rounding errors. */
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if ((d__1 = dx[i__], abs(d__1)) >= *rho * 1e-6f) {
+ sd += dx[i__] * sdirn[i__];
+ if (abs(sd) >= temp * 1e-6f) {
+ temp = sqrt(ss * dd + sd * sd);
+ stpful = dd / (temp + sd);
+ acca = step + resmax * .1;
+ accb = step + resmax * .2;
+ if (step >= acca || acca >= accb) {
+ step = min(step,resmax);
+/* Set DXNEW to the new variables if STEP is the steplength, and reduce */
+/* RESMAX to the corresponding maximum residual if stage one is being done. */
+/* Because DXNEW will be changed during the calculation of some Lagrange */
+/* multipliers, it will be restored to the following value later. */
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dxnew[i__] = dx[i__] + step * sdirn[i__];
+ for (k = 1; k <= i__1; ++k) {
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp -= a[i__ + kk * a_dim1] * dxnew[i__];
+ resmax = max(resmax,temp);
+/* Set VMULTD to the VMULTC vector that would occur if DX became DXNEW. A */
+/* device is included to force VMULTD(K)=0.0 if deviations from this value */
+/* can be attributed to computer rounding errors. First calculate the new */
+/* Lagrange multipliers. */
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ temp = z__[i__ + k * z_dim1] * dxnew[i__];
+ acca = zdwabs + abs(zdotw) * .1;
+ accb = zdwabs + abs(zdotw) * .2;
+ if (zdwabs >= acca || acca >= accb) {
+ vmultd[k] = zdotw / zdota[k];
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dxnew[i__] -= vmultd[k] * a[i__ + kk * a_dim1];
+ d__1 = 0., d__2 = vmultd[nact];
+ vmultd[nact] = max(d__1,d__2);
+/* Complete VMULTC by finding the new constraint residuals. */
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dxnew[i__] = dx[i__] + step * sdirn[i__];
+ for (k = kl; k <= i__1; ++k) {
+ sumabs = resmax + (d__1 = b[kk], abs(d__1));
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp = a[i__ + kk * a_dim1] * dxnew[i__];
+ acca = sumabs + abs(sum) * .1f;
+ accb = sumabs + abs(sum) * .2f;
+ if (sumabs >= acca || acca >= accb) {
+/* Calculate the fraction of the step from DX to DXNEW that will be taken. */
+ for (k = 1; k <= i__1; ++k) {
+ temp = vmultc[k] / (vmultc[k] - vmultd[k]);
+/* Update DX, VMULTC and RESMAX. */
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dx[i__] = temp * dx[i__] + ratio * dxnew[i__];
+ for (k = 1; k <= i__1; ++k) {
+ d__1 = 0., d__2 = temp * vmultc[k] + ratio * vmultd[k];
+ vmultc[k] = max(d__1,d__2);
+ resmax = resold + ratio * (resmax - resold);
+/* If the full step is not acceptable then begin another iteration. */
+/* Otherwise switch to stage two or end the calculation. */
+/* We employ any freedom that may be available to reduce the objective */
+/* function before returning a DX whose length is less than RHO. */