SUBROUTINE DOGLEG(n,r,lr,diag,qtb,delta,x,wa1,wa2)
! Given an M by N matrix A, an N by N nonsingular diagonal
! matrix D, an M-vector B, and a positive number DELTA, the
! problem is to determine the convex combination X of the
! Gauss-Newton and scaled gradient directions that minimizes
! (A*X - B) in the least squares sense, subject to the
! restriction that the Euclidean norm of D*X be at most DELTA.
!
! This subroutine completes the solution of the problem
! if it is provided with the necessary information from the
! QR factorization of A. That is, if A = Q*R, where Q has
! orthogonal columns and R is an upper triangular matrix,
! then DOGLEG expects the full upper triangle of R and
! the first N components of (Q transpose)*B.
!
! The subroutine statement is
!
! subroutine dogleg(n,r,lr,diag,qtb,delta,x,wa1,wa2)
!
! where
!
! N is a positive integer input variable set to the order of R.
!
! R is an input array of length LR which must contain the upper
! triangular matrix R stored by rows.
!
! LR is a positive integer input variable not less than
! (N*(N+1))/2.
!
! DIAG is an input array of length N which must contain the
! diagonal elements of the matrix D.
!
! QTB is an input array of length N which must contain the first
! N elements of the vector (Q transpose)*B.
!
! DELTA is a positive input variable which specifies an upper
! bound on the Euclidean norm of D*X.
!
! X is an output array of length N which contains the desired
! convex combination of the Gauss-Newton direction and the
! scaled gradient direction.
!
! WA1 and WA2 are work arrays of length N.
!
! NOTE(review): the kind parameter DP and the functions SPMPAR
! (machine precision) and ENORM (Euclidean norm) are assumed to be
! provided by the host module -- confirm against the enclosing file.
!
! Argonne National Laboratory. Minpack project. March 1980.
! Burton S. Garbow, Kenneth E. Hillstrom, Jorge J. More
IMPLICIT NONE
INTEGER, INTENT(IN) :: n, lr
real(dp), INTENT(IN) :: delta
real(dp), INTENT(IN) :: r(lr), diag(n), qtb(n)
real(dp), INTENT(OUT) :: x(n), wa1(n), wa2(n)
real(dp), PARAMETER :: one = 1.0_dp, zero = 0.0_dp
INTEGER :: i, j, jj, k, l
real(dp) :: alpha, bnorm, epsmch, gnorm, qnorm, sgnorm, rowsum, temp
! Machine precision.
epsmch = spmpar(1)
! First, calculate the Gauss-Newton direction by back substitution
! on the row-stored upper triangle R, i.e. solve R*x = qtb.
jj = (n*(n + 1))/2 + 1
do k = 1, n
   j = n - k + 1
   jj = jj - k
   l = jj + 1
   rowsum = zero
   do i = j + 1, n
      ! Accumulate the already-solved components of row j.
      ! (Zero-trip when j == n, so no explicit guard is needed.)
      rowsum = rowsum + r(l)*x(i)
      l = l + 1
   end do
   temp = r(jj)
   if (temp == zero) then
      ! R is singular in position j: replace the zero pivot by a
      ! small multiple of the largest element in column j so the
      ! division below stays finite.
      l = j
      do i = 1, j
         temp = max(temp,abs(r(l)))
         l = l + n - i
      end do
      temp = epsmch*temp
      if (temp == zero) temp = epsmch
   end if
   x(j) = (qtb(j) - rowsum)/temp
end do
! Test whether the Gauss-Newton direction is acceptable.
do j = 1, n
   wa1(j) = zero
   wa2(j) = diag(j)*x(j)
end do
qnorm = enorm(n,wa2)
if (qnorm <= delta) return
! The Gauss-Newton direction is not acceptable.
! Next, calculate the scaled gradient direction D^{-1}*R^T*qtb.
l = 1
do j = 1, n
   temp = qtb(j)
   do i = j, n
      wa1(i) = wa1(i) + r(l)*temp
      l = l + 1
   end do
   wa1(j) = wa1(j)/diag(j)
end do
! Calculate the norm of the scaled gradient and test for
! the special case in which the scaled gradient is zero.
gnorm = enorm(n,wa1)
sgnorm = zero
! Fallback when the gradient vanishes: scale the Gauss-Newton
! step back to the trust-region boundary.
alpha = delta/qnorm
if (gnorm /= zero) then
   ! Calculate the point along the scaled gradient
   ! at which the quadratic is minimized.
   do j = 1, n
      wa1(j) = (wa1(j)/gnorm)/diag(j)
   end do
   l = 1
   do j = 1, n
      rowsum = zero
      do i = j, n
         rowsum = rowsum + r(l)*wa1(i)
         l = l + 1
      end do
      wa2(j) = rowsum
   end do
   temp = enorm(n,wa2)
   sgnorm = (gnorm/temp)/temp
   ! Test whether the scaled gradient direction is acceptable.
   alpha = zero
   if (sgnorm < delta) then
      ! The scaled gradient direction is not acceptable either.
      ! Finally, calculate the point along the dogleg at which
      ! the quadratic is minimized on the trust-region boundary.
      bnorm = enorm(n,qtb)
      temp = (bnorm/gnorm)*(bnorm/qnorm)*(sgnorm/delta)
      temp = temp - (delta/qnorm)*(sgnorm/delta)**2 &
           + sqrt((temp-(delta/qnorm))**2 &
           + (one-(delta/qnorm)**2)*(one-(sgnorm/delta)**2))
      alpha = ((delta/qnorm)*(one - (sgnorm/delta)**2))/temp
   end if
end if
! Form appropriate convex combination of the Gauss-Newton
! direction and the scaled gradient direction.
temp = (one - alpha)*min(sgnorm,delta)
do j = 1, n
   x(j) = temp*wa1(j) + alpha*x(j)
end do
end SUBROUTINE DOGLEG