dogleg Subroutine

public subroutine dogleg(n, r, Lr, Diag, Qtb, Delta, x, Wa1, Wa2)

given an m by n matrix a, an n by n nonsingular diagonal matrix d, an m-vector b, and a positive number delta, the problem is to determine the convex combination x of the gauss-newton and scaled gradient directions that minimizes (a*x - b) in the least squares sense, subject to the restriction that the euclidean norm of d*x be at most delta.
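In symbols, this is the trust-region subproblem restricted to the dogleg path (a restatement of the sentence above, with x_gn the gauss-newton direction and x_sg the step along the scaled gradient direction):

$$
\min_{\alpha \in [0,1]} \; \lVert a\,x(\alpha) - b \rVert_2 ,
\qquad x(\alpha) = (1-\alpha)\,x_{sg} + \alpha\,x_{gn} ,
\qquad \lVert d\,x(\alpha) \rVert_2 \le \delta .
$$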

this subroutine completes the solution of the problem if it is provided with the necessary information from the qr factorization of a. that is, if a = q*r, where q has orthogonal columns and r is an upper triangular matrix, then dogleg expects the full upper triangle of r and the first n components of (q transpose)*b.
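For reference, the row-packed layout expected in r can be produced as sketched below. The helper pack_upper_by_rows and the full matrix rfull are illustrative only; the module name and the working-precision kind wp are assumptions, not part of this documentation.

    subroutine pack_upper_by_rows(n, rfull, rpacked)
        ! illustrative helper: copy the upper triangle of an n by n matrix
        ! into a vector of length (n*(n+1))/2, row by row, which is the
        ! layout dogleg expects in its argument r.
        use minpack_module, only: wp   ! kind parameter; module name assumed
        implicit none
        integer, intent(in) :: n
        real(wp), intent(in) :: rfull(n, n)
        real(wp), intent(out) :: rpacked((n*(n + 1))/2)
        integer :: i, j, l
        l = 0
        do i = 1, n          ! row index
            do j = i, n      ! columns on and above the diagonal
                l = l + 1
                rpacked(l) = rfull(i, j)
            end do
        end do
    end subroutine pack_upper_by_rows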

Arguments

integer, intent(in) :: n

a positive integer input variable set to the order of r.

real(kind=wp), intent(in) :: r(Lr)

an input array of length lr which must contain the upper triangular matrix r stored by rows.

integer, intent(in) :: Lr

a positive integer input variable not less than (n*(n+1))/2.

real(kind=wp), intent(in) :: Diag(n)

an input array of length n which must contain the diagonal elements of the matrix d.

real(kind=wp), intent(in) :: Qtb(n)

an input array of length n which must contain the first n elements of the vector (q transpose)*b.

real(kind=wp), intent(in) :: Delta

a positive input variable which specifies an upper bound on the euclidean norm of d*x.

real(kind=wp), intent(out) :: x(n)

an output array of length n which contains the desired convex combination of the gauss-newton direction and the scaled gradient direction.

real(kind=wp), intent(inout) :: Wa1(n)

work array of length n

real(kind=wp), intent(inout) :: Wa2(n)

work array of length n
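
As a usage sketch (not generated from the library source), the fragment below calls dogleg on a small 2-variable problem. It assumes the host module is named minpack_module and exports dogleg and wp; the numerical values are purely illustrative.

    program dogleg_example
        use minpack_module, only: dogleg, wp  ! module name assumed; adjust to your build
        implicit none
        integer, parameter :: n = 2, lr = (n*(n + 1))/2
        real(wp) :: r(lr), diag(n), qtb(n), x(n), wa1(n), wa2(n), delta

        ! upper triangular r stored by rows: [ 2 1 ; 0 3 ]
        r = [2.0_wp, 1.0_wp, 3.0_wp]
        diag = [1.0_wp, 1.0_wp]   ! diagonal of the scaling matrix d (identity here)
        qtb = [1.0_wp, 1.0_wp]    ! first n components of (q transpose)*b
        delta = 0.1_wp            ! bound on the euclidean norm of d*x

        call dogleg(n, r, lr, diag, qtb, delta, x, wa1, wa2)
        print '(a,2es12.4)', ' dogleg step x =', x
    end program dogleg_example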


Calls

enorm

Called by

hybrd, hybrj (and, indirectly, hybrd1, hybrj1, minpack_hybrd, minpack_hybrd1, minpack_hybrj, minpack_hybrj1)


Source Code

    subroutine dogleg(n, r, Lr, Diag, Qtb, Delta, x, Wa1, Wa2)

        implicit none

        integer, intent(in) :: n !! a positive integer input variable set to the order of r.
        integer, intent(in) :: Lr !! a positive integer input variable not less than (n*(n+1))/2.
        real(wp), intent(in) :: Delta !! a positive input variable which specifies an upper
                                 !! bound on the euclidean norm of d*x.
        real(wp), intent(in) :: r(Lr) !! an input array of length lr which must contain the upper
                                 !! triangular matrix r stored by rows.
        real(wp), intent(in) :: Diag(n) !! an input array of length n which must contain the
                                   !! diagonal elements of the matrix d.
        real(wp), intent(in) :: Qtb(n) !! an input array of length n which must contain the first
                                   !! n elements of the vector (q transpose)*b.
        real(wp), intent(out) :: x(n) !! an output array of length n which contains the desired
                                 !! convex combination of the gauss-newton direction and the
                                 !! scaled gradient direction.
        real(wp), intent(inout) :: Wa1(n) !! work array of length n
        real(wp), intent(inout) :: Wa2(n) !! work array of length n

        integer :: i, j, jj, jp1, k, l
        real(wp) :: alpha, bnorm, gnorm, qnorm, sgnorm, sum, temp

        ! first, calculate the gauss-newton direction.

        jj = (n*(n + 1))/2 + 1
        do k = 1, n
            j = n - k + 1
            jp1 = j + 1
            jj = jj - k
            l = jj + 1
            sum = zero
            if (n >= jp1) then
                do i = jp1, n
                    sum = sum + r(l)*x(i)
                    l = l + 1
                end do
            end if
            temp = r(jj)
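            ! if the diagonal element is zero, substitute a small positive
            ! value, epsmch times the largest magnitude entry in column j of r
            ! (or epsmch itself if that column is entirely zero), so that the
            ! back substitution can proceed.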
            if (temp == zero) then
                l = j
                do i = 1, j
                    temp = max(temp, abs(r(l)))
                    l = l + n - i
                end do
                temp = epsmch*temp
                if (temp == zero) temp = epsmch
            end if
            x(j) = (Qtb(j) - sum)/temp
        end do

        ! test whether the gauss-newton direction is acceptable.

        do j = 1, n
            Wa1(j) = zero
            Wa2(j) = Diag(j)*x(j)
        end do
        qnorm = enorm(n, Wa2)
        if (qnorm > Delta) then

            ! the gauss-newton direction is not acceptable.
            ! next, calculate the scaled gradient direction.

            l = 1
            do j = 1, n
                temp = Qtb(j)
                do i = j, n
                    Wa1(i) = Wa1(i) + r(l)*temp
                    l = l + 1
                end do
                Wa1(j) = Wa1(j)/Diag(j)
            end do

            ! calculate the norm of the scaled gradient and test for
            ! the special case in which the scaled gradient is zero.

            gnorm = enorm(n, Wa1)
            sgnorm = zero
            alpha = Delta/qnorm
            if (gnorm /= zero) then

                ! calculate the point along the scaled gradient
                ! at which the quadratic is minimized.

                do j = 1, n
                    Wa1(j) = (Wa1(j)/gnorm)/Diag(j)
                end do
                l = 1
                do j = 1, n
                    sum = zero
                    do i = j, n
                        sum = sum + r(l)*Wa1(i)
                        l = l + 1
                    end do
                    Wa2(j) = sum
                end do
                temp = enorm(n, Wa2)
                sgnorm = (gnorm/temp)/temp

                ! test whether the scaled gradient direction is acceptable.

                alpha = zero
                if (sgnorm < Delta) then

                    ! the scaled gradient direction is not acceptable.
                    ! finally, calculate the point along the dogleg
                    ! at which the quadratic is minimized.
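                    ! alpha is the weight of the gauss-newton direction in the
                    ! final convex combination; it is taken as the root of a
                    ! quadratic chosen so that the dogleg point satisfies
                    ! enorm(d*x) = delta, written in a form that avoids
                    ! subtractive cancellation.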

                    bnorm = enorm(n, Qtb)
                    temp = (bnorm/gnorm)*(bnorm/qnorm)*(sgnorm/Delta)
                    temp = temp - (Delta/qnorm)*(sgnorm/Delta)**2 + &
                           sqrt((temp - (Delta/qnorm))**2 + &
                                (one - (Delta/qnorm)**2)*(one - (sgnorm/Delta)**2))
                    alpha = ((Delta/qnorm)*(one - (sgnorm/Delta)**2))/temp
                end if
            end if

            ! form appropriate convex combination of the gauss-newton
            ! direction and the scaled gradient direction.

            temp = (one - alpha)*min(sgnorm, Delta)
            do j = 1, n
                x(j) = temp*Wa1(j) + alpha*x(j)
            end do
        end if

    end subroutine dogleg