1

I'm trying to solve the following optimization problem with Ceres Solver:

minimize Φ

Where Φ = ||T/tmax||∞

Subject to:

TᵀB T <= 0
||G(J)ᵀT+w||² = 0
-Tmax <= T <= Tmax

with cost function gradient:

∇Φ = tᵢ / tmax; if tᵢ = tmax or 0 otherwise.

The gradient of the constraints:

2TᵀB
2(G(J†)ᵀT +w)ᵀG(J†)ᵀ

Considering:

  • T is an Eigen::VectorXd with size 32 elements
  • G is an Eigen::MatrixXd with size 6x24
  • J is an Eigen::MatrixXd with size 24*32
  • B is an Eigen::MatrixXd with size 32*32
  • w is an Eigen::VectorXd with size 6 elements
  • ᵀ is the transpose of the vector or matrix

I have implemented the problem in Ceres Solver, but when I try to compile I get the following errors:

parameter_dims.h:75:3: error: static_assert failed due to requirement 'internal::ParameterDims<false, -1>::kIsValid' "Invalid parameter block dimension detected. Each parameter block dimension must be bigger than zero."

autodiff_cost_function.h:162:5: error: static_assert failed due to requirement '-1 != DYNAMIC' "Can't run the fixed-size constructor if the number of residuals is set to ceres::DYNAMIC."

sized_cost_function.h:53:3: error: static_assert failed due to requirement 'internal::ParameterDims<false, -1>::kIsValid' "Invalid parameter block dimension detected. Each parameter block dimension must be bigger than zero."

I'm not an expert in Ceres Solver, but here is the implementation I did following the Ceres documentation:

Objective Function:

struct TorqueObjective {
   explicit TorqueObjective(const Eigen::VectorXd& tmax) : tmax_(tmax) {}

template <typename T>
bool operator()(const T* const taos, T* residual) const {
    Eigen::Matrix<T, Eigen::Dynamic, 1> t(tmax_.size());
    for (int i = 0; i < tmax_.size(); ++i) {
        t(i) = taos[i];
    }

    T max_tao = T(0);
    int pos = -1;
    for (int i = 0; i < t.size(); ++i) {
        T quotient = t(i) / tmax_(i);
        if (quotient >= max_tao) {
            max_tao = quotient;
            pos = i;
        }
    }

    for (int i = 0; i < t.size(); ++i) {
        residual[i] = (i == pos) ? t(i) / tmax_(i) : T(0);
    }

    return true;
}

private:
    const Eigen::VectorXd tmax_;
};

QuadraticConstraint TᵀB T <= 0

struct QuadraticConstraint {
    explicit QuadraticConstraint(const Eigen::MatrixXd& Bi) : Bi_(Bi) {}

    template <typename T>
    bool operator()(const T* const taos, T* residual) const {
        Eigen::Matrix<T, Eigen::Dynamic, 1> t(Bi_.cols());
        for (int i = 0; i < Bi_.cols(); ++i) {
            t(i) = taos[i];
        }

        residual[0] = t.transpose() * Bi_ * t;

        return true;
    }

    private:
        const Eigen::MatrixXd Bi_;
     };

Linear Constraint ||G(J)ᵀT+w||² = 0:

struct LinearConstraint {
    explicit LinearConstraint(const Eigen::MatrixXd& G, const Eigen::MatrixXd& J, const Eigen::VectorXd& Ext)
        : G_(G), J_(J), Ext_(Ext) {}

    template <typename T>
    bool operator()(const T* const taos, T* residual) const {
        Eigen::Matrix<T, Eigen::Dynamic, 1> t(J_.cols());
        for (int i = 0; i < J_.cols(); ++i) {
            t(i) = taos[i];
        }

        Eigen::Matrix<T, Eigen::Dynamic, 1> We = (G_ * J_ * t) + Ext_;
        T norm_We_squared = We.squaredNorm();

        residual[0] = norm_We_squared;

        return true;
    }

private:
    const Eigen::MatrixXd G_;
    const Eigen::MatrixXd J_;
    const Eigen::VectorXd Ext_;
};

Optimization Class constructor:

CeresOptimization::CeresOptimization() {}

CeresOptimization::~CeresOptimization() {}

Solver function:

void CeresOptimization::solve(const Eigen::MatrixXd& B, const Eigen::MatrixXd& G, const Eigen::MatrixXd& J, const Eigen::VectorXd& w) {
    data.Bi = B;
    data.G = G;
    data.J = J; //The J is already transposed
    data.Ext = w;

    const int dim = B.cols();

    double T[dim];
    std::fill_n(T, dim, 0.6);

    ceres::Problem problem;

    ceres::CostFunction* torque_objective = createTorqueObjective();
    problem.AddResidualBlock(torque_objective, nullptr, T);

    ceres::CostFunction* quadratic_constraint = createQuadraticConstraint(&data);
    problem.AddResidualBlock(quadratic_constraint, nullptr, T);

    ceres::CostFunction* linear_constraint = createLinearConstraint(&data);
    problem.AddResidualBlock(linear_constraint, nullptr, T);

    ceres::Solver::Options options;
    options.linear_solver_type = ceres::DENSE_QR;
    options.minimizer_progress_to_stdout = true;

    ceres::Solver::Summary summary;
    ceres::Solve(options, &problem, &summary);

    std::cout << summary.BriefReport() << std::endl;
}

Functions to create objective function and constrainst

ceres::CostFunction* CeresOptimization::createTorqueObjective() {
    return new ceres::AutoDiffCostFunction<TorqueObjective, Eigen::Dynamic, Eigen::Dynamic>(
        new TorqueObjective(Eigen::VectorXd::Constant(32, 0.6)));
}

ceres::CostFunction* CeresOptimization::createQuadraticConstraint(const DataConstraint* data) {
    return new ceres::AutoDiffCostFunction<QuadraticConstraint, 1, Eigen::Dynamic>(
        new QuadraticConstraint(data->Bi));
}

ceres::CostFunction* CeresOptimization::createLinearConstraint(const DataConstraint* data) {
    return new ceres::AutoDiffCostFunction<LinearConstraint, 1, Eigen::Dynamic>(
        new LinearConstraint(data->G, data->J, data->Ext));
}

In this implementation the gradients are not used as I think it's not necessary. Somebody can help me to figure out what is wrong in the code that is rising the errors cited before?

Thanks in advance.

Abiud Rds
  • 139
  • 1
  • 1
  • 7

0 Answers0