Commit

remove annotations
albestro committed Sep 29, 2023
1 parent e8d3fb5 commit a36baa8
Showing 2 changed files with 0 additions and 18 deletions.
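
For context, every removed line follows the same RAII pattern: constructing a pika::scoped_annotation names the surrounding region so that profiling/tracing tools (when pika is built with such support) can attribute work to it, and the label goes away when the object is destroyed. Below is a minimal, hypothetical sketch of that pattern; only the include path and the constructor call are taken from this diff, while the function name and body are illustration, not code from this repository.

#include <pika/threading_base/scoped_annotation.hpp>

void some_phase() {
  // Label this scope for profilers; the annotation ends when `_` is destroyed.
  pika::scoped_annotation _("some_phase");

  // ... work attributed to "some_phase" ...
}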
15 changes: 0 additions & 15 deletions include/dlaf/eigensolver/tridiag_solver/merge.h
@@ -1200,8 +1200,6 @@ void solveRank1ProblemDist(CommSender&& row_comm, CommSender&& col_comm, const S
auto& barrier_ptr) {
using dlaf::comm::internal::transformMPI;

pika::scoped_annotation _("solveRank1ProblemDist");

common::Pipeline<comm::Communicator> row_comm_chain(row_comm_wrapper.get());
const dlaf::comm::Communicator& col_comm = col_comm_wrapper.get();

@@ -1220,7 +1218,6 @@ void solveRank1ProblemDist(CommSender&& row_comm, CommSender&& col_comm, const S
// to be dropped soon.
// Note: use last threads that in principle should have less work to do
if (k < n && thread_idx == nthreads - 1) {
pika::scoped_annotation _("fill1_and_xevals");
const T* eval_initial_ptr = d_tiles_futs[0].get().ptr();
T* eval_ptr = eval_tiles[0].ptr();

@@ -1261,7 +1258,6 @@ void solveRank1ProblemDist(CommSender&& row_comm, CommSender&& col_comm, const S

// STEP 0b: Initialize workspaces (single-thread)
if (thread_idx == 0) {
pika::scoped_annotation _("init_workspaces");
// Note:
// - nthreads are used for both LAED4 and weight calculation (one per worker thread)
// - last one is used for reducing weights from all workers
@@ -1291,7 +1287,6 @@ void solveRank1ProblemDist(CommSender&& row_comm, CommSender&& col_comm, const S

// STEP 1: LAED4 (multi-thread)
{
pika::scoped_annotation _("laed4");
common::internal::SingleThreadedBlasScope single; // TODO needed also for laed?

T* eval_ptr = eval_tiles[0].ptr();
@@ -1367,7 +1362,6 @@ void solveRank1ProblemDist(CommSender&& row_comm, CommSender&& col_comm, const S
// permutations
// during the switch from col axis to row axis we must keep the matching between eigenvectors
if (thread_idx == 0) {
pika::scoped_annotation _("copy-diag");
for (SizeType ieg_el_lc = 0; ieg_el_lc < m_el_lc; ++ieg_el_lc) {
const SizeType ieg_el =
dist_sub.globalElementFromLocalElement<Coord::Row>(ieg_el_lc);
@@ -1403,8 +1397,6 @@ void solveRank1ProblemDist(CommSender&& row_comm, CommSender&& col_comm, const S

// STEP 2b: compute weights
{
pika::scoped_annotation _("compute-weights");

for (SizeType jeg_el_lc = begin; jeg_el_lc < end; ++jeg_el_lc) {
const SizeType jeg_el =
dist_sub.globalElementFromLocalElement<Coord::Col>(jeg_el_lc);
@@ -1449,8 +1441,6 @@ void solveRank1ProblemDist(CommSender&& row_comm, CommSender&& col_comm, const S

// STEP 2c: reduce, then finalize computation with sign and square root (single-thread)
if (thread_idx == 0) {
pika::scoped_annotation _("reduce-weights");

// local reduction from all bulk workers
for (SizeType i = 0; i < m_el_lc; ++i) {
for (std::size_t tidx = 1; tidx < nthreads; ++tidx) {
@@ -1481,7 +1471,6 @@ void solveRank1ProblemDist(CommSender&& row_comm, CommSender&& col_comm, const S

// STEP 3a: Form evecs using weights vector and compute (local) sum of squares
{
pika::scoped_annotation _("form-evecs");
common::internal::SingleThreadedBlasScope single;

const T* w = ws_cols[nthreads]();
@@ -1526,7 +1515,6 @@ void solveRank1ProblemDist(CommSender&& row_comm, CommSender&& col_comm, const S

// STEP 3b: Reduce to get the sum of all squares on all ranks
if (thread_idx == 0) {
pika::scoped_annotation _("reduce-squares-norm");
// TODO it can be limited to k_lc
tt::sync_wait(ex::just(std::cref(col_comm), MPI_SUM,
common::make_data(ws_row(), n_el_lc)) |
@@ -1537,7 +1525,6 @@ void solveRank1ProblemDist(CommSender&& row_comm, CommSender&& col_comm, const S

// STEP 3c: Normalize (compute norm of each column and scale column vector)
{
pika::scoped_annotation _("normalize-evecs");
common::internal::SingleThreadedBlasScope single;

const T* sum_squares = ws_row();
@@ -1576,8 +1563,6 @@ void mergeDistSubproblems(comm::CommunicatorGrid grid,
DistWorkSpaceHostMirror<T, D>& ws_hm) {
namespace ex = pika::execution::experimental;

pika::scoped_annotation _("mergeDistSubproblems");

const matrix::Distribution& dist_evecs = ws.e0.distribution();

// Calculate the size of the upper subproblem
3 changes: 0 additions & 3 deletions include/dlaf/permutations/general/impl.h
@@ -15,7 +15,6 @@
#include <mpi.h>

#include <pika/algorithm.hpp>
-#include <pika/threading_base/scoped_annotation.hpp>

#include <dlaf/blas/tile.h>
#include <dlaf/common/assert.h>
@@ -216,8 +215,6 @@ void permuteJustLocal(const SizeType i_begin, const SizeType i_end, Matrix<const
Matrix<const T, D>& mat_in, Matrix<T, D>& mat_out) {
static_assert(C == Coord::Col, "Just column permutation");

pika::scoped_annotation _("permuteJustLocal");

namespace ut = matrix::util;
namespace ex = pika::execution::experimental;
namespace di = dlaf::internal;
