/usr/include/trilinos/KokkosKernels_SparseUtils.hpp is in libtrilinos-kokkos-kernels-dev 12.12.1-5.
This file is owned by root:root, with mode 0o644.
The actual contents of the file can be viewed below.
/*
//@HEADER
// ************************************************************************
//
// KokkosKernels 0.9: Linear Algebra and Graph Kernels
// Copyright 2017 Sandia Corporation
//
// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
// the U.S. Government retains certain rights in this software.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact Siva Rajamanickam (srajama@sandia.gov)
//
// ************************************************************************
//@HEADER
*/
#ifndef _KOKKOSKERNELS_SPARSEUTILS_HPP
#define _KOKKOSKERNELS_SPARSEUTILS_HPP
#include "Kokkos_Core.hpp"
#include "Kokkos_Atomic.hpp"
#include "impl/Kokkos_Timer.hpp"
#include "KokkosKernels_SimpleUtils.hpp"
#include "KokkosKernels_IOUtils.hpp"
#include "KokkosKernels_ExecSpaceUtils.hpp"
//#include "KokkosKernels_Handle.hpp"
namespace KokkosKernels{
namespace Experimental{
namespace Util{
/// \brief Two-phase functor computing the transpose of a CRS graph/matrix.
///
/// Phase 1 (CountTag): counts the nonzeros per column of the input, i.e. the
/// row lengths of the transpose, by atomic increments into t_xadj.
/// Phase 2 (FillTag): after t_xadj has been prefix-summed and copied into
/// tmp_txadj, scatters each nonzero (row, col) into the transposed arrays,
/// using tmp_txadj(col) as an atomic insertion cursor.
template <typename in_row_view_t,
          typename in_nnz_view_t,
          typename in_scalar_view_t,
          typename out_row_view_t,
          typename out_nnz_view_t,
          typename out_scalar_view_t,
          typename tempwork_row_view_t,
          typename MyExecSpace>
struct TransposeMatrix{

  // Dispatch tags selecting the phase executed by a parallel_for.
  struct CountTag{};
  struct FillTag{};

  typedef Kokkos::TeamPolicy<CountTag, MyExecSpace> team_count_policy_t ;
  typedef Kokkos::TeamPolicy<FillTag, MyExecSpace> team_fill_policy_t ;
  typedef Kokkos::TeamPolicy<CountTag, MyExecSpace, Kokkos::Schedule<Kokkos::Dynamic> > dynamic_team_count_policy_t ;
  typedef Kokkos::TeamPolicy<FillTag, MyExecSpace, Kokkos::Schedule<Kokkos::Dynamic> > dynamic_team_fill_policy_t ;
  typedef typename team_count_policy_t::member_type team_count_member_t ;
  typedef typename team_fill_policy_t::member_type team_fill_member_t ;

  typedef typename in_nnz_view_t::non_const_value_type nnz_lno_t;
  typedef typename in_row_view_t::non_const_value_type size_type;

  typename in_nnz_view_t::non_const_value_type num_rows;  // rows of the input
  typename in_nnz_view_t::non_const_value_type num_cols;  // cols of the input (rows of the transpose)
  in_row_view_t xadj;       // input row pointers
  in_nnz_view_t adj;        // input column indices
  in_scalar_view_t vals;    // input values
  out_row_view_t t_xadj;    // output row pointers -- pre-allocated, zero-initialized
  out_nnz_view_t t_adj;     // output column indices -- pre-allocated
  // BUGFIX: t_vals stores scalar values of the transpose, so it must use the
  // scalar view type. It was previously declared as out_nnz_view_t, which
  // left the out_scalar_view_t template parameter unused and broke value
  // transposition whenever index and scalar view types differ.
  out_scalar_view_t t_vals; // output values -- pre-allocated
  tempwork_row_view_t tmp_txadj;  // scratch insertion cursors for the fill phase
  bool transpose_values;    // if true, scalar values are transposed as well
  nnz_lno_t team_work_size; // rows handled by each team

  TransposeMatrix(
      nnz_lno_t num_rows_,
      nnz_lno_t num_cols_,
      in_row_view_t xadj_,
      in_nnz_view_t adj_,
      in_scalar_view_t vals_,
      out_row_view_t t_xadj_,
      out_nnz_view_t t_adj_,
      out_scalar_view_t t_vals_,
      tempwork_row_view_t tmp_txadj_,
      bool transpose_values_,
      nnz_lno_t team_row_work_size_):
    num_rows(num_rows_), num_cols(num_cols_),
    xadj(xadj_), adj(adj_), vals(vals_),
    t_xadj(t_xadj_), t_adj(t_adj_), t_vals(t_vals_),
    tmp_txadj(tmp_txadj_), transpose_values(transpose_values_), team_work_size(team_row_work_size_) {}

  // Count phase: for every nonzero (r, c), atomically bump t_xadj(c).
  KOKKOS_INLINE_FUNCTION
  void operator()(const CountTag&, const team_count_member_t & teamMember) const {
    const nnz_lno_t team_row_begin = teamMember.league_rank() * team_work_size;
    const nnz_lno_t team_row_end = KOKKOSKERNELS_MACRO_MIN(team_row_begin + team_work_size, num_rows);
    //TODO we dont need to go over rows
    //just go over nonzeroes.
    Kokkos::parallel_for(Kokkos::TeamThreadRange(teamMember,team_row_begin,team_row_end), [&] (const nnz_lno_t& row_index) {
      const size_type col_begin = xadj[row_index];
      const size_type col_end = xadj[row_index + 1];
      const nnz_lno_t left_work = col_end - col_begin;
      Kokkos::parallel_for(
          Kokkos::ThreadVectorRange(teamMember, left_work),
          [&] (nnz_lno_t i) {
        const size_type adjind = i + col_begin;
        const nnz_lno_t colIndex = adj[adjind];
        Kokkos::atomic_fetch_add(&(t_xadj(colIndex)), 1);
      });
    });
  }

  // Fill phase: claim a slot via atomic increment on tmp_txadj(c) and write
  // the source row index (and, optionally, the value) into that slot.
  KOKKOS_INLINE_FUNCTION
  void operator()(const FillTag&, const team_fill_member_t & teamMember) const {
    const nnz_lno_t team_row_begin = teamMember.league_rank() * team_work_size;
    const nnz_lno_t team_row_end = KOKKOSKERNELS_MACRO_MIN(team_row_begin + team_work_size, num_rows);
    Kokkos::parallel_for(Kokkos::TeamThreadRange(teamMember,team_row_begin,team_row_end), [&] (const nnz_lno_t& row_index) {
      const size_type col_begin = xadj[row_index];
      const size_type col_end = xadj[row_index + 1];
      const nnz_lno_t left_work = col_end - col_begin;
      Kokkos::parallel_for(
          Kokkos::ThreadVectorRange(teamMember, left_work),
          [&] (nnz_lno_t i) {
        const size_type adjind = i + col_begin;
        const nnz_lno_t colIndex = adj[adjind];
        const size_type pos = Kokkos::atomic_fetch_add(&(tmp_txadj(colIndex)), 1);
        t_adj(pos) = row_index;
        if (transpose_values){
          t_vals(pos) = vals[adjind];
        }
      });
    });
  }
};
/**
 * \brief function returns transpose of the given graph.
 * \param num_rows: num rows in input graph
 * \param num_cols: num cols in input graph
 * \param xadj: row pointers of the input graph
 * \param adj: column indices of the input graph
 * \param t_xadj: output, the row pointers of the transposed graph. MUST BE INITIALIZED WITH ZEROES.
 * \param t_adj: output, column indices of the transposed graph. No need for initialization.
 * \param vector_size: suggested vector size, optional. if -1, kernel will decide.
 * \param suggested_team_size: suggested team size, optional. if -1, kernel will decide.
 * \param team_work_chunk_size: suggested work size of a team, optional. if -1, kernel will decide.
 * \param use_dynamic_scheduling: whether to use dynamic scheduling. Default is true.
 */
template <typename in_row_view_t,
          typename in_nnz_view_t,
          typename out_row_view_t,
          typename out_nnz_view_t,
          typename tempwork_row_view_t,
          typename MyExecSpace>
inline void kk_transpose_graph(
    typename in_nnz_view_t::non_const_value_type num_rows,
    typename in_nnz_view_t::non_const_value_type num_cols,
    in_row_view_t xadj,
    in_nnz_view_t adj,
    out_row_view_t t_xadj, //pre-allocated -- initialized with 0
    out_nnz_view_t t_adj,  //pre-allocated -- no need for initialize
    int vector_size = -1,
    int suggested_team_size = -1,
    typename in_nnz_view_t::non_const_value_type team_work_chunk_size = -1,
    bool use_dynamic_scheduling = true
    ){

  //scratch row pointers: a copy of the prefix-summed t_xadj that the fill
  //phase consumes (and advances) as per-column insertion cursors.
  tempwork_row_view_t tmp_row_view(Kokkos::ViewAllocateWithoutInitializing("tmp_row_view"), num_cols + 1);

  //no values are transposed here, so pass empty scalar views.
  in_nnz_view_t tmp1;
  out_nnz_view_t tmp2;

  //create the functor for transpose.
  typedef TransposeMatrix <
      in_row_view_t, in_nnz_view_t, in_nnz_view_t,
      out_row_view_t, out_nnz_view_t, out_nnz_view_t,
      tempwork_row_view_t, MyExecSpace> TransposeFunctor_t;

  TransposeFunctor_t tm ( num_rows, num_cols, xadj, adj, tmp1,
                          t_xadj, t_adj, tmp2,
                          tmp_row_view,
                          false,
                          team_work_chunk_size);

  typedef typename TransposeFunctor_t::team_count_policy_t count_tp_t;
  typedef typename TransposeFunctor_t::team_fill_policy_t fill_tp_t;
  typedef typename TransposeFunctor_t::dynamic_team_count_policy_t d_count_tp_t;
  typedef typename TransposeFunctor_t::dynamic_team_fill_policy_t d_fill_tp_t;

  typename in_row_view_t::non_const_value_type nnz = adj.dimension_0();

  //set the vector size, if not suggested.
  if (vector_size == -1)
    vector_size = kk_get_suggested_vector_size(num_rows, nnz, kk_get_exec_space_type<MyExecSpace>());

  //set the team size, if not suggested.
  if (suggested_team_size == -1)
    suggested_team_size = kk_get_suggested_team_size(vector_size, kk_get_exec_space_type<MyExecSpace>());

  //set the chunk size, if not suggested.
  if (team_work_chunk_size == -1)
    team_work_chunk_size = suggested_team_size;

  //phase 1: count per-column nonzeros into t_xadj.
  if (use_dynamic_scheduling){
    Kokkos::parallel_for( d_count_tp_t(num_rows / team_work_chunk_size + 1 , suggested_team_size, vector_size), tm);
  }
  else {
    Kokkos::parallel_for( count_tp_t(num_rows / team_work_chunk_size + 1 , suggested_team_size, vector_size), tm);
  }
  MyExecSpace::fence();

  //turn counts into row pointers, and snapshot them as fill cursors.
  kk_exclusive_parallel_prefix_sum<out_row_view_t, MyExecSpace>(num_cols+1, t_xadj);
  MyExecSpace::fence();
  Kokkos::deep_copy(tmp_row_view, t_xadj);
  MyExecSpace::fence();

  //phase 2: scatter column indices into t_adj.
  //BUGFIX: the dynamic/static fill policies were swapped -- the dynamic
  //scheduling branch launched the static policy and vice versa, silently
  //ignoring the caller's use_dynamic_scheduling choice for this phase.
  if (use_dynamic_scheduling){
    Kokkos::parallel_for( d_fill_tp_t(num_rows / team_work_chunk_size + 1 , suggested_team_size, vector_size), tm);
  }
  else {
    Kokkos::parallel_for( fill_tp_t(num_rows / team_work_chunk_size + 1 , suggested_team_size, vector_size), tm);
  }
  MyExecSpace::fence();
}
/// \brief Two-phase functor building a reverse map with bucket scaling.
///
/// Each forward value owns a block of 2^multiply_shift_for_scale buckets;
/// an element's position (shifted by division_shift_for_bucket) picks the
/// bucket inside that block. CountTag tallies per-bucket sizes; FillTag
/// scatters element indices using the buckets as atomic cursors.
template <typename forward_map_type, typename reverse_map_type>
struct Fill_Reverse_Scale_Functor{

  struct CountTag{};
  struct FillTag{};

  typedef typename forward_map_type::value_type forward_type;
  typedef typename reverse_map_type::value_type reverse_type;

  forward_map_type forward_map;
  reverse_map_type reverse_map_xadj;
  reverse_map_type reverse_map_adj;
  const reverse_type multiply_shift_for_scale;
  const reverse_type division_shift_for_bucket;

  Fill_Reverse_Scale_Functor(
      forward_map_type fwd_map_,
      reverse_map_type rev_xadj_,
      reverse_map_type rev_adj_,
      reverse_type scale_shift_,
      reverse_type bucket_shift_)
    : forward_map(fwd_map_),
      reverse_map_xadj(rev_xadj_),
      reverse_map_adj(rev_adj_),
      multiply_shift_for_scale(scale_shift_),
      division_shift_for_bucket(bucket_shift_) {}

  // Scaled bucket index of element ii (shared by both phases).
  KOKKOS_INLINE_FUNCTION
  forward_type scaled_bucket(const size_t ii) const {
    forward_type bucket = forward_map[ii];
    bucket = bucket << multiply_shift_for_scale;
    bucket += ii >> division_shift_for_bucket;
    return bucket;
  }

  // Count phase: one atomic increment per element in its bucket.
  KOKKOS_INLINE_FUNCTION
  void operator()(const CountTag&, const size_t &ii) const {
    Kokkos::atomic_fetch_add( &(reverse_map_xadj(scaled_bucket(ii))), 1);
  }

  // Fill phase: claim the next slot of the bucket and record the element.
  KOKKOS_INLINE_FUNCTION
  void operator()(const FillTag&, const size_t &ii) const {
    const reverse_type write_pos =
        Kokkos::atomic_fetch_add( &(reverse_map_xadj(scaled_bucket(ii))), 1);
    reverse_map_adj(write_pos) = ii;
  }
};
/// \brief Gathers every stride-th element of `from` into consecutive slots
/// of `to`: to[i] = from[i * stride].
template <typename from_view_t, typename to_view_t>
struct StridedCopy1{

  const from_view_t from;  // source view, read at strided positions
  to_view_t to;            // destination view, written densely
  const size_t stride;     // distance between consecutive source reads

  StridedCopy1(
      const from_view_t from_,
      to_view_t to_,
      size_t stride_)
    : from(from_), to(to_), stride(stride_) {}

  KOKKOS_INLINE_FUNCTION
  void operator()(const size_t &idx) const {
    to[idx] = from[idx * stride];
  }
};
/// \brief Two-phase atomic functor building a reverse map.
///
/// CountTag builds a histogram of forward values in reverse_map_xadj;
/// FillTag (after the histogram has been prefix-summed) treats
/// reverse_map_xadj as running insertion cursors and scatters element
/// indices into reverse_map_adj.
template <typename forward_map_type, typename reverse_map_type>
struct Reverse_Map_Functor{

  struct CountTag{};
  struct FillTag{};

  typedef typename forward_map_type::value_type forward_type;
  typedef typename reverse_map_type::value_type reverse_type;

  forward_map_type forward_map;
  reverse_map_type reverse_map_xadj;
  reverse_map_type reverse_map_adj;

  Reverse_Map_Functor(
      forward_map_type fwd_map_,
      reverse_map_type rev_xadj_,
      reverse_map_type rev_adj_)
    : forward_map(fwd_map_),
      reverse_map_xadj(rev_xadj_),
      reverse_map_adj(rev_adj_) {}

  // Count phase: histogram bucketed by forward value.
  KOKKOS_INLINE_FUNCTION
  void operator()(const CountTag&, const size_t &ii) const {
    const forward_type mapped_to = forward_map[ii];
    Kokkos::atomic_fetch_add( &(reverse_map_xadj(mapped_to)), 1);
  }

  // Fill phase: claim the next slot of the bucket and store the element index.
  KOKKOS_INLINE_FUNCTION
  void operator()(const FillTag&, const size_t &ii) const {
    const forward_type mapped_to = forward_map[ii];
    const reverse_type slot = Kokkos::atomic_fetch_add( &(reverse_map_xadj(mapped_to)), 1);
    reverse_map_adj(slot) = ii;
  }
};
/**
 * \brief Utility function to obtain a reverse map given a map.
 * Input is a map with the number of elements within the map.
 * forward_map[c] = i, where c is a forward element and forward_map has a size of num_forward_elements.
 * i is the value that c is mapped in the forward map, and the range of that is num_reverse_elements.
 * Output is the reverse_map_xadj and reverse_map_adj such that,
 * all c, forward_map[c] = i, will appear in reverse_map_adj[ reverse_map_xadj[i]: reverse_map_xadj[i+1])
 * \param: num_forward_elements: the number of elements in the forward map, the size of the forward map.
 * \param: num_reverse_elements: the number of elements that forward map is mapped to. It is the value of max i.
 * \param: forward_map: input forward_map, where forward_map[c] = i.
 * \param: reverse_map_xadj: reverse map xadj, that is it will hold the beginning and
 * end indices on reverse_map_adj such that all values mapped to i will be [ reverse_map_xadj[i]: reverse_map_xadj[i+1])
 * its size will be num_reverse_elements + 1. NO NEED TO INITIALIZE.
 * \param: reverse_map_adj: reverse map adj, holds the values of reverse maps. Its size is num_forward_elements.
 *
 */
template <typename forward_array_type, typename reverse_array_type, typename MyExecSpace>
void kk_create_reverse_map(
const typename reverse_array_type::value_type &num_forward_elements, //num_vertices
const typename forward_array_type::value_type &num_reverse_elements, //num_colors
const forward_array_type &forward_map, //vertex to colors
const reverse_array_type &reverse_map_xadj, // colors to vertex xadj
const reverse_array_type &reverse_map_adj){ //colors to vertex adj
typedef typename reverse_array_type::value_type lno_t;
typedef typename forward_array_type::value_type reverse_lno_t;
//below this threshold the scaled-bucket scheme is used to spread atomic
//updates; above it plain atomics are assumed to have low enough contention.
const lno_t MINIMUM_TO_ATOMIC = 128;
//typedef Kokkos::TeamPolicy<CountTag, MyExecSpace> team_count_policy_t ;
//typedef Kokkos::TeamPolicy<FillTag, MyExecSpace> team_fill_policy_t ;
typedef Kokkos::RangePolicy<MyExecSpace> my_exec_space;
//IF There are very few reverse elements, atomics are likely to create contention.
if (num_reverse_elements < MINIMUM_TO_ATOMIC){
//each reverse element is split over scale_size = 2^multiply_shift_for_scale
//buckets so that concurrent atomic increments land on different counters.
const lno_t scale_size = 1024;
const lno_t multiply_shift_for_scale = 10;
//there will be 1024 buckets
//right-shift applied to an element's index to choose its bucket within the
//block; derived from num_forward_elements so elements spread evenly.
const lno_t division_shift_for_bucket =
lno_t (ceil(log(double (num_forward_elements) / scale_size)/log(2)));
//coloring indices are base-1. we end up not using element 1.
const reverse_lno_t tmp_reverse_size =
(num_reverse_elements + 1) << multiply_shift_for_scale;
//oversized scratch xadj: one counter per (reverse element, bucket) pair.
typename reverse_array_type::non_const_type
tmp_color_xadj ("TMP_REVERSE_XADJ", tmp_reverse_size + 1);
typedef Fill_Reverse_Scale_Functor<forward_array_type, reverse_array_type> frsf;
typedef typename frsf::CountTag cnt_tag;
typedef typename frsf::FillTag fill_tag;
typedef Kokkos::RangePolicy<cnt_tag, MyExecSpace> my_cnt_exec_space;
typedef Kokkos::RangePolicy<fill_tag, MyExecSpace> my_fill_exec_space;
frsf frm (forward_map, tmp_color_xadj, reverse_map_adj,
multiply_shift_for_scale, division_shift_for_bucket);
//count phase over the oversized bucket array.
Kokkos::parallel_for (my_cnt_exec_space (0, num_forward_elements) , frm);
MyExecSpace::fence();
//kk_inclusive_parallel_prefix_sum<reverse_array_type, MyExecSpace>(tmp_reverse_size + 1, tmp_color_xadj);
kk_exclusive_parallel_prefix_sum<reverse_array_type, MyExecSpace>
(tmp_reverse_size + 1, tmp_color_xadj);
MyExecSpace::fence();
//compact the oversized offsets into the caller's xadj: entry i of
//reverse_map_xadj is the prefix sum at the start of i's bucket block
//(every scale_size-th entry of tmp_color_xadj).
Kokkos::parallel_for (
my_exec_space (0, num_reverse_elements + 1) ,
StridedCopy1<reverse_array_type, reverse_array_type>
(tmp_color_xadj, reverse_map_xadj, scale_size));
MyExecSpace::fence();
//fill phase: tmp_color_xadj entries now serve as insertion cursors.
Kokkos::parallel_for (my_fill_exec_space (0, num_forward_elements) , frm);
MyExecSpace::fence();
}
else
//atomic implementation.
{
//scratch xadj so the fill phase can consume cursors without clobbering
//the caller-visible reverse_map_xadj.
reverse_array_type tmp_color_xadj ("TMP_REVERSE_XADJ", num_reverse_elements + 1);
typedef Reverse_Map_Functor<forward_array_type, reverse_array_type> rmp_functor_type;
typedef typename rmp_functor_type::CountTag cnt_tag;
typedef typename rmp_functor_type::FillTag fill_tag;
typedef Kokkos::RangePolicy<cnt_tag, MyExecSpace> my_cnt_exec_space;
typedef Kokkos::RangePolicy<fill_tag, MyExecSpace> my_fill_exec_space;
rmp_functor_type frm (forward_map, tmp_color_xadj, reverse_map_adj);
//count phase: histogram of forward values.
Kokkos::parallel_for (my_cnt_exec_space (0, num_forward_elements) , frm);
MyExecSpace::fence();
//kk_inclusive_parallel_prefix_sum<reverse_array_type, MyExecSpace>(num_reverse_elements + 1, reverse_map_xadj);
kk_exclusive_parallel_prefix_sum<reverse_array_type, MyExecSpace>
(num_reverse_elements + 1, tmp_color_xadj);
MyExecSpace::fence();
//publish the prefix-summed offsets before the fill phase advances them.
Kokkos::deep_copy (reverse_map_xadj, tmp_color_xadj);
MyExecSpace::fence();
//fill phase: tmp_color_xadj entries serve as insertion cursors.
Kokkos::parallel_for (my_fill_exec_space (0, num_forward_elements) , frm);
MyExecSpace::fence();
}
}
// Team-level reduction functor counting distance-1 coloring conflicts:
// a conflict is an edge (u, v), u != v, whose endpoints share a color.
// Note: each conflict edge is counted once per direction it appears in the
// adjacency structure, so a symmetric graph reports each conflict twice.
template <typename in_row_view_t, typename in_nnz_view_t, typename in_color_view_t,
typename team_member>
struct ColorChecker{
typedef typename in_row_view_t::value_type size_type;
typedef typename in_nnz_view_t::value_type lno_t;
typedef typename in_color_view_t::value_type color_t;
in_row_view_t xadj;          // row pointers of the graph
in_nnz_view_t adj;           // column indices of the graph
in_color_view_t color_view;  // color of each vertex
lno_t team_row_chunk_size;   // rows handled per team
lno_t num_rows;              // number of vertices
ColorChecker(
lno_t num_rows_,
in_row_view_t xadj_,
in_nnz_view_t adj_,
in_color_view_t color_view_,
lno_t chunk_size):
num_rows(num_rows_),
xadj(xadj_), adj(adj_), color_view(color_view_),
team_row_chunk_size(chunk_size){}
// Reduction body: accumulates this team's conflict count into num_conflicts
// via nested team-thread / thread-vector reductions.
KOKKOS_INLINE_FUNCTION
void operator()(const team_member & teamMember, size_t &num_conflicts) const {
//get the range of rows for team.
const lno_t team_row_begin = teamMember.league_rank() * team_row_chunk_size;
const lno_t team_row_end = KOKKOSKERNELS_MACRO_MIN(team_row_begin + team_row_chunk_size, num_rows);
size_t nf = 0;
Kokkos::parallel_reduce(Kokkos::TeamThreadRange(teamMember, team_row_begin, team_row_end), [&] (const lno_t& row_index, size_t &team_num_conf)
{
color_t my_color = color_view(row_index);
const size_type col_begin = xadj[row_index];
const size_type col_end = xadj[row_index + 1];
const lno_t left_work = col_end - col_begin;
size_t conf1= 0;
//vector-level scan of the row: count neighbors with the same color,
//skipping self-loops.
Kokkos::parallel_reduce(
Kokkos::ThreadVectorRange(teamMember, left_work),
[&] (lno_t i, size_t & valueToUpdate) {
const size_type adjind = i + col_begin;
const lno_t colIndex = adj[adjind];
if (colIndex != row_index){
color_t second_color = color_view(colIndex);
if (second_color == my_color)
valueToUpdate += 1;
}
},
conf1);
team_num_conf += conf1;
}, nf);
num_conflicts += nf;
}
};
/**
 * \brief checks whether a given distance-1 coloring of a graph is valid.
 * \param num_rows: num rows (vertices) in input graph
 * \param num_cols: num cols in input graph
 * \param xadj: row pointers of the input graph
 * \param adj: column indices of the input graph
 * \param v_colors: the color assigned to each vertex.
 * \return the number of conflicts found, i.e. adjacency entries whose two
 *         endpoints share a color (self-loops are ignored). A return value
 *         of 0 means the coloring is a valid distance-1 coloring.
 */
template <typename in_row_view_t,
          typename in_nnz_view_t,
          typename in_color_view_t,
          typename MyExecSpace>
inline size_t kk_is_d1_coloring_valid(
    typename in_nnz_view_t::non_const_value_type num_rows,
    typename in_nnz_view_t::non_const_value_type num_cols,
    in_row_view_t xadj,
    in_nnz_view_t adj,
    in_color_view_t v_colors
    ){
  //pick launch parameters from the execution space and graph size.
  KokkosKernels::Experimental::Util::ExecSpaceType my_exec_space = KokkosKernels::Experimental::Util::kk_get_exec_space_type<MyExecSpace>();
  int vector_size = KokkosKernels::Experimental::Util::kk_get_suggested_vector_size(num_rows, adj.dimension_0(), my_exec_space);
  int suggested_team_size = KokkosKernels::Experimental::Util::kk_get_suggested_team_size(vector_size, my_exec_space);
  typename in_nnz_view_t::non_const_value_type team_work_chunk_size = suggested_team_size;

  typedef Kokkos::TeamPolicy<MyExecSpace, Kokkos::Schedule<Kokkos::Dynamic> > dynamic_team_policy ;
  typedef typename dynamic_team_policy::member_type team_member_t ;

  //reduce the per-team conflict counts into a single total.
  ColorChecker<in_row_view_t, in_nnz_view_t, in_color_view_t, team_member_t> cc(num_rows, xadj, adj, v_colors, team_work_chunk_size);
  size_t num_conf = 0;
  Kokkos::parallel_reduce( dynamic_team_policy(num_rows / team_work_chunk_size + 1 ,
      suggested_team_size, vector_size), cc, num_conf);

  MyExecSpace::fence();
  return num_conf;
}
template <typename lno_view_t,
typename lno_nnz_view_t,
typename scalar_view_t,
typename out_nnz_view_t,
typename out_scalar_view_t,
typename MyExecSpace>
void kk_sort_graph(
lno_view_t in_xadj,
lno_nnz_view_t in_adj,
scalar_view_t in_vals,
out_nnz_view_t out_adj,
out_scalar_view_t out_vals){
typename lno_view_t::HostMirror hr = Kokkos::create_mirror_view (in_xadj);
Kokkos::deep_copy (hr, in_xadj);
typename lno_nnz_view_t::HostMirror he = Kokkos::create_mirror_view (in_adj);
Kokkos::deep_copy (he, in_adj);
typename scalar_view_t::HostMirror hv = Kokkos::create_mirror_view (in_vals);
Kokkos::deep_copy (hv, in_vals);
typename lno_nnz_view_t::HostMirror heo = Kokkos::create_mirror_view (out_adj);
typename scalar_view_t::HostMirror hvo = Kokkos::create_mirror_view (out_vals);
typedef typename lno_view_t::non_const_value_type size_type;
typedef typename lno_nnz_view_t::non_const_value_type lno_t;
typedef typename scalar_view_t::non_const_value_type scalar_t;
lno_t nrows = in_xadj.dimension_0() - 1;
std::vector <KokkosKernels::Experimental::Util::Edge<lno_t, scalar_t> > edges(in_adj.dimension_0());
for (lno_t i = 0; i < nrows; ++i){
size_type row_size = 0;
for (size_type j = hr(i); j < hr(i + 1); ++j){
edges[row_size].src = i;
edges[row_size].dst = he(j);
edges[row_size++].ew = hv(j);
}
std::sort (edges.begin(), edges.begin() + row_size);
size_type row_ind = 0;
for (size_type j = hr(i); j < hr(i + 1); ++j){
heo(j) = edges[row_ind].dst;
hvo(j) = edges[row_ind++].ew;
}
}
Kokkos::deep_copy (out_adj, heo);
Kokkos::deep_copy (out_vals, hvo);
}
}
}
}
#endif
|