Dataset Viewer
Auto-converted to Parquet
Columns:
filename: string (19 to 182 characters), repo-relative path of the source file
omp_pragma_line: string (24 to 416 characters), the first physical line of the OpenMP pragma
context_chars: int64 (constant 100), the amount of leading source context captured ahead of each loop
text: string (152 to 177k characters), the source excerpt itself, with <LOOP-START>…<LOOP-END> marking the parallelized loop and <OMP-START>…<OMP-END> marking the pragma
OSGeo/grass/raster3d/r3.gradient/main.c
#pragma omp parallel for schedule
100
== max_i - 1) { /* compute gradient */ /* disabled openMP <LOOP-START>* (static) private (k) */ for (k = 0; k <= j; k++) { Rast3d_gradient_double(&(blocks[k].input), step, &(blocks[k].dx)...
OSGeo/grass/raster/r.proj/main.c
#pragma omp parallel for schedule(static)
100
does not always work, * segfaults in the interpolation functions * can happen */ <LOOP-START>for (col = 0; col < outcellhd.cols; col++) { void *obufptr = (void *)((const unsigned char *)obuffer + col * cell_size); double xcoord1 = xcoord2 + (col)*outcellhd.ew_...
OSGeo/grass/lib/gmath/blas_level_1.c
#pragma omp parallel for schedule(static) reduction(+ : s)
100
G_math_d_x_dot_y(double *x, double *y, double *value, int rows) { int i; double s = 0.0; <LOOP-START>for (i = rows - 1; i >= 0; i--) { s += x[i] * y[i]; }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(static) reduction(+ : s)<OMP-END>
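
The G_math_d_x_dot_y row above, and the norm rows that follow, all use the same idiom: reduction(+ : s) gives each thread a private copy of the accumulator and adds the partial sums together when the loop ends. A minimal compilable sketch of that pattern with standalone names (not the GRASS API); build with e.g. cc -fopenmp:

    #include <stdio.h>

    /* Parallel dot product: every thread accumulates into its own
     * private copy of s; OpenMP adds the partial sums at loop exit. */
    static double dot(const double *x, const double *y, int rows)
    {
        double s = 0.0;
        int i;
    #pragma omp parallel for schedule(static) reduction(+ : s)
        for (i = rows - 1; i >= 0; i--) {
            s += x[i] * y[i];
        }
        return s;
    }

    int main(void)
    {
        double x[4] = {1, 2, 3, 4}, y[4] = {4, 3, 2, 1};
        printf("dot = %f\n", dot(x, y, 4)); /* 4 + 6 + 6 + 4 = 20 */
        return 0;
    }
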
OSGeo/grass/lib/gmath/blas_level_1.c
#pragma omp parallel for schedule(static) reduction(+ : s)
100
*/ void G_math_d_euclid_norm(double *x, double *value, int rows) { int i; double s = 0.0; <LOOP-START>for (i = rows - 1; i >= 0; i--) { s += x[i] * x[i]; }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(static) reduction(+ : s)<OMP-END>
OSGeo/grass/lib/gmath/blas_level_1.c
#pragma omp parallel for schedule(static) reduction(+ : s)
100
void G_math_d_asum_norm(double *x, double *value, int rows) { int i = 0; double s = 0.0; <LOOP-START>for (i = rows - 1; i >= 0; i--) { s += fabs(x[i]); }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(static) reduction(+ : s)<OMP-END>
OSGeo/grass/lib/gmath/blas_level_1.c
#pragma omp parallel for schedule(static) reduction(+ : s)
100
void G_math_f_x_dot_y(float *x, float *y, float *value, int rows) { int i; float s = 0.0; <LOOP-START>for (i = rows - 1; i >= 0; i--) { s += x[i] * y[i]; }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(static) reduction(+ : s)<OMP-END>
OSGeo/grass/lib/gmath/blas_level_1.c
#pragma omp parallel for schedule(static) reduction(+ : s)
100
* */ void G_math_f_euclid_norm(float *x, float *value, int rows) { int i; float s = 0.0; <LOOP-START>for (i = rows - 1; i >= 0; i--) { s += x[i] * x[i]; }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(static) reduction(+ : s)<OMP-END>
OSGeo/grass/lib/gmath/blas_level_1.c
#pragma omp parallel for schedule(static) private(i) reduction(+ : s)
100
* * */ void G_math_f_asum_norm(float *x, float *value, int rows) { int i; float s = 0.0; <LOOP-START>for (i = 0; i < rows; i++) { s += fabs(x[i]); }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(static) private(i) reduction(+ : s)<OMP-END>
OSGeo/grass/lib/gmath/blas_level_1.c
#pragma omp parallel for schedule(static) reduction(+ : s)
100
/ void G_math_i_x_dot_y(int *x, int *y, double *value, int rows) { int i; double s = 0.0; <LOOP-START>for (i = rows - 1; i >= 0; i--) { s += x[i] * y[i]; }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(static) reduction(+ : s)<OMP-END>
OSGeo/grass/lib/gmath/blas_level_1.c
#pragma omp parallel for schedule(static) reduction(+ : s)
100
* */ void G_math_i_euclid_norm(int *x, double *value, int rows) { int i; double s = 0.0; <LOOP-START>for (i = rows - 1; i >= 0; i--) { s += x[i] * x[i]; }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(static) reduction(+ : s)<OMP-END>
OSGeo/grass/lib/gmath/blas_level_1.c
#pragma omp parallel for schedule(static) reduction(+ : s)
100
* * */ void G_math_i_asum_norm(int *x, double *value, int rows) { int i; double s = 0.0; <LOOP-START>for (i = rows - 1; i >= 0; i--) { s += (double)abs(x[i]); }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(static) reduction(+ : s)<OMP-END>
OSGeo/grass/lib/gmath/solvers_direct.c
#pragma omp parallel for schedule(static) private(i, j, tmpval) \
100
le *b, int rows) { int i, j, k; double tmpval = 0.0; for (k = 0; k < rows - 1; k++) { <LOOP-START>shared(k, A, b, rows) for (i = k + 1; i < rows; i++) { tmpval = A[i][k] / A[k][k]; b[i] = b[i] - tmpval * b[k]; for (j = k + 1; j < rows; j++) { A[i...
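
Note the trailing backslash in this row's pragma: omp_pragma_line keeps only the first physical line, so the continuation clause shared(k, A, b, rows) surfaces at the start of the <LOOP-START> span instead. A sketch of the forward-elimination pattern the row records, assuming a dense row-major matrix rather than the GRASS types: the outer k loop stays sequential because each step depends on the previous pivot row, while the row updates for a fixed k are independent.

    /* Gaussian forward elimination: the outer k loop is sequential,
     * but the row updates below the pivot run in parallel. */
    void forward_eliminate(double **A, double *b, int rows)
    {
        int i, j, k;
        double tmpval;

        for (k = 0; k < rows - 1; k++) {
    #pragma omp parallel for schedule(static) private(i, j, tmpval) \
        shared(k, A, b, rows)
            for (i = k + 1; i < rows; i++) {
                tmpval = A[i][k] / A[k][k];
                b[i] -= tmpval * b[k];
                for (j = k + 1; j < rows; j++)
                    A[i][j] -= tmpval * A[k][j];
            }
        }
    }
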
OSGeo/grass/lib/gmath/solvers_direct.c
#pragma omp parallel for schedule(static) private(i, j) shared(k, A, rows)
100
n(double **A, double *b UNUSED, int rows) { int i, j, k; for (k = 0; k < rows - 1; k++) { <LOOP-START>for (i = k + 1; i < rows; i++) { A[i][k] = A[i][k] / A[k][k]; for (j = k + 1; j < rows; j++) { A[i][j] = A[i][j] - A[i][k] * A[k][j]; } }<LOOP-END> ...
OSGeo/grass/lib/gmath/solvers_direct.c
#pragma omp parallel for schedule(static) private(i, j, sum_2) shared(A, k) \
100
ndwidth <= 0) bandwidth = rows; colsize = bandwidth; for (k = 0; k < rows; k++) { <LOOP-START>reduction(+ : sum_1) for (j = 0; j < k; j++) { sum_1 += A[k][j] * A[k][j]; }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(static) private(i, j, sum_2) shared(A, k) \<OMP-...
OSGeo/grass/lib/gmath/solvers_direct.c
#pragma omp parallel for schedule(static) private(i, j, sum_2) \
100
colsize = rows; } else { colsize = k + bandwidth; } <LOOP-START>shared(A, k, sum_1, colsize) for (i = k + 1; i < colsize; i++) { sum_2 = 0.0; for (j = 0; j < k; j++) { sum_2 += A[i][j] * A[k][j]; } A...
OSGeo/grass/lib/gmath/solvers_direct.c
#pragma omp parallel for schedule(static) private(i, k) shared(A, rows)
100
A[k][k]; } } /* we need to copy the lower triangle matrix to the upper triangle */ <LOOP-START>for (k = 0; k < rows; k++) { for (i = k + 1; i < rows; i++) { A[k][i] = A[i][k]; } }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(static) private(i, k) shared(A, rows...
OSGeo/grass/lib/gmath/solvers_direct_cholesky_band.c
#pragma omp parallel for schedule(static) private(j, k, end, sum) \
100
G_fatal_error(_("Decomposition failed at row %i and col %i"), i, 0); T[i][0] = sqrt(sum); <LOOP-START>shared(A, T, i, bandwidth) for (j = 1; j < bandwidth; j++) { sum = A[i][j]; end = ((bandwidth - j) < (i + 1) ? (bandwidth - j) : (i + 1)); for (k = 1; k < end; k++)...
OSGeo/grass/lib/gmath/sparse_matrix.c
#pragma omp parallel for schedule(static) private(i, j)
100
ows) { int i; unsigned int j; double **A = NULL; A = G_alloc_matrix(rows, rows); <LOOP-START>for (i = 0; i < rows; i++) { for (j = 0; j < Asp[i]->cols; j++) { A[i][Asp[i]->index[j]] = Asp[i]->values[j]; } }<LOOP-END> <OMP-START>#pragma omp parallel for schedule(static)...
OSGeo/grass/lib/gmath/sparse_matrix.c
#pragma omp parallel for schedule(static) private(i, j, nonull, count)
100
int nonull, count = 0; G_math_spvector **Asp = NULL; Asp = G_math_alloc_spmatrix(rows); <LOOP-START>for (i = 0; i < rows; i++) { nonull = 0; /*Count the number of non zero entries */ for (j = 0; j < rows; j++) { if (A[i][j] > epsilon) nonull++; } ...
OSGeo/grass/lib/gmath/lu.c
#pragma omp parallel for private(i, j, big, temp) shared(n, a, vv,
100
ector(n); *d = 1.0; /* this pragma works, but doesn't really help speed things up */ /* <LOOP-START>* is_singular) */ for (i = 0; i < n; i++) { big = 0.0; for (j = 0; j < n; j++) if ((temp = fabs(a[i][j])) > big) big = temp; if (big == 0.0) { ...
OSGeo/grass/lib/gmath/lu.c
#pragma omp parallel for private(i, k, sum, dum) shared(j, n, a, vv, big, imax)
100
} big = 0.0; /* not very efficient, but this pragma helps speed things up a bit */ <LOOP-START>for (i = j; i < n; i++) { sum = a[i][j]; for (k = 0; k < j; k++) sum -= a[i][k] * a[k][j]; a[i][j] = sum; if ((dum = vv[i] * fabs(sum)) >= big) ...
OSGeo/grass/lib/gmath/solvers_krylov.c
#pragma omp parallel for schedule(static) private(i, j, sum) \
100
double sum; assert(rows >= 0); Msp = G_math_alloc_spmatrix(rows); if (A != NULL) { <LOOP-START>shared(A, Msp, rows, cols, prec) for (i = 0; i < (unsigned int)rows; i++) { G_math_spvector *spvect = G_math_alloc_spvector(1); switch (prec) { case G_MATH_ROWSCAL...
OSGeo/grass/lib/gmath/solvers_krylov.c
#pragma omp parallel for schedule(static) private(i, j, sum) \
100
cols = 1; ; G_math_add_spvector(Msp, spvect, i); } } else { <LOOP-START>shared(Asp, Msp, rows, cols, prec) for (i = 0; i < (unsigned int)rows; i++) { G_math_spvector *spvect = G_math_alloc_spvector(1); switch (prec) { case G_MATH_ROWSC...
OSGeo/grass/lib/gpde/n_les_assemble.c
#pragma omp parallel for private(i, j, pos, count) schedule(static)
100
_assemble_les_2d: starting the parallel assemble loop"); /* Assemble the matrix in parallel */ <LOOP-START>for (count = 0; count < cell_type_count; count++) { i = index_ij[count][0]; j = index_ij[count][1]; /*create the entries for the */ N_data_star *items = call->callback(data, g...
OSGeo/grass/lib/gpde/n_les_assemble.c
#pragma omp parallel for private(i, j, k, pos, count) schedule(static)
100
} } } G_debug(2, "N_assemble_les_3d: starting the parallel assemble loop"); <LOOP-START>for (count = 0; count < cell_type_count; count++) { i = index_ij[count][0]; j = index_ij[count][1]; k = index_ij[count][2]; /*create the entries for the */ N_data_star...
OSGeo/grass/lib/gpde/test/test_gradient.c
#pragma omp parallel for private(i, j) shared(data)
100
*data; int i, j; data = N_alloc_array_2d(TEST_N_NUM_COLS, TEST_N_NUM_ROWS, 1, CELL_TYPE); <LOOP-START>for (j = 0; j < TEST_N_NUM_ROWS; j++) { for (i = 0; i < TEST_N_NUM_COLS; i++) { N_put_array_2d_c_value(data, i, j, 1); } }<LOOP-END> <OMP-START>#pragma omp parallel for private...
OSGeo/grass/lib/gpde/test/test_gradient.c
#pragma omp parallel for private(i, j) shared(data)
100
data; int i, j; data = N_alloc_array_2d(TEST_N_NUM_COLS, TEST_N_NUM_ROWS, 1, DCELL_TYPE); <LOOP-START>for (j = 0; j < TEST_N_NUM_ROWS; j++) { for (i = 0; i < TEST_N_NUM_COLS; i++) { N_put_array_2d_d_value(data, i, j, (double)i * j); } }<LOOP-END> <OMP-START>#pragma omp parallel...
OSGeo/grass/lib/gpde/test/test_gradient.c
#pragma omp parallel for private(i, j, k) shared(data)
100
d(TEST_N_NUM_COLS, TEST_N_NUM_ROWS, TEST_N_NUM_DEPTHS, 1, FCELL_TYPE); <LOOP-START>for (k = 0; k < TEST_N_NUM_DEPTHS; k++) { for (j = 0; j < TEST_N_NUM_ROWS; j++) { for (i = 0; i < TEST_N_NUM_COLS; i++) { N_put_array_3d_f_value(data, i, j, k, 1.0); ...
OSGeo/grass/lib/gpde/test/test_gradient.c
#pragma omp parallel for private(i, j, k) shared(data)
100
d(TEST_N_NUM_COLS, TEST_N_NUM_ROWS, TEST_N_NUM_DEPTHS, 1, DCELL_TYPE); <LOOP-START>for (k = 0; k < TEST_N_NUM_DEPTHS; k++) for (j = 0; j < TEST_N_NUM_ROWS; j++) { for (i = 0; i < TEST_N_NUM_COLS; i++) { N_put_array_3d_f_value(data, i, j, k, (float)i * j *...
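
The gpde test rows above all parallelize the outer loop of an array-fill nest; because the indices are declared at function scope, the inner ones must be listed private explicitly (only the parallel loop's own variable is implicitly private). A sketch over a plain C array instead of the N_array types:

    #define NUM_ROWS 100
    #define NUM_COLS 100

    /* Fill a 2-D array in parallel over rows. j would be private
     * anyway as the parallel loop variable; i must be declared so. */
    void fill(double data[NUM_ROWS][NUM_COLS])
    {
        int i, j;
    #pragma omp parallel for private(i, j) shared(data)
        for (j = 0; j < NUM_ROWS; j++)
            for (i = 0; i < NUM_COLS; i++)
                data[j][i] = (double)i * j;
    }
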
OSGeo/grass/lib/gpde/test/test_gwflow.c
#pragma omp parallel for private(i, j, k) shared(data)
100
OLS_LOCAL, TEST_N_NUM_ROWS_LOCAL, TEST_N_NUM_DEPTHS_LOCAL, 1, 1); <LOOP-START>for (k = 0; k < TEST_N_NUM_DEPTHS_LOCAL; k++) for (j = 0; j < TEST_N_NUM_ROWS_LOCAL; j++) { for (i = 0; i < TEST_N_NUM_COLS_LOCAL; i++) { if (j == 0) { ...
OSGeo/grass/lib/gpde/test/test_gwflow.c
#pragma omp parallel for private(i, j) shared(data)
100
wflow_data2d(TEST_N_NUM_COLS_LOCAL, TEST_N_NUM_ROWS_LOCAL, 1, 1); <LOOP-START>for (j = 0; j < TEST_N_NUM_ROWS_LOCAL; j++) { for (i = 0; i < TEST_N_NUM_COLS_LOCAL; i++) { if (j == 0) { N_put_array_2d_d_value(data->phead, i, j, 50); N_...
OSGeo/grass/lib/gpde/test/test_arrays.c
#pragma omp parallel for private(i, j) shared(cols, rows, type, a) \
100
int i, j, res = 0; rows = a->rows; cols = a->cols; type = N_get_array_2d_type(a); <LOOP-START>reduction(+ : res) for (j = 0; j < rows; j++) { for (i = 0; i < cols; i++) { if (type == CELL_TYPE) { N_put_array_2d_c_value(a, i, j, (CELL)i * (CELL)j); ...
OSGeo/grass/lib/gpde/test/test_arrays.c
#pragma omp parallel for private(i, j) shared(rows, cols, a) reduction(+ : res)
100
array_2d *a) { int rows, cols; int i, j, res = 0; cols = a->cols; rows = a->rows; <LOOP-START>for (j = 0; j < rows; j++) { for (i = 0; i < cols; i++) { N_put_array_2d_value_null(a, i, j); if (!N_is_array_2d_value_null(a, i, j)) res++; } }<LOO...
OSGeo/grass/lib/gpde/test/test_arrays.c
#pragma omp parallel for private(i, j) shared(cols, rows, type, a, b) \
100
int i, j, res = 0; cols = a->cols; rows = a->rows; type = N_get_array_2d_type(a); <LOOP-START>reduction(+ : res) for (j = 0; j < rows; j++) { for (i = 0; i < cols; i++) { if (type == CELL_TYPE) { if (N_get_array_2d_c_value(a, i, j) != N_get_a...
OSGeo/grass/lib/gpde/test/test_arrays.c
#pragma omp parallel for private(i, j, k) shared(depths, rows, cols, type, a) \
100
cols = a->cols; rows = a->rows; depths = a->depths; type = N_get_array_3d_type(a); <LOOP-START>reduction(+ : res) for (k = 0; k < depths; k++) { for (j = 0; j < rows; j++) { for (i = 0; i < cols; i++) { if (type == FCELL_TYPE) { N_put_array_3d...
OSGeo/grass/lib/gpde/test/test_arrays.c
#pragma omp parallel for private(i, j, k) shared(cols, rows, depths, type, a) \
100
cols = a->cols; rows = a->rows; depths = a->depths; type = N_get_array_3d_type(a); <LOOP-START>reduction(+ : res) for (k = 0; k < depths; k++) { for (j = 0; j < rows; j++) { for (i = 0; i < cols; i++) { N_put_array_3d_value_null(a, i, j, k); if (!...
OSGeo/grass/lib/gpde/test/test_arrays.c
#pragma omp parallel for private(i, j, k) \
100
rows = a->rows; cols = a->cols; depths = a->depths; type = N_get_array_3d_type(a); <LOOP-START>shared(depths, rows, cols, type, a, b) reduction(+ : res) for (k = 0; k < depths; k++) { for (i = 0; i < rows; i++) { for (j = 0; j < cols; j++) { if (type == FCELL_TYP...
OSGeo/grass/lib/gpde/test/test_solute_transport.c
#pragma omp parallel for private(i, j, k) shared(data)
100
_transport_data3d( TEST_N_NUM_COLS_LOCAL, TEST_N_NUM_ROWS_LOCAL, TEST_N_NUM_DEPTHS_LOCAL); <LOOP-START>for (k = 0; k < TEST_N_NUM_DEPTHS_LOCAL; k++) for (j = 0; j < TEST_N_NUM_ROWS_LOCAL; j++) { for (i = 0; i < TEST_N_NUM_COLS_LOCAL; i++) { if (j == 0) { ...
OSGeo/grass/lib/gpde/test/test_solute_transport.c
#pragma omp parallel for private(i, j) shared(data)
100
t_data2d(TEST_N_NUM_COLS_LOCAL, TEST_N_NUM_ROWS_LOCAL); <LOOP-START>for (j = 0; j < TEST_N_NUM_ROWS_LOCAL; j++) { for (i = 0; i < TEST_N_NUM_COLS_LOCAL; i++) { if (j == 0) { N_put_array_2d_d_value(data->c, i, j, 0); N_put_a...
OSGeo/grass/lib/gpde/test/test_assemble.c
#pragma omp parallel for private(i, j) shared(data)
100
*data; int i, j; data = N_alloc_array_2d(TEST_N_NUM_COLS, TEST_N_NUM_ROWS, 1, CELL_TYPE); <LOOP-START>for (j = 0; j < TEST_N_NUM_ROWS; j++) { for (i = 0; i < TEST_N_NUM_COLS; i++) { if (j == 1) { N_put_array_2d_c_value(data, i, j, 2); } else { ...
OSGeo/grass/lib/gpde/test/test_assemble.c
#pragma omp parallel for private(i, j) shared(data)
100
data; int i, j; data = N_alloc_array_2d(TEST_N_NUM_COLS, TEST_N_NUM_ROWS, 1, DCELL_TYPE); <LOOP-START>for (j = 0; j < TEST_N_NUM_ROWS; j++) { for (i = 0; i < TEST_N_NUM_COLS; i++) { if (j == 1) { N_put_array_2d_d_value(data, i, j, 50); } else { ...
OSGeo/grass/lib/gpde/test/test_assemble.c
#pragma omp parallel for private(i, j, k) shared(data)
100
d(TEST_N_NUM_COLS, TEST_N_NUM_ROWS, TEST_N_NUM_DEPTHS, 1, FCELL_TYPE); <LOOP-START>for (k = 0; k < TEST_N_NUM_DEPTHS; k++) for (j = 0; j < TEST_N_NUM_ROWS; j++) { for (i = 0; i < TEST_N_NUM_COLS; i++) { if (i == 0 && j == 1) { N_put_a...
OSGeo/grass/lib/gpde/test/test_assemble.c
#pragma omp parallel for private(i, j, k) shared(data)
100
d(TEST_N_NUM_COLS, TEST_N_NUM_ROWS, TEST_N_NUM_DEPTHS, 1, DCELL_TYPE); <LOOP-START>for (k = 0; k < TEST_N_NUM_DEPTHS; k++) for (j = 0; j < TEST_N_NUM_ROWS; j++) { for (i = 0; i < TEST_N_NUM_COLS; i++) { if (i == 0 && j == 1) { N_put_a...
OSGeo/grass/lib/gpde/test/test_les.c
#pragma omp parallel for private(i, j) shared(les)
100
lloc_les(TEST_N_NUM_ROWS, N_SPARSE_LES); G_message("\t * testing les creation in parallel\n"); <LOOP-START>for (i = 0; i < TEST_N_NUM_ROWS; i++) { for (j = 0; j < TEST_N_NUM_ROWS; j++) { if (i != j) les->A[i][j] = 2e-2; les->A[i][i] = -1e2 - i; } les-...
OSGeo/grass/lib/gpde/test/test_les.c
#pragma omp parallel for private(i, j) shared(sples, spvector)
100
->A[i][i] = -1e2 - i; } les->x[i] = 273.15 + i; les->b[i] = 1e2 - i; } <LOOP-START>for (i = 0; i < TEST_N_NUM_ROWS; i++) { spvector = G_math_alloc_spvector(TEST_N_NUM_ROWS); for (j = 0; j < TEST_N_NUM_ROWS; j++) if (i != j) spvector->index[j] = 2...
Bayons/OpenMP/Practica/energy_v2.c
#pragma omp parallel for shared(layer)
100
tf(stderr,"Error: Allocating the layer memory\n"); exit( EXIT_FAILURE ); } } #pragma omp barrier <LOOP-START>for( k=0; k<layer_size; k++ ) layer[k] = 0.0f; #pragma omp parallel for shared(layer_copy) for( k=0; k<layer_size; k++ ) layer_copy[k] = 0.0f; }/* fin del pragma omp */ /* 4. Fase de bombardeos */ for( i=...
Bayons/OpenMP/Practica/energy_v2.c
#pragma omp parallel for shared(layer_copy)
100
omp barrier #pragma omp parallel for shared(layer) for( k=0; k<layer_size; k++ ) layer[k] = 0.0f; <LOOP-START>for( k=0; k<layer_size; k++ ) layer_copy[k] = 0.0f; }/* fin del pragma omp */ /* 4. Fase de bombardeos */ for( i=0; i<num_storms; i++) { /* 4.1. Suma energia de impactos */ /* Para cada particula */ ...
Bayons/OpenMP/Practica/energy_v2.c
#pragma omp parallel for shared(layer, layer_copy)
100
/* 4.2. Relajacion entre tormentas de particulas */ /* 4.2.1. Copiar valores a capa auxiliar */ //<LOOP-START>for( k=0; k<layer_size; k++ ) layer_copy[k] = layer[k]; /* 4.2.2. Actualizar capa, menos los extremos, usando valores del array auxiliar */ //#pragma omp parallel for shared(layer, layer_copy) reductio...
Bayons/OpenMP/Practica/energy_v2.c
#pragma omp parallel for shared(layer, layer_copy) reduction(/:division)
100
layer[k]; /* 4.2.2. Actualizar capa, menos los extremos, usando valores del array auxiliar */ //<LOOP-START>for( k=1; k<layer_size-1; k++ ){ layer[k] = ( layer_copy[k-1] + layer_copy[k] + layer_copy[k+1] ) / 3; }<LOOP-END> <OMP-START>#pragma omp parallel for shared(layer, layer_copy) reduction(/:division)<OMP-...
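
The energy_v2.c rows form a 1-D relaxation stencil: the layer is copied, then every interior cell becomes the mean of its copied neighbors. The reduction(/:division) pragma recorded here appears commented out in the source, and / is in any case not a valid OpenMP reduction operator; two plain parallel loops suffice, as in this sketch (float layer assumed):

    /* 1-D relaxation: copy the layer, then average each interior
     * cell's neighbors. Two independent parallel loops; no
     * reduction clause is needed. */
    void relax(float *layer, float *layer_copy, int layer_size)
    {
        int k;
    #pragma omp parallel for shared(layer, layer_copy)
        for (k = 0; k < layer_size; k++)
            layer_copy[k] = layer[k];

    #pragma omp parallel for shared(layer, layer_copy)
        for (k = 1; k < layer_size - 1; k++)
            layer[k] = (layer_copy[k - 1] + layer_copy[k] + layer_copy[k + 1]) / 3;
    }
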
cdwdirect/sos_flow/examples/lulesh/lulesh.cc
#pragma omp parallel for firstprivate(numElem)
100
Index_t numElem) { // // pull in the stresses appropriate to the hydro integration // <LOOP-START>for (Index_t i = 0 ; i < numElem ; ++i){ sigxx[i] = sigyy[i] = sigzz[i] = - domain.p(i) - domain.q(i) ; }<LOOP-END> <OMP-START>#pragma omp parallel for firstprivate(numElem)<OMP-END>
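
The LULESH rows use firstprivate for read-only scalars: each thread receives its own copy of numElem, initialized from the value on entry to the parallel region. A C sketch of the stress-initialization row above, with plain arrays standing in for the Domain accessors:

    /* firstprivate(numElem): every thread gets a private copy of
     * the trip count, initialized from the enclosing scope. */
    void init_stress(double *sigxx, double *sigyy, double *sigzz,
                     const double *p, const double *q, int numElem)
    {
    #pragma omp parallel for firstprivate(numElem)
        for (int i = 0; i < numElem; ++i)
            sigxx[i] = sigyy[i] = sigzz[i] = -p[i] - q[i];
    }
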
cdwdirect/sos_flow/examples/lulesh/lulesh.cc
#pragma omp parallel for firstprivate(numElem)
100
_t>(numElem8) ; fz_elem = Allocate<Real_t>(numElem8) ; } // loop over all elements <LOOP-START>for( Index_t k=0 ; k<numElem ; ++k ) { const Index_t* const elemToNode = domain.nodelist(k); Real_t B[3][8] ;// shape function derivatives Real_t x_local[8] ; Real_t y_loca...
cdwdirect/sos_flow/examples/lulesh/lulesh.cc
#pragma omp parallel for firstprivate(numNode)
100
need to copy the data out of the temporary // arrays used above into the final forces field <LOOP-START>for( Index_t gnode=0 ; gnode<numNode ; ++gnode ) { Index_t count = domain.nodeElemCount(gnode) ; Index_t *cornerList = domain.nodeElemCornerList(gnode) ; Real_t fx_...
cdwdirect/sos_flow/examples/lulesh/lulesh.cc
#pragma omp parallel for firstprivate(numElem, hourg)
100
/*************************************************/ /* compute the hourglass modes */ <LOOP-START>for(Index_t i2=0;i2<numElem;++i2){ Real_t *fx_local, *fy_local, *fz_local ; Real_t hgfx[8], hgfy[8], hgfz[8] ; Real_t coefficient; Real_t hourgam[8][4]; Real_t xd1[8]...
cdwdirect/sos_flow/examples/lulesh/lulesh.cc
#pragma omp parallel for firstprivate(numNode)
100
if (numthreads > 1) { // Collect the data from the local arrays into the final force arrays <LOOP-START>for( Index_t gnode=0 ; gnode<numNode ; ++gnode ) { Index_t count = domain.nodeElemCount(gnode) ; Index_t *cornerList = domain.nodeElemCornerList(gnode) ; Real_t fx_...
cdwdirect/sos_flow/examples/lulesh/lulesh.cc
#pragma omp parallel for firstprivate(numElem)
100
_t>(numElem8) ; Real_t *z8n = Allocate<Real_t>(numElem8) ; /* start loop over elements */ <LOOP-START>for (Index_t i=0 ; i<numElem ; ++i){ Real_t x1[8], y1[8], z1[8] ; Real_t pfx[8], pfy[8], pfz[8] ; Index_t* elemToNode = domain.nodelist(i); CollectDomainNodesToElemNodes(do...
cdwdirect/sos_flow/examples/lulesh/lulesh.cc
#pragma omp parallel for firstprivate(numElem)
100
determ, numElem, domain.numNode()) ; // check for negative element volume <LOOP-START>for ( Index_t k=0 ; k<numElem ; ++k ) { if (determ[k] <= Real_t(0.0)) { #if USE_MPI MPI_Abort(MPI_COMM_WORLD, VolumeError) ; #else exit(VolumeError); ...
cdwdirect/sos_flow/examples/lulesh/lulesh.cc
#pragma omp parallel for firstprivate(numNode)
100
domain.sizeX() + 1, domain.sizeY() + 1, domain.sizeZ() + 1, true, false) ; #endif <LOOP-START>for (Index_t i=0; i<numNode; ++i) { domain.fx(i) = Real_t(0.0) ; domain.fy(i) = Real_t(0.0) ; domain.fz(i) = Real_t(0.0) ; }<LOOP-END> <OMP-START>#pragma omp parallel for firstpriv...
cdwdirect/sos_flow/examples/lulesh/lulesh.cc
#pragma omp parallel for firstprivate(numNode)
100
*************/ static inline void CalcAccelerationForNodes(Domain &domain, Index_t numNode) { <LOOP-START>for (Index_t i = 0; i < numNode; ++i) { domain.xdd(i) = domain.fx(i) / domain.nodalMass(i); domain.ydd(i) = domain.fy(i) / domain.nodalMass(i); domain.zdd(i) = domain.fz(i) / domain.no...
cdwdirect/sos_flow/examples/lulesh/lulesh.cc
#pragma omp parallel for firstprivate(numNode)
100
lcVelocityForNodes(Domain &domain, const Real_t dt, const Real_t u_cut, Index_t numNode) { <LOOP-START>for ( Index_t i = 0 ; i < numNode ; ++i ) { Real_t xdtmp, ydtmp, zdtmp ; xdtmp = domain.xd(i) + domain.xdd(i) * dt ; if( FABS(xdtmp) < u_cut ) xdtmp = Real_t(0.0); domain....
cdwdirect/sos_flow/examples/lulesh/lulesh.cc
#pragma omp parallel for firstprivate(numNode)
100
*/ static inline void CalcPositionForNodes(Domain &domain, const Real_t dt, Index_t numNode) { <LOOP-START>for ( Index_t i = 0 ; i < numNode ; ++i ) { domain.x(i) += domain.xd(i) * dt ; domain.y(i) += domain.yd(i) * dt ; domain.z(i) += domain.zd(i) * dt ; }<LOOP-END> <OMP-START>#pra...
cdwdirect/sos_flow/examples/lulesh/lulesh.cc
#pragma omp parallel for firstprivate(numElem, deltaTime)
100
domain, Real_t *vnew, Real_t deltaTime, Index_t numElem ) { // loop over all elements <LOOP-START>for( Index_t k=0 ; k<numElem ; ++k ) { Real_t B[3][8] ; /** shape function derivatives */ Real_t D[6] ; Real_t x_local[8] ; Real_t y_local[8] ; Real_t z_local[8] ; ...
cdwdirect/sos_flow/examples/lulesh/lulesh.cc
#pragma omp parallel for firstprivate(numElem)
100
ltatime, numElem) ; // element loop to do some stuff not included in the elemlib function. <LOOP-START>for ( Index_t k=0 ; k<numElem ; ++k ) { // calc strain rate and apply as constraint (only done in FB element) Real_t vdov = domain.dxx(k) + domain.dyy(k) + domain.dzz(k) ; ...
cdwdirect/sos_flow/examples/lulesh/lulesh.cc
#pragma omp parallel for firstprivate(numElem)
100
notonicQGradientsForElems(Domain& domain, Real_t vnew[]) { Index_t numElem = domain.numElem(); <LOOP-START>for (Index_t i = 0 ; i < numElem ; ++i ) { const Real_t ptiny = Real_t(1.e-36) ; Real_t ax,ay,az ; Real_t dxv,dyv,dzv ; const Index_t *elemToNode = domain.nodelist(i); ...
cdwdirect/sos_flow/examples/lulesh/lulesh.cc
#pragma omp parallel for firstprivate(qlc_monoq, qqc_monoq, monoq_limiter_mult, monoq_max_slope, ptiny)
100
max_slope(); Real_t qlc_monoq = domain.qlc_monoq(); Real_t qqc_monoq = domain.qqc_monoq(); <LOOP-START>for ( Index_t ielem = 0 ; ielem < domain.regElemSize(r); ++ielem ) { Index_t i = domain.regElemlist(r,ielem); Real_t qlin, qquad ; Real_t phixi, phieta, phizeta ; Int_t bcMask ...
cdwdirect/sos_flow/examples/lulesh/lulesh.cc
#pragma omp parallel for firstprivate(length)
100
Real_t pmin, Real_t p_cut, Real_t eosvmax, Index_t length, Index_t *regElemList) { <LOOP-START>for (Index_t i = 0; i < length ; ++i) { Real_t c1s = Real_t(2.0)/Real_t(3.0) ; bvc[i] = c1s * (compression[i] + Real_t(1.)); pbvc[i] = c1s; }<LOOP-END> <OMP-START>#pragma omp paral...
cdwdirect/sos_flow/examples/lulesh/lulesh.cc
#pragma omp parallel for firstprivate(length, pmin, p_cut, eosvmax)
100
)/Real_t(3.0) ; bvc[i] = c1s * (compression[i] + Real_t(1.)); pbvc[i] = c1s; } <LOOP-START>for (Index_t i = 0 ; i < length ; ++i){ Index_t elem = regElemList[i]; p_new[i] = bvc[i] * e_old[i] ; if (FABS(p_new[i]) < p_cut ) p_new[i] = Real_t(0.0) ; ...
cdwdirect/sos_flow/examples/lulesh/lulesh.cc
#pragma omp parallel for firstprivate(length, emin)
100
Index_t length, Index_t *regElemList) { Real_t *pHalfStep = Allocate<Real_t>(length) ; <LOOP-START>for (Index_t i = 0 ; i < length ; ++i) { e_new[i] = e_old[i] - Real_t(0.5) * delvc[i] * (p_old[i] + q_old[i]) + Real_t(0.5) * work[i]; if (e_new[i] < emin ) { e_new[i...
cdwdirect/sos_flow/examples/lulesh/lulesh.cc
#pragma omp parallel for firstprivate(length, rho0)
100
ep, bvc, pbvc, e_new, compHalfStep, vnewc, pmin, p_cut, eosvmax, length, regElemList); <LOOP-START>for (Index_t i = 0 ; i < length ; ++i) { Real_t vhalf = Real_t(1.) / (Real_t(1.) + compHalfStep[i]) ; if ( delvc[i] > Real_t(0.) ) { q_new[i] /* = qq_old[i] = ql_old[i] */ = Real_...
cdwdirect/sos_flow/examples/lulesh/lulesh.cc
#pragma omp parallel for firstprivate(length, emin, e_cut)
100
.0)*(p_old[i] + q_old[i]) - Real_t(4.0)*(pHalfStep[i] + q_new[i])) ; } <LOOP-START>for (Index_t i = 0 ; i < length ; ++i) { e_new[i] += Real_t(0.5) * work[i]; if (FABS(e_new[i]) < e_cut) { e_new[i] = Real_t(0.) ; } if ( e_new[i] < emin ) {...
cdwdirect/sos_flow/examples/lulesh/lulesh.cc
#pragma omp parallel for firstprivate(length, rho0, emin, e_cut)
100
new, bvc, pbvc, e_new, compression, vnewc, pmin, p_cut, eosvmax, length, regElemList); <LOOP-START>for (Index_t i = 0 ; i < length ; ++i){ const Real_t sixth = Real_t(1.0) / Real_t(6.0) ; Index_t elem = regElemList[i]; Real_t q_tilde ; if (delvc[i] > Real_t(0.)) { ...
cdwdirect/sos_flow/examples/lulesh/lulesh.cc
#pragma omp parallel for firstprivate(length, rho0, q_cut)
100
new, bvc, pbvc, e_new, compression, vnewc, pmin, p_cut, eosvmax, length, regElemList); <LOOP-START>for (Index_t i = 0 ; i < length ; ++i){ Index_t elem = regElemList[i]; if ( delvc[i] <= Real_t(0.) ) { Real_t ssc = ( pbvc[i] * e_new[i] + vnewc[elem] * vnewc[...
cdwdirect/sos_flow/examples/lulesh/lulesh.cc
#pragma omp parallel for firstprivate(rho0, ss4o3)
100
newc, Real_t *pbvc, Real_t *bvc, Real_t ss4o3, Index_t len, Index_t *regElemList) { <LOOP-START>for (Index_t i = 0; i < len ; ++i) { Index_t elem = regElemList[i]; Real_t ssTmp = (pbvc[i] * enewc[i] + vnewc[elem] * vnewc[elem] * bvc[i] * pnewc[i]) / rho0; if (ssTm...
cdwdirect/sos_flow/examples/lulesh/lulesh.cc
#pragma omp parallel for firstprivate(numElemReg)
100
in, qq_old, ql_old, rho0, eosvmax, numElemReg, regElemList); } <LOOP-START>for (Index_t i=0; i<numElemReg; ++i) { Index_t elem = regElemList[i]; domain.p(elem) = p_new[i] ; domain.e(elem) = e_new[i] ; domain.q(elem) = q_new[i] ; }<LOOP-END> <OMP-S...
cdwdirect/sos_flow/examples/lulesh/lulesh.cc
#pragma omp parallel for firstprivate(length, v_cut)
100
rElems(Domain &domain, Real_t *vnew, Real_t v_cut, Index_t length) { if (length != 0) { <LOOP-START>for(Index_t i=0 ; i<length ; ++i) { Real_t tmpV = vnew[i] ; if ( FABS(tmpV - Real_t(1.0)) < v_cut ) tmpV = Real_t(1.0) ; domain.v(i) = tmpV ; }<LO...
mishal23/parallel-programming-openmp/daxpy-loop.cpp
#pragma omp parallel for shared(x,y) private(i)
100
) { int i; omp_set_num_threads(number_of_threads); int n_per_thread = SIZE/number_of_threads; <LOOP-START>for(i=0;i<SIZE;i++) { x[i]=a*x[i]+y[i]; }<LOOP-END> <OMP-START>#pragma omp parallel for shared(x,y) private(i)<OMP-END>
mishal23/parallel-programming-openmp/matrix-multiplication.cpp
#pragma omp parallel for shared(a,b) private(i,j,k)
100
) { for(j=0; j<SIZE; j++) { b[i][j] = rand()%1000; } } } void parallel() { int i,j,k; <LOOP-START>for(i=0; i<SIZE; i++) { for(j=0; j<SIZE; j++) { res[i][j] = 0; for(k=0; k<SIZE; k++) { res[i][j] += a[i][k]*b[k][j]; } } }<LOOP-END> <OMP-START>#pragma omp parallel for share...
mishal23/parallel-programming-openmp/value-of-pi/value-of-pie-thread-safe.cpp
#pragma omp parallel for private(x, y, i) reduction(+:count)
100
return pi; } double parallel(int n) { int i, count=0; double x,y,pi; count = 0; <LOOP-START>for(i=0; i<n; i++) { x = drandom(); y = drandom(); if((x*x + y*y) <= 1) { count++; } }<LOOP-END> <OMP-START>#pragma...
mishal23/parallel-programming-openmp/value-of-pi/value-of-pie-random-generator.cpp
#pragma omp parallel for private(x, y, i) reduction(+:count)
100
double x,y,pi; count = 0; // removes synchronization issue - hence reduction clause <LOOP-START>for(i=0; i<n; i++) { x = (double)rand_double(1.0); y = (double)rand_double(1.0); if((x*x + y*y) <= 1) { count++; } ...
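
Both pi rows estimate π by sampling the unit square and counting hits inside the quarter circle; reduction(+:count) merges the per-thread tallies while x and y stay private. A self-contained sketch that swaps the repo's drandom()/rand_double() helpers (not shown here) for POSIX rand_r with a per-iteration seed:

    #include <stdio.h>
    #include <stdlib.h>

    /* Monte Carlo pi: reduction(+ : count) combines per-thread hit
     * counts; x, y, seed are declared inside the loop, so private. */
    static double estimate_pi(int n)
    {
        int i, count = 0;
    #pragma omp parallel for reduction(+ : count)
        for (i = 0; i < n; i++) {
            unsigned int seed = (unsigned int)i * 2654435761u + 12345u;
            double x = rand_r(&seed) / (double)RAND_MAX;
            double y = rand_r(&seed) / (double)RAND_MAX;
            if (x * x + y * y <= 1.0)
                count++;
        }
        return 4.0 * (double)count / n;
    }

    int main(void)
    {
        printf("pi ~= %f\n", estimate_pi(10 * 1000 * 1000));
        return 0;
    }
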
ainsleyrutterford/HPC-OpenCL/openmp_d2q9-bgk.c
#pragma omp parallel for
100
4.f / 9.f; float w1 = params->density / 9.f; float w2 = params->density / 36.f; <LOOP-START>for (int jj = 0; jj < params->ny; jj++) { for (int ii = 0; ii < params->nx; ii++) { // centre (*cells_ptr)[(0 * params->ny * params->nx) + (ii + jj*params->nx)] = w0; // axis directions ...
ainsleyrutterford/HPC-OpenCL/openmp_d2q9-bgk.c
#pragma omp parallel for
100
x) + (ii + jj*params->nx)] = w2; } } /* first set all cells in obstacle array to zero */ <LOOP-START>for (int jj = 0; jj < params->ny; jj++) { for (int ii = 0; ii < params->nx; ii++) { (*obstacles_ptr)[ii + jj*params->nx] = 0; } }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
Alfons0329/Parallel_Programming_Fall_2018/Final Project/gaussian_blur_omp.cpp
#pragma omp parallel for
100
bstr(0, inputfile_name.size() - 4)+ "_blur_omp.bmp"; for (int j = 0; j < img_height; j++) { <LOOP-START>for (int i = 0; i < img_width; i++) { pic_out[3 * (j * img_width + i) + MYRED] = gaussian_filter(i, j, MYRED, resolution); pic_out[3 * (j * img_width + i) + MYGREEN] = gaussian_filter(i, j, MYGREEN...
Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c
#pragma omp parallel for
100
r Class; logical verified; double zeta_verify_value, epsilon, err; char *t_names[T_last]; <LOOP-START>for (i = 0; i < T_last; i++) { timer_clear(i); }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c
#pragma omp parallel for reduction(-:colidx)
100
n, no speed up #pragma omp for for (j = 0; j < lastrow - firstrow + 1; j++) { // <LOOP-START>for (k = rowstr[j]; k < rowstr[j+1]; k+=3) //try loop unrolling { colidx[k] = colidx[k] - firstcol; colidx[k + 1] = colidx[k + 1] - firstcol; colidx[k + 2] = colidx[k + 2] - firs...
Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c
#pragma omp parallel for
100
ze z to obtain x //--------------------------------------------------------------------- // <LOOP-START>for (j = 0; j < lastcol - firstcol + 1; j++) { x[j] = norm_temp2 * z[j]; }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c
#pragma omp parallel for
100
ector to (1, 1, .... 1) //--------------------------------------------------------------------- <LOOP-START>for (i = 0; i < NA+1; i++) { x[i] = 1.0; }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c
#pragma omp parallel for reduction(+:norm_temp1, norm_temp2) private(j)
100
// tried the following two pragma, but seems not speed up with these two, inner for bad idea // <LOOP-START>// #pragma omp parallel for reduction(+:norm_temp1, norm_temp2) for (j = 0; j < lastcol - firstcol + 1; j++) { norm_temp1 = norm_temp1 + x[j]*z[j]; norm_temp2 = norm_temp2 + z[j]*z[j]; }<L...
Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c
#pragma omp parallel for reduction(+:norm_temp1, norm_temp2)
100
for bad idea // #pragma omp parallel for reduction(+:norm_temp1, norm_temp2) private(j) // <LOOP-START>for (j = 0; j < lastcol - firstcol + 1; j++) { norm_temp1 = norm_temp1 + x[j]*z[j]; norm_temp2 = norm_temp2 + z[j]*z[j]; }<LOOP-END> <OMP-START>#pragma omp parallel for reduction(+:norm_temp1,...
Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c
#pragma omp parallel for, no speed up, inner for bad idea
100
ze z to obtain x //--------------------------------------------------------------------- // <LOOP-START>for (j = 0; j < lastcol - firstcol + 1; j++) { x[j] = norm_temp2 * z[j]; }<LOOP-END> <OMP-START>#pragma omp parallel for, no speed up, inner for bad idea<OMP-END>
Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c
#pragma omp parallel for
100
significantly faster // on the Cray t3d - overall speed of code is 1.5 times faster. <LOOP-START>for (j = 0; j < lastrow - firstrow + 1; j++) { sum = 0.0; //#pragma omp parallel for reduction(+:sum) // no speed up, inner loop for bad idea(too mych overhead) for (k = rowstr[j]; k < rows...
Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c
#pragma omp parallel for reduction(+:sum)
100
neck?)!!!!!************ for (j = 0; j < lastrow - firstrow + 1; j++) { sum = 0.0; //<LOOP-START>for (k = rowstr[j]; k < rowstr[j+1]; k++) { sum = sum + a[k]*p[colidx[k]]; }<LOOP-END> <OMP-START>#pragma omp parallel for reduction(+:sum) <OMP-END>
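
Several cg.c rows above record commented-out attempts to parallelize the inner k loop of the CSR matrix-vector product, annotated "no speed up, inner loop bad idea"; the conventional alternative is to parallelize the outer row loop so that each row's dot product stays local to one thread. A simplified sketch using the NAS CG array names:

    /* CSR sparse matrix-vector product q = A*p: parallelize over
     * rows; each row's sum is local to one thread, so no reduction
     * clause is needed. */
    void spmv(const double *a, const int *colidx, const int *rowstr,
              const double *p, double *q, int nrows)
    {
    #pragma omp parallel for
        for (int j = 0; j < nrows; j++) {
            double sum = 0.0;
            for (int k = rowstr[j]; k < rowstr[j + 1]; k++)
                sum += a[k] * p[colidx[k]];
            q[j] = sum;
        }
    }
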
Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c
#pragma omp parallel for reduction(+:d)
100
in p.q //--------------------------------------------------------------------- d = 0.0; <LOOP-START>for (j = 0; j < lastcol - firstcol + 1; j++) { d = d + p[j]*q[j]; }<LOOP-END> <OMP-START>#pragma omp parallel for reduction(+:d)<OMP-END>
Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c
#pragma omp parallel for reduction(+:rho)
100
ha*q //--------------------------------------------------------------------- rho = 0.0; <LOOP-START>for (j = 0; j < lastcol - firstcol + 1; j++) { z[j] = z[j] + alpha*p[j]; r[j] = r[j] - alpha*q[j]; rho = rho + r[j]*r[j]; }<LOOP-END> <OMP-START>#pragma omp parallel for reduction(+:rh...
Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c
#pragma omp parallel for
100
// p = r + beta*p //--------------------------------------------------------------------- <LOOP-START>for (j = 0; j < lastcol - firstcol + 1; j++) { p[j] = r[j] + beta*p[j]; }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c
#pragma omp parallel for
100
or the use in sparse. //--------------------------------------------------------------------- //<LOOP-START>for (iouter = 0; iouter < n; iouter++) { nzv = NONZER; sprnvc(n, nzv, nn1, vc, ivc); vecset(n, vc, ivc, &nzv, iouter+1, 0.5); arow[iouter] = nzv; #pragma omp parallel for for (ivelt = ...
Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c
#pragma omp parallel for
100
nvc(n, nzv, nn1, vc, ivc); vecset(n, vc, ivc, &nzv, iouter+1, 0.5); arow[iouter] = nzv; <LOOP-START>for (ivelt = 0; ivelt < nzv; ivelt++) { acol[iouter][ivelt] = ivc[ivelt] - 1; aelt[iouter][ivelt] = vc[ivelt]; }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c
#pragma omp parallel for
100
f triples in each row //--------------------------------------------------------------------- //<LOOP-START>for (j = 0; j < nrows+1; j++) { rowstr[j] = 0; }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c
#pragma omp parallel for
100
-------- //#pragma omp parallel for for (j = 0; j < nrows+1; j++) { rowstr[j] = 0; } //<LOOP-START>for (i = 0; i < n; i++) { for (nza = 0; nza < arow[i]; nza++) { j = acol[i][nza] + 1; rowstr[j] = rowstr[j] + arow[i]; } }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c
#pragma omp parallel for
100
j = acol[i][nza] + 1; rowstr[j] = rowstr[j] + arow[i]; } } rowstr[0] = 0; //<LOOP-START>for (j = 1; j < nrows+1; j++) { rowstr[j] = rowstr[j] + rowstr[j-1]; }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c
#pragma omp parallel for
100
.. preload data pages //--------------------------------------------------------------------- //<LOOP-START>for (j = 0; j < nrows; j++) { for (k = rowstr[j]; k < rowstr[j+1]; k++) { a[k] = 0.0; colidx[k] = -1; } nzloc[j] = 0; }<LOOP-END> <OMP-START>#pragma omp parallel for<OMP-END>
Alfons0329/Parallel_Programming_Fall_2018/HW2/CG/cg.c
#pragma omp parallel for, no speed up *************VERIFICATION FAILED WITH THIS ONE*********************
100
------------------------------------- size = 1.0; ratio = pow(rcond, (1.0 / (double)(n))); //<LOOP-START>for (i = 0; i < n; i++) { for (nza = 0; nza < arow[i]; nza++) { j = acol[i][nza]; scale = size * aelt[i][nza]; for (nzrow = 0; nzrow < arow[i]; nzrow++) { jcol = acol[i][nzrow];...