MP-Gadget  5.0.1.dev1-76bc7d4726-dirty
system.h File Reference
#include <stdint.h>
#include <mpi.h>
#include <stdarg.h>


Macros

#define MPI_UINT64   MPI_UNSIGNED_LONG
 
#define MPI_INT64   MPI_LONG
 
#define MPIU_Barrier(comm)   _MPIU_Barrier(__FILE__, __LINE__, comm)
 

Functions

int cluster_get_num_hosts (void)
 
double get_physmem_bytes (void)
 
double get_random_number (uint64_t id)
 
void set_random_numbers (int seed)
 
void sumup_large_ints (int n, int *src, int64_t *res)
 
void sumup_longs (int n, int64_t *src, int64_t *res)
 
int64_t count_sum (int64_t countLocal)
 
int MPIU_Any (int condition, MPI_Comm comm)
 
void MPIU_write_pids (char *filename)
 
size_t gadget_compact_thread_arrays (int *dest, int *srcs[], size_t sizes[], int narrays)
 
void gadget_setup_thread_arrays (int *dest, int *srcs[], size_t sizes[], size_t total_size, int narrays)
 
int MPI_Alltoallv_smart (void *sendbuf, int *sendcnts, int *sdispls, MPI_Datatype sendtype, void *recvbuf, int *recvcnts, int *rdispls, MPI_Datatype recvtype, MPI_Comm comm)
 
int MPI_Alltoallv_sparse (void *sendbuf, int *sendcnts, int *sdispls, MPI_Datatype sendtype, void *recvbuf, int *recvcnts, int *rdispls, MPI_Datatype recvtype, MPI_Comm comm)
 
double timediff (double t0, double t1)
 
double second (void)
 
size_t sizemax (size_t a, size_t b)
 
static int64_t atomic_fetch_and_add_64 (int64_t *ptr, int64_t value)
 
static int atomic_fetch_and_add (int *ptr, int value)
 
static int atomic_add_and_fetch (int *ptr, int value)
 
void MPIU_Trace (MPI_Comm comm, int where, const char *fmt,...)
 
void MPIU_Tracev (MPI_Comm comm, int where, int error, const char *fmt, va_list va)
 
int _MPIU_Barrier (const char *fn, const int ln, MPI_Comm comm)
 

Macro Definition Documentation

◆ MPI_INT64

#define MPI_INT64   MPI_LONG

Definition at line 12 of file system.h.

◆ MPI_UINT64

#define MPI_UINT64   MPI_UNSIGNED_LONG

Definition at line 11 of file system.h.

◆ MPIU_Barrier

#define MPIU_Barrier(comm)   _MPIU_Barrier(__FILE__, __LINE__, comm)

Definition at line 103 of file system.h.

Function Documentation

◆ _MPIU_Barrier()

int _MPIU_Barrier (const char *fn, const int line, MPI_Comm comm)

A fancy MPI barrier (use the MPIU_Barrier macro, which supplies the call site):

  • aborts if a barrier mismatch occurs
  • warns if some ranks are very imbalanced.

Definition at line 500 of file system.c.

{
    int ThisTask, NTask;
    MPI_Comm_size(comm, &NTask);
    MPI_Comm_rank(comm, &ThisTask);
    int * recvbuf = ta_malloc("tags", int, NTask);
    int tag = 0;
    int i;
    for(i = 0; fn[i]; i ++) {
        tag += (int)fn[i] * 8;
    }
    tag += line;

    MPI_Request request;
    MPI_Igather(&tag, 1, MPI_INT, recvbuf, 1, MPI_INT, 0, comm, &request);
    i = 0;
    int flag = 1;
    int tsleep = 0;
    while(flag) {
        MPI_Test(&request, &flag, MPI_STATUS_IGNORE);
        if(flag) break;
        usleep(i * 1000);
        tsleep += i * 1000;
        i = i + 1;
        if(i == 50) {
            if(ThisTask == 0) {
                MPIU_Trace(comm, 0, "Waited more than %g seconds during barrier %s : %d \n", tsleep / 1000000., fn, line);
            }
            break;
        }
    }
    MPI_Wait(&request, MPI_STATUS_IGNORE);
    /* now check if all ranks indeed hit the same barrier. Some MPIs do allow them to mix up! */
    if (ThisTask == 0) {
        for(i = 0; i < NTask; i ++) {
            if(recvbuf[i] != tag) {
                MPIU_Trace(comm, 0, "Task %d Did not hit barrier at %s : %d; expecting %d, got %d\n", i, fn, line, tag, recvbuf[i]);
            }
        }
    }
    ta_free(recvbuf);
    return 0;
}

References MPIU_Trace(), NTask, ta_free, ta_malloc, and ThisTask.

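As a usage illustration, a minimal sketch (assuming an MP-Gadget build with system.h on the include path) of dropping MPIU_Barrier in where a plain MPI_Barrier would go:

/* Minimal usage sketch; program structure is illustrative. */
#include <mpi.h>
#include "system.h"

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);
    /* ... per-rank work ... */
    /* Expands to _MPIU_Barrier(__FILE__, __LINE__, MPI_COMM_WORLD), so the
     * tag encodes this call site and mismatched barriers are reported. */
    MPIU_Barrier(MPI_COMM_WORLD);
    MPI_Finalize();
    return 0;
}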

◆ atomic_add_and_fetch()

static inline int atomic_add_and_fetch (int *ptr, int value)

Definition at line 87 of file system.h.

{
    int k;
#pragma omp atomic capture
    {
        (*ptr) += value;
        k = (*ptr);
    }
    return k;
}

◆ atomic_fetch_and_add()

static inline int atomic_fetch_and_add (int *ptr, int value)

Definition at line 78 of file system.h.

{
    int k;
#pragma omp atomic capture
    {
        k = (*ptr);
        (*ptr) += value;
    }
    return k;
}

Referenced by get_freenode(), and real_ev().


◆ atomic_fetch_and_add_64()

static inline int64_t atomic_fetch_and_add_64 (int64_t *ptr, int64_t value)

Definition at line 68 of file system.h.

{
    int64_t k;
#pragma omp atomic capture
    {
        k = (*ptr);
        (*ptr) += value;
    }
    return k;
}

Referenced by sfr_wind_feedback_ngbiter(), slots_convert(), slots_split_particle(), and timestep_eh_slots_fork().

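The three helpers above implement the OpenMP capture idiom: atomic_fetch_and_add and atomic_fetch_and_add_64 return the value before the addition, atomic_add_and_fetch the value after. A common use, visible in the listed callers, is claiming unique slots in a shared array from many threads. A minimal sketch (the queue and loop here are illustrative, not from the source; compile with OpenMP enabled):

#include <stdio.h>
#include "system.h"

int main(void)
{
    int queue[100];
    int nqueue = 0;
    int i;
    /* Each thread atomically claims the next free slot, so no two
     * threads write the same entry. */
#pragma omp parallel for
    for(i = 0; i < 100; i++) {
        int slot = atomic_fetch_and_add(&nqueue, 1);  /* pre-increment index */
        queue[slot] = i;
    }
    printf("queued %d items\n", nqueue);
    return 0;
}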

◆ cluster_get_num_hosts()

int cluster_get_num_hosts ( void  )

Definition at line 434 of file system.c.

{
    /* Find a unique hostid for the computing rank. */
    int NTask;
    int ThisTask;
    MPI_Comm_size(MPI_COMM_WORLD, &NTask);
    MPI_Comm_rank(MPI_COMM_WORLD, &ThisTask);

    /* Size is set by the size of the temp heap:
     * this fills it and should be changed if needed. */
    const int bufsz = 256;
    char * buffer = ta_malloc("buffer", char, bufsz * NTask);
    memset(buffer, 0, bufsz * NTask);
    int i, j;
    gethostname(&buffer[bufsz * ThisTask], bufsz);
    buffer[bufsz * ThisTask + bufsz - 1] = '\0';
    MPI_Allgather(MPI_IN_PLACE, bufsz, MPI_CHAR, buffer, bufsz, MPI_CHAR, MPI_COMM_WORLD);

    int nunique = 0;
    /* Count unique entries */
    for(j = 0; j < NTask; j++) {
        for(i = j + 1; i < NTask; i++) {
            if(strncmp(buffer + i * bufsz, buffer + j * bufsz, bufsz) == 0)
                break;
        }
        if(i == NTask)
            nunique++;
    }
    ta_free(buffer);
    return nunique;
}

References NTask, ta_free, ta_malloc, and ThisTask.

Referenced by mymalloc_init().


◆ count_sum()

int64_t count_sum ( int64_t  countLocal)

Definition at line 264 of file system.c.

{
    int64_t sum = 0;
    MPI_Allreduce(&countLocal, &sum, 1, MPI_INT64, MPI_SUM, MPI_COMM_WORLD);
    return sum;
}

References MPI_INT64.

Referenced by get_long_range_timestep_dloga(), and petaio_save_block().

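A short sketch of the usual pattern (the counter name is illustrative): every rank contributes its local count and all ranks receive the global total. Note the call is collective over MPI_COMM_WORLD.

#include <stdint.h>
#include "system.h"

/* Sketch: reduce a per-rank tally to a global total. Collective:
 * every rank in MPI_COMM_WORLD must make this call. */
int64_t total_active(int64_t num_active_local)
{
    return count_sum(num_active_local);
}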

◆ gadget_compact_thread_arrays()

size_t gadget_compact_thread_arrays (int *dest, int *srcs[], size_t sizes[], int narrays)

Definition at line 587 of file system.c.

{
    int i;
    size_t asize = 0;

    for(i = 0; i < narrays; i++)
    {
        memmove(dest + asize, srcs[i], sizeof(int) * sizes[i]);
        asize += sizes[i];
    }
    return asize;
}

Referenced by cooling_and_starformation(), domain_build_exchange_list(), rebuild_activelist(), treewalk_build_queue(), and treewalk_do_hsml_loop().


◆ gadget_setup_thread_arrays()

void gadget_setup_thread_arrays (int *dest, int *srcs[], size_t sizes[], size_t total_size, int narrays)

Definition at line 600 of file system.c.

{
    int i;
    srcs[0] = dest;
    for(i = 0; i < narrays; i++) {
        srcs[i] = dest + i * total_size;
        sizes[i] = 0;
    }
}

Referenced by cooling_and_starformation(), domain_build_exchange_list(), rebuild_activelist(), treewalk_build_queue(), and treewalk_do_hsml_loop().

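The two functions above form a pair: gadget_setup_thread_arrays points each thread at its own stride of a shared buffer, and gadget_compact_thread_arrays slides the per-thread pieces back into one contiguous list. A sketch of that pattern, with illustrative names and a stand-in selection test (dest must hold total_size entries per thread):

#include <omp.h>
#include "mymalloc.h"
#include "system.h"

/* Sketch: build a compact list of selected indices in parallel.
 * 'active' must have room for maxlen entries per thread. */
size_t build_active_list(int *active, size_t maxlen, int nitems)
{
    int nthreads = omp_get_max_threads();
    int **srcs = ta_malloc("srcs", int *, nthreads);
    size_t *sizes = ta_malloc("sizes", size_t, nthreads);
    /* Point srcs[t] at thread t's stride of 'active'; zero the sizes. */
    gadget_setup_thread_arrays(active, srcs, sizes, maxlen, nthreads);
#pragma omp parallel
    {
        int tid = omp_get_thread_num();
        int i;
#pragma omp for
        for(i = 0; i < nitems; i++) {
            if(i % 2 == 0)  /* stand-in for a real selection test */
                srcs[tid][sizes[tid]++] = i;
        }
    }
    /* Slide the per-thread pieces together; returns the combined length. */
    size_t len = gadget_compact_thread_arrays(active, srcs, sizes, nthreads);
    ta_free(sizes);
    ta_free(srcs);
    return len;
}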

◆ get_physmem_bytes()

double get_physmem_bytes ( void  )

Definition at line 467 of file system.c.

{
#if defined _SC_PHYS_PAGES && defined _SC_PAGESIZE
    { /* This works on linux-gnu, solaris2 and cygwin. */
        double pages = sysconf (_SC_PHYS_PAGES);
        double pagesize = sysconf (_SC_PAGESIZE);
        if (0 <= pages && 0 <= pagesize)
            return pages * pagesize;
    }
#endif

#if defined HW_PHYSMEM
    { /* This works on *bsd and darwin. */
        unsigned int physmem;
        size_t len = sizeof physmem;
        static int mib[2] = { CTL_HW, HW_PHYSMEM };

        if (sysctl (mib, ARRAY_SIZE (mib), &physmem, &len, NULL, 0) == 0
            && len == sizeof (physmem))
            return (double) physmem;
    }
#endif
    /* Fallback when neither probe is available: assume 64 MB. */
    return 64 * 1024 * 1024;
}

Referenced by read_parameter_file(), and read_parameterfile().


◆ get_random_number()

double get_random_number ( uint64_t  id)

Definition at line 60 of file system.c.

{
    return RndTable[(int)(id % RNDTABLE)];
}

References RNDTABLE, and RndTable.

Referenced by bh_powerlaw_seed_mass(), blackhole_accretion_ngbiter(), choose_QSO_halo(), gaussian_rng(), get_random_dir(), get_sfr_eeqos(), get_wind_dir(), lightcone_cross(), quicklyastarformation(), sfr_wind_feedback_ngbiter(), and update_random_offset().

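get_random_number is a table lookup keyed by particle ID, so for a given seed the same ID always maps to the same deviate; set_random_numbers (documented below) fills the table. A minimal sketch with an arbitrary seed and ID:

#include <stdint.h>
#include "system.h"

/* Sketch: seed once at startup, then draw reproducible per-ID numbers. */
void init_rng_example(void)
{
    set_random_numbers(42);                    /* arbitrary example seed */
    double u = get_random_number(123456789);   /* same ID -> same number */
    (void) u;
}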

◆ MPI_Alltoallv_smart()

int MPI_Alltoallv_smart (void *sendbuf, int *sendcnts, int *sdispls, MPI_Datatype sendtype, void *recvbuf, int *recvcnts, int *rdispls, MPI_Datatype recvtype, MPI_Comm comm)

Definition at line 278 of file system.c.

{
    int ThisTask;
    int NTask;
    MPI_Comm_rank(comm, &ThisTask);
    MPI_Comm_size(comm, &NTask);
    int i;
    int nn = 0;
    int *a_sdispls = NULL, *a_recvcnts = NULL, *a_rdispls = NULL;
    for(i = 0; i < NTask; i ++) {
        if(sendcnts[i] > 0) {
            nn ++;
        }
    }
    if(recvcnts == NULL) {
        a_recvcnts = ta_malloc("recvcnts", int, NTask);
        recvcnts = a_recvcnts;
        MPI_Alltoall(sendcnts, 1, MPI_INT,
                     recvcnts, 1, MPI_INT, comm);
    }
    if(recvbuf == NULL) {
        int totalrecv = 0;
        for(i = 0; i < NTask; i ++) {
            totalrecv += recvcnts[i];
        }
        return totalrecv;
    }
    if(sdispls == NULL) {
        a_sdispls = ta_malloc("sdispls", int, NTask);
        sdispls = a_sdispls;
        sdispls[0] = 0;
        for (i = 1; i < NTask; i++) {
            sdispls[i] = sdispls[i - 1] + sendcnts[i - 1];
        }
    }
    if(rdispls == NULL) {
        a_rdispls = ta_malloc("rdispls", int, NTask);
        rdispls = a_rdispls;
        rdispls[0] = 0;
        for (i = 1; i < NTask; i++) {
            rdispls[i] = rdispls[i - 1] + recvcnts[i - 1];
        }
    }

    int dense = nn < NTask * 0.2;
    int tot_dense = 0, ret;
    MPI_Allreduce(&dense, &tot_dense, 1, MPI_INT, MPI_SUM, comm);

    if(tot_dense != 0) {
        ret = MPI_Alltoallv(sendbuf, sendcnts, sdispls,
                            sendtype, recvbuf,
                            recvcnts, rdispls, recvtype, comm);
    } else {
        ret = MPI_Alltoallv_sparse(sendbuf, sendcnts, sdispls,
                                   sendtype, recvbuf,
                                   recvcnts, rdispls, recvtype, comm);
    }
    if(a_rdispls)
        ta_free(a_rdispls);
    if(a_sdispls)
        ta_free(a_sdispls);
    if(a_recvcnts)
        ta_free(a_recvcnts);
    return ret;
}

References MPI_Alltoallv_sparse(), NTask, ta_free, ta_malloc, and ThisTask.

Referenced by fof_reduce_groups(), fof_seed(), and mpsort_mpi_histogram_sort().

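As the listing shows, the NULL arguments are conveniences: with recvcnts == NULL the counts are gathered via MPI_Alltoall; with recvbuf == NULL the call returns the total number of elements this rank would receive instead of communicating; NULL displacement arrays are built cumulatively from the counts. A sketch of the resulting two-call pattern (buffer names are illustrative):

#include <mpi.h>
#include "mymalloc.h"
#include "system.h"

/* Sketch: exchange a variable number of ints per rank without
 * precomputing receive counts or displacements. */
void exchange_ints(int *sendbuf, int *sendcnts, MPI_Comm comm)
{
    /* First call: recvbuf == NULL, so this only sizes the receive side. */
    int totrecv = MPI_Alltoallv_smart(sendbuf, sendcnts, NULL, MPI_INT,
                                      NULL, NULL, NULL, MPI_INT, comm);
    int *recvbuf = ta_malloc("recvbuf", int, totrecv);
    /* Second call: counts and displacements are rebuilt internally. */
    MPI_Alltoallv_smart(sendbuf, sendcnts, NULL, MPI_INT,
                        recvbuf, NULL, NULL, MPI_INT, comm);
    /* ... consume recvbuf ... */
    ta_free(recvbuf);
}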

◆ MPI_Alltoallv_sparse()

int MPI_Alltoallv_sparse (void *sendbuf, int *sendcnts, int *sdispls, MPI_Datatype sendtype, void *recvbuf, int *recvcnts, int *rdispls, MPI_Datatype recvtype, MPI_Comm comm)

Definition at line 353 of file system.c.

{
    int ThisTask;
    int NTask;
    MPI_Comm_rank(comm, &ThisTask);
    MPI_Comm_size(comm, &NTask);
    int PTask;
    int ngrp;

    for(PTask = 0; NTask > (1 << PTask); PTask++);

    ptrdiff_t lb;
    ptrdiff_t send_elsize;
    ptrdiff_t recv_elsize;

    MPI_Type_get_extent(sendtype, &lb, &send_elsize);
    MPI_Type_get_extent(recvtype, &lb, &recv_elsize);

#ifndef NO_ISEND_IRECV_IN_DOMAIN
    int n_requests;
    MPI_Request *requests = (MPI_Request *) mymalloc("requests", NTask * 2 * sizeof(MPI_Request));
    n_requests = 0;

    for(ngrp = 0; ngrp < (1 << PTask); ngrp++)
    {
        int target = ThisTask ^ ngrp;

        if(target >= NTask) continue;
        if(recvcnts[target] == 0) continue;
        MPI_Irecv(
            ((char*) recvbuf) + recv_elsize * rdispls[target],
            recvcnts[target],
            recvtype, target, 101934, comm, &requests[n_requests++]);
    }

    MPI_Barrier(comm);
    /* not really necessary, but this will guarantee that all receives are
       posted before the sends, which helps the stability of MPI on
       bluegene, and perhaps some mpich1-clusters */
    /* Note 08/2016: Even on modern hardware this barrier leads to a slight speedup.
     * Probably because it allows the code to hit a fast path transfer. */

    for(ngrp = 0; ngrp < (1 << PTask); ngrp++)
    {
        int target = ThisTask ^ ngrp;
        if(target >= NTask) continue;
        if(sendcnts[target] == 0) continue;
        MPI_Isend(((char*) sendbuf) + send_elsize * sdispls[target],
                  sendcnts[target],
                  sendtype, target, 101934, comm, &requests[n_requests++]);
    }

    MPI_Waitall(n_requests, requests, MPI_STATUSES_IGNORE);
    myfree(requests);
#else
    for(ngrp = 0; ngrp < (1 << PTask); ngrp++)
    {
        int target = ThisTask ^ ngrp;

        if(target >= NTask) continue;
        if(sendcnts[target] == 0 && recvcnts[target] == 0) continue;
        MPI_Sendrecv(((char*)sendbuf) + send_elsize * sdispls[target],
                     sendcnts[target], sendtype,
                     target, 101934,
                     ((char*)recvbuf) + recv_elsize * rdispls[target],
                     recvcnts[target], recvtype,
                     target, 101934,
                     comm, MPI_STATUS_IGNORE);
    }
#endif
    /* ensure the collective-ness */
    MPI_Barrier(comm);

    return 0;
}

References myfree, mymalloc, NTask, and ThisTask.

Referenced by domain_exchange_once(), ev_communicate(), and MPI_Alltoallv_smart().


◆ MPIU_Any()

int MPIU_Any (int condition, MPI_Comm comm)

Definition at line 545 of file system.c.

{
    MPI_Allreduce(MPI_IN_PLACE, &condition, 1, MPI_INT, MPI_LOR, comm);
    return condition;
}

Referenced by domain_decompose_full(), domain_determine_global_toptree(), domain_exchange(), domain_exchange_once(), domain_nonrecursively_combine_topTree(), force_tree_build(), mymalloc_init(), tamalloc_init(), winds_and_feedback(), and winds_subgrid().

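MPIU_Any is a collective logical OR, typically used to turn a per-rank failure into a decision every rank takes together, avoiding a deadlock when only some ranks would bail out. A minimal sketch (endrun is MP-Gadget's error exit; the header name used here is an assumption):

#include "system.h"
#include "endrun.h"   /* assumed header for endrun() */

/* Sketch: if any rank failed, all ranks abort together. */
void check_alloc(void *ptr, MPI_Comm comm)
{
    if(MPIU_Any(ptr == NULL, comm))
        endrun(1, "Allocation failed on at least one rank\n");
}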

◆ MPIU_Trace()

void MPIU_Trace (MPI_Comm comm, int where, const char *fmt, ...)

Definition at line 182 of file system.c.

{
    va_list va;
    va_start(va, fmt);
    MPIU_Tracev(comm, where, 0, fmt, va);
    va_end(va);
}

References MPIU_Tracev().

Referenced by _MPIU_Barrier().

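From the MPIU_Tracev listing below: where == 0 prints only from rank 0, while where > 0 prints from every rank with a "Task N:" prefix; both are timestamped relative to the first trace. A short sketch:

#include "system.h"

/* Sketch: a rank-0-only summary line and a per-rank diagnostic. */
void report(MPI_Comm comm, int nfound)
{
    MPIU_Trace(comm, 0, "Found %d items in total\n", nfound);  /* rank 0 only */
    MPIU_Trace(comm, 1, "local contribution: %d\n", nfound);   /* every rank */
}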

◆ MPIU_Tracev()

void MPIU_Tracev (MPI_Comm comm, int where, int error, const char *fmt, va_list va)

Definition at line 149 of file system.c.

{
    if(_timestart == -1) {
        _timestart = MPI_Wtime();
    }
    int ThisTask;
    MPI_Comm_rank(comm, &ThisTask);
    char prefix[128];

    char buf[4096];
    vsnprintf(buf, 4096, fmt, va);
    buf[4095] = '\0';
    char err[] = "ERROR: ";
    /* Print nothing if not an error */
    if(!error)
        err[0] = '\0';

    if(where > 0) {
        sprintf(prefix, "[ %09.2f ] %sTask %d: ", MPI_Wtime() - _timestart, err, ThisTask);
    } else {
        sprintf(prefix, "[ %09.2f ] %s", MPI_Wtime() - _timestart, err);
    }

    if(ThisTask == 0 || where > 0) {
        putline(prefix, buf);
    }
}

References _timestart, prefix, putline(), and ThisTask.

Referenced by endrun(), message(), and MPIU_Trace().


◆ MPIU_write_pids()

void MPIU_write_pids ( char *  filename)

Definition at line 552 of file system.c.

{
    MPI_Comm comm = MPI_COMM_WORLD;
    int NTask;
    int ThisTask;
    MPI_Comm_size(comm, &NTask);
    MPI_Comm_rank(comm, &ThisTask);

    int my_pid = getpid();
    int * pids = ta_malloc("pids", int, NTask);
    /* Smaller buffer than in cluster_get_num_hosts because
     * here an overflow is harmless but running out of memory isn't */
    int bufsz = 64;
    char * hosts = ta_malloc("hosts", char, (NTask + 1) * bufsz);
    char * thishost = hosts + NTask * bufsz;
    gethostname(thishost, bufsz);
    thishost[bufsz - 1] = '\0';
    /* MPI_IN_PLACE is not used here because the MPI on travis doesn't like it */
    MPI_Gather(thishost, bufsz, MPI_CHAR, hosts, bufsz, MPI_CHAR, 0, comm);
    MPI_Gather(&my_pid, 1, MPI_INT, pids, 1, MPI_INT, 0, comm);

    if(ThisTask == 0)
    {
        int i;
        FILE *fd = fopen(filename, "w");
        if(!fd)
            endrun(5, "Could not open pidfile %s\n", filename);
        for(i = 0; i < NTask; i++)
            fprintf(fd, "host: %s pid: %d\n", hosts + i * bufsz, pids[i]);
        fclose(fd);
    }
    myfree(hosts);
    myfree(pids);
}

References endrun(), myfree, NTask, ta_malloc, and ThisTask.

Referenced by begrun().


◆ second()

double second ( void  )

Definition at line 83 of file system.c.

{
    return MPI_Wtime();
}

Referenced by ev_ndone(), ev_primary(), ev_reduce_result(), ev_secondary(), fof_label_primary(), and treewalk_run().


◆ set_random_numbers()

void set_random_numbers ( int  seed)

Definition at line 65 of file system.c.

{
    gsl_rng * random_generator = gsl_rng_alloc(gsl_rng_ranlxd1);

    /* start-up seed */
    gsl_rng_set(random_generator, seed);

    int i;
    for(i = 0; i < RNDTABLE; i++)
        RndTable[i] = gsl_rng_uniform(random_generator);

    gsl_rng_free(random_generator);
}

References RNDTABLE, and RndTable.

Referenced by begrun(), and run().


◆ sizemax()

size_t sizemax (size_t a, size_t b)

Definition at line 270 of file system.c.

{
    if(a < b)
        return b;
    else
        return a;
}

◆ sumup_large_ints()

void sumup_large_ints (int n, int *src, int64_t *res)

Definition at line 192 of file system.c.

{
    MPI_Comm comm = MPI_COMM_WORLD;
    int NTask;
    int ThisTask;
    MPI_Comm_size(comm, &NTask);
    MPI_Comm_rank(comm, &ThisTask);

    int i, j, *numlist;

    numlist = (int *) mymalloc("numlist", NTask * n * sizeof(int));
    MPI_Allgather(src, n, MPI_INT, numlist, n, MPI_INT, MPI_COMM_WORLD);

    for(j = 0; j < n; j++)
        res[j] = 0;

    for(i = 0; i < NTask; i++)
        for(j = 0; j < n; j++)
            res[j] += numlist[i * n + j];

    myfree(numlist);
}

References myfree, mymalloc, NTask, and ThisTask.

Referenced by check_omega(), collect_BH_info(), cooling_and_starformation(), fof_compile_catalogue(), fof_label_secondary(), gas_ionization_fraction(), get_long_range_timestep_dloga(), petaio_save_snapshot(), print_timebin_statistics(), turn_on_quasars(), winds_and_feedback(), and winds_find_weights().

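A short sketch: each rank contributes an int array and every rank receives the elementwise int64 totals, e.g. for per-timebin particle counts (the array here is illustrative). The call is collective over MPI_COMM_WORLD.

#include <stdint.h>
#include "system.h"

/* Sketch: elementwise global sums of per-rank counters. */
void global_counts_example(void)
{
    int local[3] = {1, 2, 3};   /* illustrative per-rank counts */
    int64_t global[3];
    sumup_large_ints(3, local, global);  /* collective over MPI_COMM_WORLD */
}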

◆ sumup_longs()

void sumup_longs (int n, int64_t *src, int64_t *res)

Definition at line 215 of file system.c.

{
    MPI_Comm comm = MPI_COMM_WORLD;
    int NTask;
    int ThisTask;
    MPI_Comm_size(comm, &NTask);
    MPI_Comm_rank(comm, &ThisTask);
    int i, j;
    int64_t *numlist;

    numlist = (int64_t *) mymalloc("numlist", NTask * n * sizeof(int64_t));
    MPI_Allgather(src, n * sizeof(int64_t), MPI_BYTE, numlist, n * sizeof(int64_t), MPI_BYTE,
                  MPI_COMM_WORLD);

    for(j = 0; j < n; j++)
        res[j] = 0;

    for(i = 0; i < NTask; i++)
        for(j = 0; j < n; j++)
            res[j] += numlist[i * n + j];

    myfree(numlist);
}

References myfree, mymalloc, NTask, and ThisTask.

◆ timediff()

double timediff (double t0, double t1)

Definition at line 92 of file system.c.

{
    double dt;

    dt = t1 - t0;

    if(dt < 0) /* overflow has occurred (for systems with 32bit tick counter) */
    {
        dt = t1 + pow(2, 32) / CLOCKS_PER_SEC - t0;
    }

    return dt;
}

Referenced by ev_ndone(), ev_primary(), ev_reduce_result(), ev_secondary(), and treewalk_run().

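second() and timediff() pair up for wall-clock timing; since second() wraps MPI_Wtime(), the 32-bit-tick wraparound branch in timediff() does not normally trigger and the result is simply t1 - t0. A minimal sketch:

#include "system.h"

/* Sketch: time a region of code in seconds of wall-clock time. */
void timed_region(void)
{
    double t0 = second();
    /* ... work to be timed ... */
    double t1 = second();
    double elapsed = timediff(t0, t1);
    (void) elapsed;
}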