diff --git a/.gitignore b/.gitignore index 63e6fbe444b..0efc4e56d24 100644 --- a/.gitignore +++ b/.gitignore @@ -355,19 +355,12 @@ ompi/tools/ompi_info/ompi_info.1 ompi/tools/wrappers/mpic++-wrapper-data.txt ompi/tools/wrappers/mpicc-wrapper-data.txt ompi/tools/wrappers/mpifort-wrapper-data.txt -ompi/tools/wrappers/mpicc.1 -ompi/tools/wrappers/mpic++.1 -ompi/tools/wrappers/mpicxx.1 -ompi/tools/wrappers/mpifort.1 -ompi/tools/wrappers/mpijavac.1 ompi/tools/wrappers/ompi_wrapper_script ompi/tools/wrappers/ompi.pc ompi/tools/wrappers/ompi-c.pc ompi/tools/wrappers/ompi-cxx.pc ompi/tools/wrappers/ompi-fort.pc ompi/tools/wrappers/mpijavac.pl -ompi/tools/wrappers/mpif90.1 -ompi/tools/wrappers/mpif77.1 ompi/tools/wrappers/mpicxx-wrapper-data.txt ompi/tools/wrappers/mpif77-wrapper-data.txt ompi/tools/wrappers/mpif90-wrapper-data.txt @@ -471,10 +464,6 @@ opal/tools/wrappers/opalcc-wrapper-data.txt opal/tools/wrappers/opalc++-wrapper-data.txt opal/tools/wrappers/opalCC-wrapper-data.txt opal/tools/wrappers/opal_wrapper -opal/tools/wrappers/opalcc.1 -opal/tools/wrappers/opalc++.1 -opal/tools/wrappers/generic_wrapper.1 -opal/tools/wrappers/opal_wrapper.1 opal/tools/wrappers/opal.pc opal/util/show_help_lex.c @@ -704,3 +693,12 @@ opal/test/reachable/reachable_netlink opal/test/reachable/reachable_weighted opal/mca/threads/argobots/threads_argobots.h opal/mca/threads/qthreads/threads_qthreads.h + +docs/_build +docs/_static +docs/_static/css/custom.css +docs/_templates + +# Common Python virtual environment directory names +venv +py?? 
diff --git a/.readthedocs.yaml b/.readthedocs.yaml new file mode 100644 index 00000000000..73f2d6745e7 --- /dev/null +++ b/.readthedocs.yaml @@ -0,0 +1,10 @@ +# .readthedocs.yaml +# Read the Docs configuration file +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +# Required +version: 2 + +# Build documentation in the docs/ directory with Sphinx +sphinx: + configuration: docs/conf.py diff --git a/HACKING.md b/HACKING.md index fab5b1191a7..494bbacabea 100644 --- a/HACKING.md +++ b/HACKING.md @@ -6,271 +6,11 @@ This file is here for those who are building/exploring OMPI in its source code form, most likely through a developer's tree (i.e., a Git clone). +## This file has moved -## Obtaining Open MPI +Please see the content of this file in its new location: +https://ompi.readthedocs.io/en/latest/developers/ -Open MPI is available from many distributions, however some users -prefer to obtain it directly from the Open MPI community via -prepackaged tarball (see: https://www.open-mpi.org/software/ompi/). -The Open MPI tarball includes manpages, and openpmix and openprrte -components, along with an auto-generated configure script. - -Some developers prefer to obtain Open MPI by directly cloning it -from https://github.com/open-mpi/ompi. It is recommended that users -who choose to clone the source directly, use the git clone flag -`--recurse-submodules`, to also obtain the openpmix, and openprrte. - -Regardless of how openpmix and openprrte are obtained, the -configure logic in Open MPI v5.0+ prefer externally installed -components. Please see `configure --help` for more details. - - -## Developer Builds: Compiler Pickyness by Default - -If you are building Open MPI from a Git clone (i.e., there is a `.git` -directory in your build tree), the default build includes extra -compiler pickyness, which will result in more compiler warnings than -in non-developer builds. 
Getting these extra compiler warnings is -helpful to Open MPI developers in making the code base as clean as -possible. - -Developers can disable this picky-by-default behavior by using the -`--disable-picky` configure option. Also note that extra-picky compiles -do *not* happen automatically when you do a VPATH build (e.g., if -`.git` is in your source tree, but not in your build tree). - -Prior versions of Open MPI would automatically activate a lot of -(performance-reducing) debugging code by default if `.git` was found -in your build tree. This is no longer true. You can manually enable -these (performance-reducing) debugging features in the Open MPI code -base with these configure options: - -* `--enable-debug` -* `--enable-mem-debug` -* `--enable-mem-profile` - -***NOTE:*** These options are really only relevant to those who are -developing Open MPI itself. They are not generally helpful for -debugging general MPI applications. - - -## Use of GNU Autoconf, Automake, and Libtool (and m4) - -You need to read/care about this section *ONLY* if you are building -from a developer's tree (i.e., a Git clone of the Open MPI source -tree). If you have an Open MPI distribution tarball, the contents of -this section are optional -- you can (and probably should) skip -reading this section. - -If you are building Open MPI from a developer's tree, you must first -install fairly recent versions of the GNU tools Autoconf, Automake, -and Libtool (and possibly GNU m4, because recent versions of Autoconf -have specific GNU m4 version requirements). The specific versions -required depend on if you are using the Git master branch or a release -branch (and which release branch you are using). [The specific -versions can be found -here](https://www.open-mpi.org/source/building.php). 
- -You can check what versions of the autotools you have installed with -the following: - -``` -shell$ m4 --version -shell$ autoconf --version -shell$ automake --version -shell$ libtoolize --version -``` - -[Required version levels for all the OMPI releases can be found -here](https://www.open-mpi.org/source/building.php). - -To strengthen the above point: the core Open MPI developers typically -use very, very recent versions of the GNU tools. There are known bugs -in older versions of the GNU tools that Open MPI no longer compensates -for (it seemed senseless to indefinitely support patches for ancient -versions of Autoconf, for example). You *WILL* have problems if you -do not use recent versions of the GNU tools. - -***NOTE:*** On MacOS/X, the default `libtool` program is different -than the GNU libtool. You must download and install the GNU version -(e.g., via MacPorts, Homebrew, or some other mechanism). - -If you need newer versions, you are *strongly* encouraged to heed the -following advice: - -1. Unless your OS distribution has easy-to-use binary installations, - the sources can be can be downloaded from: - * https://ftp.gnu.org/gnu/autoconf/ - * https://ftp.gnu.org/gnu/automake/ - * https://ftp.gnu.org/gnu/libtool/ - * And if you need it: https://ftp.gnu.org/gnu/m4/ - - ***NOTE:*** It is certainly easiest to download/build/install all - four of these tools together. But note that Open MPI has no - specific m4 requirements; it is only listed here because Autoconf - requires minimum versions of GNU m4. Hence, you may or may not - *need* to actually install a new version of GNU m4. That being - said, if you are confused or don't know, just install the latest - GNU m4 with the rest of the GNU Autotools and everything will work - out fine. - -1. Build and install the tools in the following order: - 1. m4 - 1. Autoconf - 1. Automake - 1. Libtool - -1. You MUST install the last three tools (Autoconf, Automake, Libtool) - into the same prefix directory. 
These three tools are somewhat - inter-related, and if they're going to be used together, they MUST - share a common installation prefix. - - You can install m4 anywhere as long as it can be found in the path; - it may be convenient to install it in the same prefix as the other - three. Or you can use any recent-enough m4 that is in your path. - - 1. It is *strongly* encouraged that you do not install your new - versions over the OS-installed versions. This could cause - other things on your system to break. Instead, install into - `$HOME/local`, or `/usr/local`, or wherever else you tend to - install "local" kinds of software. - 1. In doing so, be sure to prefix your $path with the directory - where they are installed. For example, if you install into - `$HOME/local`, you may want to edit your shell startup file - (`.bashrc`, `.cshrc`, `.tcshrc`, etc.) to have something like: - - ```sh - # For bash/sh: - export PATH=$HOME/local/bin:$PATH - # For csh/tcsh: - set path = ($HOME/local/bin $path) - ``` - - 1. Ensure to set your `$PATH` *BEFORE* you configure/build/install - the four packages. - -1. All four packages require two simple commands to build and - install (where PREFIX is the prefix discussed in 3, above). - - ``` - shell$ cd - shell$ ./configure --prefix=PREFIX - shell$ make; make install - ``` - - ***NOTE:*** If you are using the `csh` or `tcsh` shells, be sure to - run the `rehash` command after you install each package. - - ``` - shell$ cd - shell$ ./configure --prefix=PREFIX - shell$ make; make install - ``` - - ***NOTE:*** If you are using the `csh` or `tcsh` shells, be sure to - run the `rehash` command after you install each package. - - ``` - shell$ cd - shell$ ./configure --prefix=PREFIX - shell$ make; make install - ``` - - ***NOTE:*** If you are using the `csh` or `tcsh` shells, be sure to - run the `rehash` command after you install each package. 
- - ``` - shell$ cd - shell$ ./configure --prefix=PREFIX - shell$ make; make install - ``` - - ***NOTE:*** If you are using the `csh` or `tcsh` shells, be sure to - run the `rehash` command after you install each package. - - m4, Autoconf and Automake build and install very quickly; Libtool - will take a minute or two. - -1. You can now run OMPI's top-level `autogen.pl` script. This script - will invoke the GNU Autoconf, Automake, and Libtool commands in the - proper order and setup to run OMPI's top-level `configure` script. - - Running `autogen.pl` may take a few minutes, depending on your - system. It's not very exciting to watch. :smile: - - If you have a multi-processor system, enabling the multi-threaded - behavior in Automake 1.11 (or newer) can result in `autogen.pl` - running faster. Do this by setting the `AUTOMAKE_JOBS` environment - variable to the number of processors (threads) that you want it to - use before invoking `autogen`.pl. For example (you can again put - this in your shell startup files): - - ```sh - # For bash/sh: - export AUTOMAKE_JOBS=4 - # For csh/tcsh: - set AUTOMAKE_JOBS 4 - ``` - - 1. You generally need to run autogen.pl whenever the top-level file - `configure.ac` changes, or any files in the `config/` or - `/config/` directories change (these directories are - where a lot of "include" files for Open MPI's `configure` script - live). - - 1. You do *NOT* need to re-run `autogen.pl` if you modify a - `Makefile.am`. - -## Use of Flex - -Flex is used during the compilation of a developer's checkout (it is -not used to build official distribution tarballs). Other flavors of -lex are *not* supported: given the choice of making parsing code -portable between all flavors of lex and doing more interesting work on -Open MPI, we greatly prefer the latter. - -Note that no testing has been performed to see what the minimum -version of Flex is required by Open MPI. We suggest that you use -v2.5.35 at the earliest. 
- -***NOTE:*** Windows developer builds of Open MPI *require* Flex version -2.5.35. Specifically, we know that v2.5.35 works and 2.5.4a does not. -We have not tested to figure out exactly what the minimum required -flex version is on Windows; we suggest that you use 2.5.35 at the -earliest. It is for this reason that the -`contrib/dist/make_dist_tarball` script checks for a Windows-friendly -version of Flex before continuing. - -For now, Open MPI will allow developer builds with Flex 2.5.4. This -is primarily motivated by the fact that RedHat/Centos 5 ships with -Flex 2.5.4. It is likely that someday Open MPI developer builds will -require Flex version >=2.5.35. - -Note that the `flex`-generated code generates some compiler warnings -on some platforms, but the warnings do not seem to be consistent or -uniform on all platforms, compilers, and flex versions. As such, we -have done little to try to remove those warnings. - -If you do not have Flex installed, see [the Flex Github -repository](https://github.com/westes/flex). - -## Use of Pandoc - -Similar to prior sections, you need to read/care about this section -*ONLY* if you are building from a developer's tree (i.e., a Git clone -of the Open MPI source tree). If you have an Open MPI distribution -tarball, the contents of this section are optional -- you can (and -probably should) skip reading this section. - -The Pandoc tool is used to generate Open MPI's man pages. -Specifically: Open MPI's man pages are written in Markdown; Pandoc is -the tool that converts that Markdown to nroff (i.e., the format of man -pages). - -You must have Pandoc >=v1.12 when building Open MPI from a developer's -tree. If configure cannot find Pandoc >=v1.12, it will abort. - -If you need to install Pandoc, check your operating system-provided -packages (to include MacOS Homebrew and MacPorts). [The Pandoc -project web site](https://pandoc.org/) itself also offers binaries for -their releases. 
+Additionally, see +https://ompi.readthedocs.io/en/latest/developers/prerequisites.html#sphinx +if you want to edit and build the documentation locally. diff --git a/INSTALL b/INSTALL deleted file mode 100644 index 33a6bb554b5..00000000000 --- a/INSTALL +++ /dev/null @@ -1,101 +0,0 @@ -Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana - University Research and Technology - Corporation. All rights reserved. -Copyright (c) 2004-2005 The University of Tennessee and The University - of Tennessee Research Foundation. All rights - reserved. -Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, - University of Stuttgart. All rights reserved. -Copyright (c) 2004-2005 The Regents of the University of California. - All rights reserved. -Copyright (c) 2008-2020 Cisco Systems, Inc. All rights reserved. -$COPYRIGHT$ - -Additional copyrights may follow - -$HEADER$ - - -For More Information -==================== - -This file is a *very* short overview of building and installing Open -MPI, and building MPI applications. Much more information is -available on the Open MPI web site (e.g., see the FAQ section): - - https://www.open-mpi.org/ - - -Developer Builds -================ - -If you have checked out a DEVELOPER'S COPY of Open MPI (i.e., you -cloned from Git), you should read the HACKING file before attempting -to build Open MPI. You must then run: - -shell$ ./autogen.pl - -You will need very recent versions of GNU Autoconf, Automake, and -Libtool. If autogen.pl fails, read the HACKING file. If anything -else fails, read the HACKING file. Finally, we suggest reading the -HACKING file. - -*** NOTE: Developer's copies of Open MPI typically include a large -performance penalty at run-time because of extra debugging overhead. - - -User Builds -=========== - -Building Open MPI is typically a combination of running "configure" -and "make". 
Execute the following commands to install the Open MPI -system from within the directory at the top of the tree: - -shell$ ./configure --prefix=/where/to/install -[...lots of output...] -shell$ make all install - -If you need special access to install, then you can execute "make -all" as a user with write permissions in the build tree, and a -separate "make install" as a user with write permissions to the -install tree. - -Compiling support for various networks or other specific hardware may -require additional command ling flags when running configure. See the -README file for more details. Note that VPATH builds are fully -supported. For example: - -shell$ tar xf openmpi-X.Y.Z.tar.gz -shell$ cd openmpi-X.Y.Z -shell$ mkdir build -shell$ cd build -shell$ ../configure ...your options... -[...lots of output...] -shell$ make all install - -Parallel builds are also supported (although some versions of "make", -such as GNU make, will only use the first target listed on the command -line when executable parallel builds). For example (assume GNU make): - -shell$ make -j 4 all -[...lots of output...] -shell$ make install - -Parallel make is generally only helpful in the build phase (i.e., -"make all"); the installation process (i.e., "make install") is mostly -serial and does not benefit much from parallelization. - - -Compiling MPI Applications -========================== - -MPI applications should be compiled using the Open MPI "wrapper" -compilers: - -C programs: mpicc your-code.c -C++ programs: mpiCC your-code.cc or - mpic++ your-code.cc (for case-insensitive filesystems) -Fortran programs: mpifort your-code.f90 - -These compilers simply add various command line flags (such as -lmpi) -and invoke a back-end compiler; they are not compilers in themselves. diff --git a/Makefile.am b/Makefile.am index 23a5253b268..8dc915b6cb2 100644 --- a/Makefile.am +++ b/Makefile.am @@ -9,7 +9,7 @@ # University of Stuttgart. All rights reserved. 
# Copyright (c) 2004-2005 The Regents of the University of California. # All rights reserved. -# Copyright (c) 2006-2016 Cisco Systems, Inc. All rights reserved. +# Copyright (c) 2006-2022 Cisco Systems, Inc. All rights reserved. # Copyright (c) 2012-2015 Los Alamos National Security, Inc. All rights reserved. # Copyright (c) 2014-2019 Intel, Inc. All rights reserved. # Copyright (c) 2017-2021 Amazon.com, Inc. or its affiliates. @@ -22,9 +22,9 @@ # $HEADER$ # -SUBDIRS = config contrib 3rd-party $(MCA_PROJECT_SUBDIRS) test -DIST_SUBDIRS = config contrib 3rd-party $(MCA_PROJECT_DIST_SUBDIRS) test -EXTRA_DIST = README.md INSTALL VERSION Doxyfile LICENSE autogen.pl README.JAVA.md AUTHORS +SUBDIRS = config contrib 3rd-party $(MCA_PROJECT_SUBDIRS) test docs +DIST_SUBDIRS = config contrib 3rd-party $(MCA_PROJECT_DIST_SUBDIRS) test docs +EXTRA_DIST = README.md VERSION Doxyfile LICENSE autogen.pl AUTHORS include examples/Makefile.include diff --git a/Makefile.ompi-rules b/Makefile.ompi-rules index d8aefcb8f3d..567bcfd99f3 100644 --- a/Makefile.ompi-rules +++ b/Makefile.ompi-rules @@ -1,5 +1,5 @@ # -*- makefile -*- -# Copyright (c) 2008-2018 Cisco Systems, Inc. All rights reserved. +# Copyright (c) 2008-2022 Cisco Systems, Inc. All rights reserved. # Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved. # Copyright (c) 2020 Intel, Inc. All rights reserved. # $COPYRIGHT$ @@ -9,72 +9,6 @@ # $HEADER$ # -MD2NROFF = $(OMPI_TOP_SRCDIR)/config/md2nroff.pl - -TRIM_OPTIONS= -if ! MAN_PAGE_BUILD_MPIFH_BINDINGS - TRIM_OPTIONS += --nofortran -endif -if ! MAN_PAGE_BUILD_USEMPIF08_BINDINGS - TRIM_OPTIONS += --nof08 -endif - -# JMS This rule can be deleted once all man pages have been converted -# to markdown. 
-.1in.1: - $(OMPI_V_GEN) $(top_srcdir)/config/make_manpage.pl \ - --package-name='@PACKAGE_NAME@' \ - --package-version='@PACKAGE_VERSION@' \ - --ompi-date='@OMPI_RELEASE_DATE@' \ - --opal-date='@OPAL_RELEASE_DATE@' \ - --input=$< \ - --output=$@ - -# JMS This rule can be deleted once all man pages have been converted -# to markdown. -.3in.3: - $(OMPI_V_GEN) $(top_srcdir)/config/make_manpage.pl \ - --package-name='@PACKAGE_NAME@' \ - --package-version='@PACKAGE_VERSION@' \ - --ompi-date='@OMPI_RELEASE_DATE@' \ - --opal-date='@OPAL_RELEASE_DATE@' \ - $(TRIM_OPTIONS) \ - --input=$< \ - --output=$@ - -# JMS This rule can be deleted once all man pages have been converted -# to markdown. -.7in.7: - $(OMPI_V_GEN) $(top_srcdir)/config/make_manpage.pl \ - --package-name='@PACKAGE_NAME@' \ - --package-version='@PACKAGE_VERSION@' \ - --ompi-date='@OMPI_RELEASE_DATE@' \ - --opal-date='@OPAL_RELEASE_DATE@' \ - --input=$< \ - --output=$@ - -%.1: %.1.md - $(OMPI_V_GEN) $(MD2NROFF) --source=$< --dest=$@ --pandoc=$(PANDOC) - -%.3: %.3.md - $(OMPI_V_GEN) $(MD2NROFF) --source=$< --dest=$@ --pandoc=$(PANDOC) - -%.5: %.5.md - $(OMPI_V_GEN) $(MD2NROFF) --source=$< --dest=$@ --pandoc=$(PANDOC) - -%.7: %.7.md - $(OMPI_V_GEN) $(MD2NROFF) --source=$< --dest=$@ --pandoc=$(PANDOC) - -# It is an error to "configure --disable-man-pages" and then try to -# "make dist". -if !OPAL_ENABLE_MAN_PAGES -dist-hook: - @echo "************************************************************************************" - @echo "ERROR: 'make dist' inoperable when Open MPI is configured with --disable-man-pages" - @echo "************************************************************************************" - @/bin/false -endif - # A little verbosity magic; "make" will show the terse output. "make # V=1" will show the actual commands used (just like the other # Automake-generated compilation/linker rules). 
@@ -91,3 +25,11 @@ ompi__v_MKDIR_0 = @echo " MKDIR " $@; OMPI_V_GEN = $(ompi__v_GEN_$V) ompi__v_GEN_ = $(ompi__v_GEN_$AM_DEFAULT_VERBOSITY) ompi__v_GEN_0 = @echo " GENERATE" $@; + +OMPI_V_SPHINX_HTML = $(ompi__v_SPHINX_HTML_$V) +ompi__v_SPHINX_HTML_ = $(ompi__v_SPHINX_HTML_$AM_DEFAULT_VERBOSITY) +ompi__v_SPHINX_HTML_0 = @echo " GENERATE HTML docs"; + +OMPI_V_SPHINX_MAN = $(ompi__v_SPHINX_MAN_$V) +ompi__v_SPHINX_MAN_ = $(ompi__v_SPHINX_MAN_$AM_DEFAULT_VERBOSITY) +ompi__v_SPHINX_MAN_0 = @echo " GENERATE man pages"; diff --git a/NEWS b/NEWS deleted file mode 100644 index 82f82b0b3f7..00000000000 --- a/NEWS +++ /dev/null @@ -1,5096 +0,0 @@ -Copyright (c) 2004-2010 The Trustees of Indiana University and Indiana - University Research and Technology - Corporation. All rights reserved. -Copyright (c) 2004-2006 The University of Tennessee and The University - of Tennessee Research Foundation. All rights - reserved. -Copyright (c) 2004-2008 High Performance Computing Center Stuttgart, - University of Stuttgart. All rights reserved. -Copyright (c) 2004-2006 The Regents of the University of California. - All rights reserved. -Copyright (c) 2006-2021 Cisco Systems, Inc. All rights reserved. -Copyright (c) 2006 Voltaire, Inc. All rights reserved. -Copyright (c) 2006 Sun Microsystems, Inc. All rights reserved. - Use is subject to license terms. -Copyright (c) 2006-2017 Los Alamos National Security, LLC. All rights - reserved. -Copyright (c) 2010-2022 IBM Corporation. All rights reserved. -Copyright (c) 2012 Oak Ridge National Labs. All rights reserved. -Copyright (c) 2012 Sandia National Laboratories. All rights reserved. -Copyright (c) 2012 University of Houston. All rights reserved. -Copyright (c) 2013-2021 NVIDIA Corporation. All rights reserved. -Copyright (c) 2013-2018 Intel, Inc. All rights reserved. -Copyright (c) 2018-2021 Amazon.com, Inc. or its affiliates. All Rights - reserved. 
-$COPYRIGHT$ - -Additional copyrights may follow - -$HEADER$ - -=========================================================================== - -This file contains the main features as well as overviews of specific -bug fixes (and other actions) for each version of Open MPI since -version 1.0. - -As more fully described in the "Software Version Number" section in -the README file, Open MPI typically releases two separate version -series simultaneously. Since these series have different goals and -are semi-independent of each other, a single NEWS-worthy item may be -introduced into different series at different times. For example, -feature F was introduced in the vA.B series at version vA.B.C, and was -later introduced into the vX.Y series at vX.Y.Z. - -The first time feature F is released, the item will be listed in the -vA.B.C section, denoted as: - - (** also to appear: X.Y.Z) -- indicating that this item is also - likely to be included in future release - version vX.Y.Z. - -When vX.Y.Z is later released, the same NEWS-worthy item will also be -included in the vX.Y.Z section and be denoted as: - - (** also appeared: A.B.C) -- indicating that this item was previously - included in release version vA.B.C. - -Master (not on release branches yet) ------------------------------------- - -********************************************************************** -* PRE-DEPRECATION WARNING: MPIR Support -* -* As was announced in summer 2017, Open MPI is deprecating support for -* MPIR-based tools beginning with the future release of OMPI v5.0, with -* full removal of that support tentatively planned for OMPI v6.0. -* -* This serves as a pre-deprecation warning to provide tools developers -* with sufficient time to migrate to PMIx. Support for PMIx-based -* tools will be rolled out during the OMPI v4.x series. No runtime -* deprecation warnings will be output during this time. 
-* -* Runtime deprecation warnings will be output beginning with the OMPI v5.0 -* release whenever MPIR-based tools connect to Open MPI's mpirun/mpiexec -* launcher. -********************************************************************** - -v5.0.0rc3 -- March, 2021 --------------------- - -- ORTE, the underlying OMPI launcher has been removed, and replaced - with PRTE. -- New MPI-4.0 features: - - Added ERRORS_ABORT infrastructure. - - Added support for 'initial error handler'. - - Added error handling for 'unbound' errors to MPI_COMM_SELF. - - Persistent collectives have been moved to the MPI - namespace from MPIX. - - Make MPI_Comm_get_info, MPI_File_get_info, and - MPI_Win_get_info compliant to the standard. - - Initial partitioned communication using persistent sends and - persistent receives added. - - Added support for MPI_Info_get_string. - - Droped unknown/ignored info keys on communicators, files, - and windows. - - Implemented MPI_Isendrecv() and its variants. - - Added Support for MPI_Comm_idup_with_info(). -- Reworked how Open MPI integrates with 3rd party packages. - The decision was made to stop building 3rd-party packages - such as Libevent, HWLOC, PMIx, and PRRTE as MCA components - and instead 1) start relying on external libraries whenever - possible and 2) Open MPI builds the 3rd party libraries (if needed) - as independent libraries, rather than linked into libopen-pal. -- Updated to use PMIx v4.2 branch - current hash: b59e49f -- Updated to use PRRTE v2.1 branch - current hash: 8c09625 -- osc/rdma and various btls: Many fixes to better support btl/tcp. - - This patch series fixs many issues when running with - "--mca osc rdma --mca btl tcp", IE - TCP support for one sided MPI calls. -- Change the default component build behavior to prefer building - components as part of libmpi.so instead of individual DSOs. -- Remove pml/yalla, mxm, mtl/psm, and ikrit components. -- Remove all vestiges of the C/R support. -- Various ROMIO v3.4.1 updates. 
-- Use Pandoc to generate manpages -- 32 bit atomics are now only supported via C11 compliant compilers. -- Explicitly disable support for GNU gcc < v4.8.1 (note: the default - gcc compiler that is included in RHEL 7 is v4.8.5). -- HWLOC: Require v1.11.0 or later. -- HWLOC: Updated internal hwloc to v2.7.0. -- HWLOC: enable --enable-plugins when appropriate - When running configure on the internal hwloc, enable plugins via - the --enable-plugins option when 1) --disable-dlopen was not - specified and 2) the user didn't add some variant of - --enable-plugins on the configure line already. The motivation - for this change is to not pull in hwloc dependencies like libcuda - in the general case. -- Do not build Open SHMEM layer when there are no SPMLs available. - Currently, this means the Open SHMEM layer will only build if - the UCX library is found. -- Fix rank-by algorithms to properly rank by object and span. -- Updated the "-mca pml" option to only accept one pml, not a list. -- vprotocol/pessimist: Updated to support MPI_THREAD_MULLTIPLE. -- btl/tcp: Updated to use reachability and graph solving for global - interface matching. This has been shown to improve MPI_Init() - performance under btl/tcp. -- fs/ime: Fixed compilation errors due to missing header inclusion - Thanks to Sylvain Didelot for finding - and fixing this issue. -- Fixed bug where MPI_Init_thread can give wrong error messages by - delaying error reporting until all infrastructure is running. -- Atomics support removed: S390/s390x, Sparc v9, ARMv4 and ARMv5 CMA - support. -- autogen.pl now supports a "-j" option to run multi-threaded. - Users can also use environment variable "AUTOMAKE_JOBS". -- PMI support has been removed for Open MPI apps. -- Legacy btl/sm has been removed, and replaced with btl/vader, which - was renamed to "btl/sm". -- Update btl/sm to not use CMA in user namespaces. -- C++ bindings have been removed. -- The "--am" and "--amca" options have been deprecated. 
-- opal/mca/threads framework added. Currently supports - argobots, qthreads, and pthreads. See the --with-threads=x option - in configure. -- Various README.md fixes - thanks to: - Yixin Zhang , - Samuel Cho , - rlangefe , - Alex Ross , - Sophia Fang , - mitchelltopaloglu , - Evstrife , and - Hao Tong for their - contributions. -- osc/pt2pt: Removed. Users can use osc/rdma + btl/tcp - for OSC support using TCP, or other providers. -- Open MPI now links -levent_core instead of -levent. -- common/cuda docs: Various fixes. Thanks to - Simon Byrne for finding and fixing. -- osc/ucx: Add support for acc_single_intrinsic. -- Fixed buildrpm.sh "-r" option used for RPM options specification. - Thanks to John K. McIver III for - reporting and fixing. -- configure: Added support for setting the wrapper C compiler. - Adds new option "--with-wrapper-cc=" . -- mpi_f08: Fixed Fortran-8-byte-INTEGER vs. C-4-byte-int issue. - Thanks to @ahaichen for reporting the bug. -- opal/thread/tsd: Added thread-specific-data (tsd) api. -- Add missing MPI_Status conversion subroutines: - MPI_Status_c2f08(), MPI_Status_f082c(), MPI_Status_f082f(), - MPI_Status_f2f08() and the PMPI_* related subroutines. -- patcher: Removed the Linux component. -- opal/util: Fixed typo in error string. Thanks to - NARIBAYASHI Akira for finding - and fixing the bug. -- Fortran/use-mpi-f08: Generate PMPI bindings from the MPI bindings. -- Converted man pages to markdown. - Thanks to Fangcong Yin for their contribution - to this effort. -- Fixed ompi_proc_world error string and some comments in pml/ob1. - Thanks to Julien EMMANUEL for - finding and fixing these issues. -- oshmem/tools/oshmem_info: Fixed Fortran keyword issue when - compiling param.c. Thanks to Pak Lui for - finding and fixing the bug. -- autogen.pl: Patched libtool.m4 for OSX Big Sur. Thanks to - @fxcoudert for reporting the issue. -- Updgraded to HWLOC v2.4.0. -- Removed config/opal_check_pmi.m4. - Thanks to Zach Osman for the contribution. 
-- opal/atomics: Added load-linked, store-conditional atomics for - AArch6. -- Fixed envvar names to OMPI_MCA_orte_precondition_transports. - Thanks to Marisa Roman - for the contribution. -- fcoll/two_phase: Removed the component. All scenerios it was - used for has been replaced. -- btl/uct: Bumped highest supported UCX version to v1.10.x. -- ULFM Fault Tolerance has been added. See README.FT.ULFM.md. -- Fixed a crash during CUDA initialization. - Thanks to Yaz Saito for finding - and fixing the bug. -- Added CUDA support to the OFI MTL. -- ompio: Added atomicity support. -- Singleton comm spawn support has been fixed. -- Autoconf v2.7 support has been updated. -- Fortran: Added check for ISO_FORTRAN_ENV:REAL16. Thanks to - Jeff Hammond for reporting this issue. -- Changed the MCA component build style default to static. -- PowerPC atomics: Force usage of opal/ppc assembly. -- Removed C++ compiler requirement to build Open MPI. -- Fixed .la files leaking into wrapper compilers. -- Fixed bug where the cache line size was not set soon enough in - MPI_Init(). -- coll/ucc and scoll/ucc components were added. -- coll/ucc: Added support for allgather and reduce collective - operations. -- autogen.pl: Fixed bug where it would not ignore all - excluded components. -- Various datatype bugfixes and performance improvements -- Various pack/unpack bugfixes and performance improvements -- Fix mmap infinite recurse in memory patcher -- Fix C to Fortran error code conversions. -- osc/ucx: Fix data corruption with non-contiguous accumulates -- Update coll/tuned selection rules -- Fix non-blocking collective ops -- btl/portals4: Fix flow control -- Various oshmem:ucx bugfixes and performance improvements -- common/ofi: Disable new monitor API until libfabric 1.14.0 -- Fix AVX detection with icc -- mpirun option "--mca ompi_display_comm mpi_init/mpi_finalize" - has been added. 
Enables a communication protocol report: - when MPI_Init is invoked (using the 'mpi_init' value) and/or - when MPI_Finalize is invoked (using the 'mpi_finalize' value). -- New algorithm for Allgather and Allgatherv added, based on the - paper "Sparbit: a new logarithmic-cost and data locality-aware MPI - Allgather algorithm". Default algorithm selection rules are - un-changed, to use these algorithms add: - "--mca coll_tuned_allgather_algorithm sparbit" and/or - "--mca coll_tuned_allgatherv_algorithm sparbit" - Thanks to: Wilton Jaciel Loch , - and Guilherme Koslovski for their contribution. -- OFI: Delay patcher initialization until needed. It will now - be initialized only after the component is officially selected. -- Portable_platform file has been updated from GASNet. -- GCC versions < 4.8.1 are no longer supported. -- coll: Fix a bug with the libnbc MPI_AllReduce ring algorithm - when using MPI_IN_PLACE. -- Updated the usage of .gitmodules to use relative paths from - absolute paths. This allows the submodule cloning to use the same - protocol as OMPI cloning. Thanks to Felix Uhl - for the contribution. -- osc/rdma: Add local leader pid in shm file name to make it unique. -- ofi: Fix memory handler unregistration. This change fixes a - segfault during shutdown if the common/ofi component was built - as a dynamic object. -- osc/rdma: Add support for MPI minimum alignment key. -- memory_patcher: Add ability to detect patched memory. Thanks - to Rich Welch for the contribution. -- build: Improve handling of compiler version string. This - fixes a compiler error with clang and armclang. -- Fix bug where the relocation of OMPI packages caused - the launch to fail. -- Various improvements to MPI_AlltoAll algorithms for both - performance and memory usage. -- coll/basic: Fix segmentation fault in MPI_Alltoallw with - MPI_IN_PLACE. -- Added OpenPMIx binaries to build, including pmix_info. -- Added new OSHMEM APIs for v1.5. 
- Thanks to Mamzi Bayatpour for the contribution. -- OSC/UCX - Various fixes for dynamic windows. -- allocator/bucket: Add check for overflow. -- Fortran: Fixes for grequest return values and status. -- mtl/ofi: Call fi_setopt to state MPI p2p requirements for CUDA -- Fortran: Changed MPI_Waitall/array_of_requests/etc bindings from dimension(count) to dimension(*). -- coll/base/alltoall: Fixed send/recv offsets in rbuf for the case of MPI_IN_PLACE -- btl/uct: Updated the defaults to allow Mellanox HCAs (mlx4_0, and mlx5_0) for - osc/rdma to work out-of-the-box. -- build: Fix bug where internal PMIx/PRRTE configure was running - even if external requested. -- Fixed a segv when older HWLOCs are used. -- Various fixes for static builds. -- ofi/mtl: Fix component selection debug output - Thanks to Samuel K. Gutierrez for the contribution. -- opal/mca/threads/pthreads: removed opal_pthread_mutex_t - cleans - up logic and code duplication. - Thanks to Shintaro Iwasaki for the contribution. -- opal/mca/threads/qthreads: Added opal_recursive_mutex_t support. - Thanks to Jan Ciesko for the contribution. -- Various improvements to MPI_Test, MPI_Testall, and MPI_Testsome. -- MPI module: added the mpi_f08 TYPE(MPI_*) types. -- MPI_Op: Various performance improvements, and removed all ops with three buffers. - Thanks to George Katevenis for the report - and their contribution to the patch. -- Fixed Fortran preprocessor issue with CPPFLAGS. - Thanks to Jeff Hammond for reporting this issue. -- opal/atomics: Cleaned up and refactored these interfaces. -- btl/sm: Fixed crash on put/get to self. -- btl/base_am_rdma: fix handling of btl_send returning 1 -- Fixed bus error with btl/sm + XPMEM. - Thanks to George Katevenis for reporting this issue. -- scoll/ucc: fix long long reduction dtype. - Thanks to ferrol aderholdt for the - contribution. -- scoll/ucc: add onesided ucc support - Thanks to ferrol aderholdt for the - contribution.
-- opal/common/ucx: enable OPAL memory hooks by default - This will provide compatibility to other transports. -- Fixed leak of sm_segment files in /dev/shm. -- ompi/contrib: Removed libompitrace. - This library was incomplete and unmaintained. If needed, it - is available in the v4/v4.1 series. - -4.1.2 -- November, 2021 ------------------------ - -- ROMIO portability fix for OpenBSD -- Fix handling of MPI_IN_PLACE with MPI_ALLTOALLW and improve performance - of MPI_ALLTOALL and MPI_ALLTOALLV for MPI_IN_PLACE. -- Fix one-sided issue with empty groups in Post-Start-Wait-Complete - synchronization mode. -- Fix Fortran status returns in certain use cases involving - Generalized Requests -- Romio datatype bug fixes. -- Fix oshmem_shmem_finalize() when main() returns non-zero value. -- Fix wrong affinity under LSF with the membind option. -- Fix count==0 cases in MPI_REDUCE and MPI_IREDUCE. -- Fix ssh launching on Bourne-flavored shells when the user has "set - -u" set in their shell startup files. -- Correctly process 0 slots with the mpirun --host option. -- Ensure to unlink and rebind socket when the Open MPI session - directory already exists. -- Fix a segv in mpirun --disable-dissable-map. -- Fix a potential hang in the memory hook handling. -- Slight performance improvement in MPI_WAITALL when running in - MPI_THREAD_MULTIPLE. -- Fix hcoll datatype mapping and rooted operation behavior. -- Correct some operations modifying MPI_Status.MPI_ERROR when it is - disallowed by the MPI standard. -- UCX updates: - - Fix datatype reference count issues. - - Detach dynamic window memory when freeing a window. - - Fix memory leak in datatype handling. -- Fix various atomic operations issues. -- mpirun: try to set the curses winsize to the pty of the spawned - task. Thanks to Stack Overflow user @Seriously for reporting the - issue. -- PMIx updates: - - Fix compatibility with external PMIx v4.x installations. - - Fix handling of PMIx v3.x compiler/linker flags. 
Thanks to Erik - Schnetter for reporting the issue. - - Skip SLURM-provided PMIx detection when appropriate. Thanks to - Alexander Grund for reporting the issue. -- Fix handling by C++ compilers when they #include the STL "version" - header file, which ends up including Open MPI's text VERSION file - (which is not C code). Thanks to @srpgilles for reporting the - issue. -- Fix MPI_Op support for MPI_LONG. -- Make the MPI C++ bindings library (libmpi_cxx) explicitly depend on - the OPAL internal library (libopen-pal). Thanks to Ye Luo for - reporting the issue. -- Fix configure handling of "--with-libevent=/usr". -- Fix memory leak when opening Lustre files. Thanks to Bert Wesarg - for submitting the fix. -- Fix MPI_SENDRECV_REPLACE to correctly process datatype errors. - Thanks to Lisandro Dalcin for reporting the issue. -- Fix MPI_SENDRECV_REPLACE to correctly handle large data. Thanks - Jakub Benda for reporting this issue and suggesting a fix. -- Add workaround for TCP "dropped connection" errors to drastically - reduce the possibility of this happening. -- OMPIO updates: - - Fix handling when AMODE is not set. Thanks to Rainer Keller for - reporting the issue and supplying the fix. - - Fix FBTL "posix" component linking issue. Thanks to Honggang Li - for reporting the issue. - - Fixed segv with MPI_FILE_GET_BYTE_OFFSET on 0-sized file view. - - Thanks to GitHub user @shanedsnyder for submitting the issue. -- OFI updates: - - Multi-plane / Multi-Nic nic selection cleanups - - Add support for exporting Open MPI memory monitors into - Libfabric. - - Ensure that Cisco usNIC devices are never selected by the OFI - MTL. - - Fix buffer overflow in OFI networking setup. Thanks to Alexander - Grund for reporting the issue and supplying the fix. -- Fix SSEND on tag matching networks. -- Fix error handling in several MPI collectives. -- Fix the ordering of MPI_COMM_SPLIT_TYPE. Thanks to Wolfgang - Bangerth for raising the issue.
-- No longer install the orted-mpir library (it's an internal / Libtool - convenience library). Thanks to Andrew Hesford for the fix. -- PSM2 updates: - - Allow advanced users to disable PSM2 version checking. - - Fix to allow non-default installation locations of psm2.h. - -4.1.1 -- April, 2021 --------------------- - -- Fix a number of datatype issues, including an issue with - improper handling of partial datatypes that could lead to - an unexpected application failure. -- Change UCX PML to not warn about MPI_Request leaks during - MPI_FINALIZE by default. The old behavior can be restored with - the mca_pml_ucx_request_leak_check MCA parameter. -- Reverted temporary solution that worked around launch issues in - SLURM v20.11.{0,1,2}. SchedMD encourages users to avoid these - versions and to upgrade to v20.11.3 or newer. -- Updated PMIx to v3.2.2. -- Fixed configuration issue on Apple Silicon observed with - Homebrew. Thanks to François-Xavier Coudert for reporting the issue. -- Disabled gcc built-in atomics by default on aarch64 platforms. -- Disabled UCX PML when UCX v1.8.0 is detected. UCX version 1.8.0 has a bug that - may cause data corruption when its TCP transport is used in conjunction with - the shared memory transport. UCX versions prior to v1.8.0 are not affected by - this issue. Thanks to @ksiazekm for reporting the issue. -- Fixed detection of available UCX transports/devices to better inform PML - prioritization. -- Fixed SLURM support to mark ORTE daemons as non-MPI tasks. -- Improved AVX detection to more accurately detect supported - platforms. Also improved the generated AVX code, and switched to - using word-based MCA params for the op/avx component (vs. numeric - big flags). -- Improved OFI compatibility support and fixed memory leaks in error - handling paths. -- Improved HAN collectives with support for Barrier and Scatter. Thanks - to @EmmanuelBRELLE for these changes and the relevant bug fixes. 
-- Fixed MPI debugger support (i.e., the MPIR_Breakpoint() symbol). - Thanks to @louisespellacy-arm for reporting the issue. -- Fixed ORTE bug that prevented debuggers from reading MPIR_Proctable. -- Removed PML uniformity check from the UCX PML to address performance - regression. -- Fixed MPI_Init_thread(3) statement about C++ binding and update - references about MPI_THREAD_MULTIPLE. Thanks to Andreas Lösel for - bringing the outdated docs to our attention. -- Added fence_nb to Flux PMIx support to address segmentation faults. -- Ensured progress of AIO requests in the POSIX FBTL component to - prevent exceeding maximum number of pending requests on MacOS. -- Used OPAL's multi-thread support in the orted to leverage atomic - operations for object refcounting. -- Fixed segv when launching with static TCP ports. -- Fixed --debug-daemons mpirun CLI option. -- Fixed bug where mpirun did not honor --host in a managed job - allocation. -- Made a managed allocation filter a hostfile/hostlist. -- Fixed bug to mark a generalized request as pending once initiated. -- Fixed external PMIx v4.x check. -- Fixed OSHMEM build with `--enable-mem-debug`. -- Fixed a performance regression observed with older versions of GCC when - __ATOMIC_SEQ_CST is used. Thanks to @BiplabRaut for reporting the issue. -- Fixed buffer allocation bug in the binomial tree scatter algorithm when - non-contiguous datatypes are used. Thanks to @sadcat11 for reporting the issue. -- Fixed bugs related to the accumulate and atomics functionality in the - osc/rdma component. -- Fixed race condition in MPI group operations observed with - MPI_THREAD_MULTIPLE threading level. -- Fixed a deadlock in the TCP BTL's connection matching logic. -- Fixed pml/ob1 compilation error when CUDA support is enabled. -- Fixed a build issue with Lustre caused by unnecessary header includes. -- Fixed a build issue with IMB LSF workload manager. -- Fixed linker error with UCX SPML.
- -4.1.0 -- December, 2020 ------------------------ - -- collectives: Add HAN and ADAPT adaptive collectives components. - Both components are off by default and can be enabled by specifying - "mpirun --mca coll_adapt_priority 100 --mca coll_han_priority 100 ...". - We intend to enable both by default in Open MPI 5.0. -- OMPIO is now the default for MPI-IO on all filesystems, including - Lustre (prior to this, ROMIO was the default for Lustre). Many - thanks to Mark Dixon for identifying MPI I/O issues and providing - access to Lustre systems for testing. -- Updates for macOS Big Sur. Thanks to FX Coudert for reporting this - issue and pointing to a solution. -- Minor MPI one-sided RDMA performance improvements. -- Fix hcoll MPI_SCATTERV with MPI_IN_PLACE. -- Add AVX support for MPI collectives. -- Updates to mpirun(1) about "slots" and PE=x values. -- Fix buffer allocation for large environment variables. Thanks to - @zrss for reporting the issue. -- Upgrade the embedded OpenPMIx to v3.2.2. -- Take more steps towards creating fully Reproducible builds (see - https://reproducible-builds.org/). Thanks Bernhard M. Wiedemann for - bringing this to our attention. -- Fix issue with extra-long values in MCA files. Thanks to GitHub - user @zrss for bringing the issue to our attention. -- UCX: Fix zero-sized datatype transfers. -- Fix --cpu-list for non-uniform modes. -- Fix issue in PMIx callback caused by missing memory barrier on Arm platforms. -- OFI MTL: Various bug fixes. -- Fixed issue where MPI_TYPE_CREATE_RESIZED would create a datatype - with unexpected extent on oddly-aligned datatypes. 
-- collectives: Adjust default tuning thresholds for many collective - algorithms -- runtime: fix situation where rank-by argument does not work -- Portals4: Clean up error handling corner cases -- runtime: Remove --enable-install-libpmix option, which has not - worked since it was added -- opal: Disable memory patcher component on MacOS -- UCX: Allow UCX 1.8 to be used with the btl uct -- UCX: Replace usage of the deprecated NB API of UCX with NBX -- OMPIO: Add support for the IME file system -- OFI/libfabric: Added support for multiple NICs -- OFI/libfabric: Added support for Scalable Endpoints -- OFI/libfabric: Added btl for one-sided support -- OFI/libfabric: Multiple small bugfixes -- libnbc: Adding numerous performance-improving algorithms - -4.0.6 -- March, 2021 ------------------------ -- Update embedded PMIx to 3.2.2. This update addresses several - MPI_COMM_SPAWN problems. -- Fix a problem when using Flux PMI and UCX. Thanks to Sami Ilvonen - for reporting and supplying a fix. -- Fix a problem with MPIR breakpoint being compiled out using PGI - compilers. Thanks to @louisespellacy-arm for reporting. -- Fix some ROMIO issues when using Lustre. Thanks to Mark Dixon for - reporting. -- Fix a problem using an external PMIx 4 to build Open MPI 4.0.x. -- Fix a compile problem when using the enable-timing configure option - and UCX. Thanks to Jan Bierbaum for reporting. -- Fix a symbol name collision when using the Cray compiler to build - Open SHMEM. Thanks to Pak Lui for reporting and fixing. -- Correct an issue encountered when building Open MPI under OSX Big Sur. - Thanks to FX Coudert for reporting. -- Various fixes to the OFI MTL. -- Fix an issue with allocation of sufficient memory for parsing long - environment variable values. Thanks to @zrss for reporting. -- Improve reproducibility of builds to assist Open MPI packages. - Thanks to Bernhard Wiedmann for bringing this to our attention. 
- -4.0.5 -- August, 2020 ---------------------- - -- Fix a problem with MPI RMA compare and swap operations. Thanks - to Wojciech Chlapek for reporting. -- Disable binding of MPI processes to system resources by Open MPI - if an application is launched using SLURM's srun command. -- Disable building of the Fortran mpi_f08 module when configuring - Open MPI with default 8 byte Fortran integer size. Thanks to - @ahcien for reporting. -- Fix a problem with mpirun when the --map-by option is used. - Thanks to Wenbin Lyu for reporting. -- Fix some issues with MPI one-sided operations uncovered using Global - Arrays regression test-suite. Thanks to @bjpalmer for reporting. -- Fix a problem with make check when using the PGI compiler. Thanks to - Carl Ponder for reporting. -- Fix a problem with MPI_FILE_READ_AT_ALL that could lead to application - hangs under certain circumstances. Thanks to Scot Breitenfeld for - reporting. -- Fix a problem building C++ applications with newer versions of GCC. - Thanks to Constantine Khrulev for reporting. - -4.0.4 -- June, 2020 -------------------- - -- Fix a memory patcher issue intercepting shmat and shmdt. This was - observed on RHEL 8.x ppc64le (see README for more info). -- Fix an illegal access issue caught using gcc's address sanitizer. - Thanks to Georg Geiser for reporting. -- Add checks to avoid conflicts with a libevent library shipped with LSF. -- Switch to linking against libevent_core rather than libevent, if present. -- Add improved support for UCX 1.9 and later. -- Fix an ABI compatibility issue with the Fortran 2008 bindings. - Thanks to Alastair McKinstry for reporting. -- Fix an issue with rpath of /usr/lib64 when building OMPI on - systems with Lustre. Thanks to David Shrader for reporting. -- Fix a memory leak occurring with certain MPI RMA operations. -- Fix an issue with ORTE's mapping of MPI processes to resources. - Thanks to Alex Margolin for reporting and providing a fix. 
-- Correct a problem with incorrect error codes being returned - by OMPI MPI_T functions. -- Fix an issue with debugger tools not being able to attach - to mpirun more than once. Thanks to Gregory Lee for reporting. -- Fix an issue with the Fortran compiler wrappers when using - NAG compilers. Thanks to Peter Brady for reporting. -- Fix an issue with the ORTE ssh based process launcher at scale. - Thanks to Benjamín Hernández for reporting. -- Address an issue when using shared MPI I/O operations. OMPIO will - now successfully return from the file open statement but will - raise an error if the file system does not support shared I/O - operations. Thanks to Romain Hild for reporting. -- Fix an issue with MPI_WIN_DETACH. Thanks to Thomas Naughton for reporting. - -4.0.3 -- March, 2020 ------------------------ - -- Update embedded PMIx to 3.1.5 -- Add support for Mellanox ConnectX-6. -- Fix an issue in OpenMPI IO when using shared file pointers. - Thanks to Romain Hild for reporting. -- Fix a problem with Open MPI using a previously installed - Fortran mpi module during compilation. Thanks to Marcin - Mielniczuk for reporting -- Fix a problem with Fortran compiler wrappers ignoring use of - disable-wrapper-runpath configure option. Thanks to David - Shrader for reporting. -- Fixed an issue with trying to use mpirun on systems where neither - ssh nor rsh is installed. -- Address some problems found when using XPMEM for intra-node message - transport. -- Improve dimensions returned by MPI_Dims_create for certain - cases. Thanks to @aw32 for reporting. -- Fix an issue when sending messages larger than 4GB. Thanks to - Philip Salzmann for reporting this issue. -- Add ability to specify alternative module file path using - Open MPI's RPM spec file. Thanks to @jschwartz-cray for reporting. -- Clarify use of --with-hwloc configuration option in the README. - Thanks to Marcin Mielniczuk for raising this documentation issue. -- Fix an issue with shmem_atomic_set.
Thanks to Sameh Sharkawi for reporting. -- Fix a problem with MPI_Neighbor_alltoall(v,w) for cartesian communicators - with cyclic boundary conditions. Thanks to Ralph Rabenseifner and - Tony Skjellum for reporting. -- Fix an issue using Open MPIO on 32 bit systems. Thanks to - Orion Poplawski for reporting. -- Fix an issue with NetCDF test deadlocking when using the vulcan - Open MPIO component. Thanks to Orion Poplawski for reporting. -- Fix an issue with the mpi_yield_when_idle parameter being ignored - when set in the Open MPI MCA parameter configuration file. - Thanks to @iassiour for reporting. -- Address an issue with Open MPIO when writing/reading more than 2GB - in an operation. Thanks to Richard Warren for reporting. - -4.0.2 -- September, 2019 ------------------------- - -- Update embedded PMIx to 3.1.4 -- Enhance Open MPI to detect when processes are running in - different name spaces on the same node, in which case the - vader CMA single copy mechanism is disabled. Thanks - to Adrian Reber for reporting and providing a fix. -- Fix an issue with ORTE job tree launch mechanism. Thanks - to @lanyangyang for reporting. -- Fix an issue with env processing when running as root. - Thanks to Simon Byrne for reporting and providing a fix. -- Fix Fortran MPI_FILE_GET_POSITION return code bug. - Thanks to Wei-Keng Liao for reporting. -- Fix user defined datatypes/ops leak in nonblocking base collective - component. Thanks to Andrey Maslennikov for verifying fix. -- Fixed shared memory not working with spawned processes. - Thanks to @rodarima for reporting. -- Fix data corruption of overlapping datatypes on sends. - Thanks to DKRZ for reporting. -- Fix segfault in oob_tcp component on close with active listeners. - Thanks to Orivej Desh for reporting and providing a fix. -- Fix divide by zero segfault in ompio. - Thanks to @haraldkl for reporting and providing a fix. -- Fix finalize of flux components.
- Thanks to Stephen Herbein and Jim Garlick for providing a fix. -- Fix osc_rdma_acc_single_intrinsic regression. - Thanks to Joseph Schuchart for reporting and providing a fix. -- Fix hostnames with large integers. - Thanks to @perrynzhou for reporting and providing a fix. -- Fix Deadlock in MPI_Fetch_and_op when using UCX - Thanks to Joseph Schuchart for reporting. -- Fix the SLURM plm for mpirun-based launching. - Thanks to Jordon Hayes for reporting and providing a fix. -- Prevent grep failure in rpmbuild from aborting. - Thanks to Daniel Letai for reporting. -- Fix btl/vader finalize sequence. - Thanks to Daniel Vollmer for reporting. -- Fix pml/ob1 local handle sent during PUT control message. - Thanks to @EmmanuelBRELLE for reporting and providing a fix. -- Fix Memory leak with persistent MPI sends and the ob1 "get" protocol. - Thanks to @s-kuberski for reporting. -- v4.0.x: mpi: mark MPI_COMBINER_{HVECTOR,HINDEXED,STRUCT}_INTEGER - removed unless configured with --enable-mpi1-compatibility -- Fix make-authors.pl when run in a git submodule. - Thanks to Michael Heinz for reporting and providing a fix. -- Fix deadlock with mpi_assert_allow_overtaking in MPI_Issend. - Thanks to Joseph Schuchart and George Bosilca for reporting. -- Add compilation flag to allow unwinding through files that are - present in the stack when attaching with MPIR. - Thanks to James A Clark for reporting and providing a fix. - -Known issues: - -- There is a known issue with the OFI libfabric and PSM2 MTLs when trying to send - very long (> 4 GBytes) messages. In this release, these MTLs will catch - this case and abort the transfer. A future release will provide a - better solution to this issue. - -4.0.1 -- March, 2019 --------------------- - -- Update embedded PMIx to 3.1.2. -- Fix an issue with Vader (shared-memory) transport on OS-X. Thanks - to Daniel Vollmer for reporting. -- Fix a problem with the usNIC BTL Makefile. Thanks to George Marselis - for reporting. 
-- Fix an issue when using --enable-visibility configure option - and older versions of hwloc. Thanks to Ben Menadue for reporting - and providing a fix. -- Fix an issue with MPI_WIN_CREATE_DYNAMIC and MPI_GET from self. - Thanks to Bart Janssens for reporting. -- Fix an issue of excessive compiler warning messages from mpi.h - when using newer C++ compilers. Thanks to @Shadow-fax for - reporting. -- Fix a problem when building Open MPI using clang 5.0. -- Fix a problem with MPI_WIN_CREATE when using UCX. Thanks - to Adam Simpson for reporting. -- Fix a memory leak encountered for certain MPI datatype - destructor operations. Thanks to Axel Huebl for reporting. -- Fix several problems with MPI RMA accumulate operations. - Thanks to Jeff Hammond for reporting. -- Fix possible race condition in closing some file descriptors - during job launch using mpirun. Thanks to Jason Williams - for reporting and providing a fix. -- Fix a problem in OMPIO for large individual write operations. - Thanks to Axel Huebl for reporting. -- Fix a problem with parsing of map-by ppr options to mpirun. - Thanks to David Rich for reporting. -- Fix a problem observed when using the mpool hugepage component. Thanks - to Hunter Easterday for reporting and fixing. -- Fix valgrind warning generated when invoking certain MPI Fortran - data type creation functions. Thanks to @rtoijala for reporting. -- Fix a problem when trying to build with a PMIX 3.1 or newer - release. Thanks to Alastair McKinstry for reporting. -- Fix a problem encountered with building MPI F08 module files. - Thanks to Igor Andriyash and Axel Huebl for reporting. -- Fix two memory leaks encountered for certain MPI-RMA usage patterns. - Thanks to Joseph Schuchart for reporting and fixing. -- Fix a problem with the ORTE rmaps_base_oversubscribe MCA parameter. - Thanks to @iassiour for reporting. -- Fix a problem with UCX PML default error handler for MPI communicators. - Thanks to Marcin Krotkiewski for reporting.
-- Fix various issues with OMPIO uncovered by the testmpio test suite. - -4.0.0 -- September, 2018 ------------------------- - -- OSHMEM updated to the OpenSHMEM 1.4 API. -- Do not build OpenSHMEM layer when there are no SPMLs available. - Currently, this means the OpenSHMEM layer will only build if - a MXM or UCX library is found. -- A UCX BTL was added for enhanced MPI RMA support using UCX -- With this release, OpenIB BTL now only supports iWarp and RoCE by default. -- Updated internal HWLOC to 2.0.2 -- Updated internal PMIx to 3.0.2 -- Change the priority for selecting external versus internal HWLOC - and PMIx packages to build. Starting with this release, configure - by default selects available external HWLOC and PMIx packages over - the internal ones. -- Updated internal ROMIO to 3.2.1. -- Removed support for the MXM MTL. -- Removed support for SCIF. -- Improved CUDA support when using UCX. -- Enable use of CUDA allocated buffers for OMPIO. -- Improved support for two phase MPI I/O operations when using OMPIO. -- Added support for Software-based Performance Counters, see - https://github.com/davideberius/ompi/wiki/How-to-Use-Software-Based-Performance-Counters-(SPCs)-in-Open-MPI -- Change MTL OFI from opting-IN on "psm,psm2,gni" to opting-OUT on - "shm,sockets,tcp,udp,rstream" -- Various improvements to MPI RMA performance when using RDMA - capable interconnects. -- Update memkind component to use the memkind 1.6 public API. -- Fix a problem with javadoc builds using OpenJDK 11. Thanks to - Siegmar Gross for reporting. -- Fix a memory leak using UCX. Thanks to Charles Taylor for reporting. -- Fix hangs in MPI_FINALIZE when using UCX. -- Fix a problem with building Open MPI using an external PMIx 2.1.2 - library. Thanks to Marcin Krotkiewski for reporting. -- Fix race conditions in Vader (shared memory) transport. -- Fix problems with use of newer map-by mpirun options. Thanks to - Tony Reina for reporting.
-- Fix rank-by algorithms to properly rank by object and span -- Allow for running as root if two environment variables are set. - Requested by Axel Huebl. -- Fix a problem with building the Java bindings when using Java 10. - Thanks to Bryce Glover for reporting. -- Fix a problem with ORTE not reporting error messages if an application - terminated normally but exited with non-zero error code. Thanks to - Emre Brookes for reporting. - -3.1.6 -- March, 2020 --------------------- - -- Fix one-sided shared memory window configuration bug. -- Fix support for PGI'18 compiler. -- Fix issue with zero-length blockLength in MPI_TYPE_INDEXED. -- Fix run-time linker issues with OMPIO on newer Linux distros. -- Fix PMIX dstore locking compilation issue. Thanks to Marco Atzeri - for reporting the issue. -- Allow the user to override modulefile_path in the Open MPI SRPM, - even if install_in_opt is set to 1. -- Properly detect ConnectX-6 HCAs in the openib BTL. -- Fix segfault in the MTL/OFI initialization for large jobs. -- Fix issue to guarantee to properly release MPI one-sided lock when - using UCX transports to avoid a deadlock. -- Fix potential deadlock when processing outstanding transfers with - uGNI transports. -- Fix various portals4 control flow bugs. -- Fix communications ordering for alltoall and Cartesian neighborhood - collectives. -- Fix an infinite recursion crash in the memory patcher on systems - with glibc v2.26 or later (e.g., Ubuntu 18.04) when using certain - OS-bypass interconnects. - -3.1.5 -- November, 2019 ------------------------ - -- Fix OMPIO issue limiting file reads/writes to 2GB. Thanks to - Richard Warren for reporting the issue. -- At run time, automatically disable Linux cross-memory attach (CMA) - for vader BTL (shared memory) copies when running in user namespaces - (i.e., containers). Many thanks to Adrian Reber for raising the - issue and providing the fix.
-- Sending very large MPI messages using the ofi MTL will fail with - some of the underlying Libfabric transports (e.g., PSM2 with - messages >=4GB, verbs with messages >=2GB). Prior version of Open - MPI failed silently; this version of Open MPI invokes the - appropriate MPI error handler upon failure. See - https://github.com/open-mpi/ompi/issues/7058 for more details. - Thanks to Emmanuel Thomé for raising the issue. -- Fix case where 0-extent datatypes might be eliminated during - optimization. Thanks to Github user @tjahns for raising the issue. -- Ensure that the MPIR_Breakpoint symbol is not optimized out on - problematic platforms. -- Fix MPI one-sided 32 bit atomic support. -- Fix OMPIO offset calculations with SEEK_END and SEEK_CUR in - MPI_FILE_GET_POSITION. Thanks to Wei-keng Liao for raising the - issue. -- Add "naive" regx component that will never fail, no matter how - esoteric the hostnames are. -- Fix corner case for datatype extent computations. Thanks to David - Dickenson for raising the issue. -- Allow individual jobs to set their map/rank/bind policies when - running LSF. Thanks to Nick R. Papior for assistance in solving the - issue. -- Fix MPI buffered sends with the "cm" PML. -- Properly propagate errors to avoid deadlocks in MPI one-sided operations. -- Update to PMIx v2.2.3. -- Fix data corruption in non-contiguous MPI accumulates over UCX. -- Fix ssh-based tree-based spawning at scale. Many thanks to Github - user @zrss for the report and diagnosis. -- Fix the Open MPI RPM spec file to not abort when grep fails. Thanks - to Daniel Letai for bringing this to our attention. -- Handle new SLURM CLI options (SLURM 19 deprecated some options that - Open MPI was using). Thanks to Jordan Hayes for the report and the - initial fix. -- OMPI: fix division by zero with an empty file view. -- Also handle shmat()/shmdt() memory patching with OS-bypass networks. 
-- Add support for unwinding info to all files that are present in the - stack starting from MPI_Init, which is helpful with parallel - debuggers. Thanks to James Clark for the report and initial fix. -- Fixed inadvertent use of bitwise operators in the MPI C++ bindings - header files. Thanks to Bert Wesarg for the report and the fix. - -3.1.4 -- April, 2019 --------------------- - -- Fix compile error when configured with --enable-mpi-java and - --with-devel-headers. Thanks to @g-raffy for reporting the issue - (** also appeared: v3.0.4). -- Only use hugepages with appropriate permissions. Thanks to Hunter - Easterday for the fix. -- Fix possible floating point rounding and division issues in OMPIO - which led to crashes and/or data corruption with very large data. - Thanks to Axel Huebl and René Widera for identifying the issue, - supplying and testing the fix (** also appeared: v3.0.4). -- Use static_cast<> in mpi.h where appropriate. Thanks to @shadow-fx - for identifying the issue (** also appeared: v3.0.4). -- Fix RMA accumulate of non-predefined datatypes with predefined - operators. Thanks to Jeff Hammond for raising the issue (** also - appeared: v3.0.4). -- Fix race condition when closing open file descriptors when launching - MPI processes. Thanks to Jason Williams for identifying the issue and - supplying the fix (** also appeared: v3.0.4). -- Fix support for external PMIx v3.1.x. -- Fix Valgrind warnings for some MPI_TYPE_CREATE_* functions. Thanks - to Risto Toijala for identifying the issue and supplying the fix (** - also appeared: v3.0.4). -- Fix MPI_TYPE_CREATE_F90_{REAL,COMPLEX} for r=38 and r=308 (** also - appeared: v3.0.4). -- Fix assembly issues with old versions of gcc (<6.0.0) that affected - the stability of shared memory communications (e.g., with the vader - BTL) (** also appeared: v3.0.4). -- Fix MPI_Allreduce crashes with some cases in the coll/spacc module. -- Fix the OFI MTL handling of MPI_ANY_SOURCE (** also appeared: - v3.0.4).
-- Fix noisy errors in the openib BTL with regards to - ibv_exp_query_device(). Thanks to Angel Beltre and others who - reported the issue (** also appeared: v3.0.4). -- Fix zero-size MPI one-sided windows with UCX. - -3.1.3 -- October, 2018 ----------------------- - -- Fix race condition in MPI_THREAD_MULTIPLE support of non-blocking - send/receive path. -- Fix error handling SIGCHLD forwarding. -- Add support for CHARACTER and LOGICAL Fortran datatypes for MPI_SIZEOF. -- Fix compile error when using OpenJDK 11 to compile the Java bindings. -- Fix crash when using a hostfile with a 'user@host' line. -- Numerous Fortran '08 interface fixes. -- TCP BTL error message fixes. -- OFI MTL now will use any provider other than shm, sockets, tcp, udp, or - rstream, rather than only supporting gni, psm, and psm2. -- Disable async receive of CUDA buffers by default, fixing a hang - on large transfers. -- Support the BCM57XXX and BCM58XXX Broadcomm adapters. -- Fix minmax datatype support in ROMIO. -- Bug fixes in vader shared memory transport. -- Support very large buffers with MPI_TYPE_VECTOR. -- Fix hang when launching with mpirun on Cray systems. - -3.1.2 -- August, 2018 ------------------------- - -- A subtle race condition bug was discovered in the "vader" BTL - (shared memory communications) that, in rare instances, can cause - MPI processes to crash or incorrectly classify (or effectively drop) - an MPI message sent via shared memory. If you are using the "ob1" - PML with "vader" for shared memory communication (note that vader is - the default for shared memory communication with ob1), you need to - upgrade to v3.1.2 or later to fix this issue. You may also upgrade - to the following versions to fix this issue: - - Open MPI v2.1.5 (expected end of August, 2018) or later in the - v2.1.x series - - Open MPI v3.0.1 (released March, 2018) or later in the v3.0.x - series -- Assorted Portals 4.0 bug fixes. -- Fix for possible data corruption in MPI_BSEND. 
-- Move shared memory file for vader btl into /dev/shm on Linux. -- Fix for MPI_ISCATTER/MPI_ISCATTERV Fortran interfaces with MPI_IN_PLACE. -- Upgrade PMIx to v2.1.3. -- Numerous One-sided bug fixes. -- Fix for race condition in uGNI BTL. -- Improve handling of large number of interfaces with TCP BTL. -- Numerous UCX bug fixes. - -3.1.1 -- June, 2018 -------------------- - -- Fix potential hang in UCX PML during MPI_FINALIZE -- Update internal PMIx to v2.1.2rc2 to fix forward version compatibility. -- Add new MCA parameter osc_sm_backing_store to allow users to specify - where in the filesystem the backing file for the shared memory - one-sided component should live. Defaults to /dev/shm on Linux. -- Fix potential hang on non-x86 platforms when using builds with - optimization flags turned off. -- Disable osc/pt2pt when using MPI_THREAD_MULTIPLE due to numerous - race conditions in the component. -- Fix dummy variable names for the mpi and mpi_f08 Fortran bindings to - match the MPI standard. This may break applications which use - name-based parameters in Fortran which used our internal names - rather than those documented in the MPI standard. -- Revamp Java detection to properly handle new Java versions which do - not provide a javah wrapper. -- Fix RMA function signatures for use-mpi-f08 bindings to have the - asynchonous property on all buffers. -- Improved configure logic for finding the UCX library. - -3.1.0 -- May, 2018 ------------------- - -- Various OpenSHMEM bug fixes. -- Properly handle array_of_commands argument to Fortran version of - MPI_COMM_SPAWN_MULTIPLE. -- Fix bug with MODE_SEQUENTIAL and the sharedfp MPI-IO component. -- Use "javac -h" instead of "javah" when building the Java bindings - with a recent version of Java. -- Fix mis-handling of jostepid under SLURM that could cause problems - with PathScale/OmniPath NICs. -- Disable the POWER 7/BE block in configure. 
Note that POWER 7/BE is - still not a supported platform, but it is no longer automatically - disabled. See - https://github.com/open-mpi/ompi/issues/4349#issuecomment-374970982 - for more information. -- The output-filename option for mpirun is now converted to an - absolute path before being passed to other nodes. -- Add monitoring component for PML, OSC, and COLL to track data - movement of MPI applications. See - ompi/mca/commmon/monitoring/HowTo_pml_monitoring.tex for more - information about the monitoring framework. -- Add support for communicator assertions: mpi_assert_no_any_tag, - mpi_assert_no_any_source, mpi_assert_exact_length, and - mpi_assert_allow_overtaking. -- Update PMIx to version 2.1.1. -- Update hwloc to 1.11.7. -- Many one-sided behavior fixes. -- Improved performance for Reduce and Allreduce using Rabenseifner's algorithm. -- Revamped mpirun --help output to make it a bit more manageable. -- Portals4 MTL improvements: Fix race condition in rendezvous protocol and - retry logic. -- UCX OSC: initial implementation. -- UCX PML improvements: add multi-threading support. -- Yalla PML improvements: Fix error with irregular contiguous datatypes. -- Openib BTL: disable XRC support by default. -- TCP BTL: Add check to detect and ignore connections from processes - that aren't MPI (such as IDS probes) and verify that source and - destination are using the same version of Open MPI, fix issue with very - large message transfer. -- ompi_info parsable output now escapes double quotes in values, and - also quotes values can contains colons. Thanks to Lev Givon for the - suggestion. -- CUDA-aware support can now handle GPUs within a node that do not - support CUDA IPC. Earlier versions would get error and abort. -- Add a mca parameter ras_base_launch_orted_on_hn to allow for launching - MPI processes on the same node where mpirun is executing using a separate - orte daemon, rather than the mpirun process. 
This may be useful to set to - true when using SLURM, as it improves interoperability with SLURM's signal - propagation tools. By default it is set to false, except for Cray XC systems. -- Remove LoadLeveler RAS support. -- Remove IB XRC support from the OpenIB BTL due to lack of support. -- Add functionality for IBM s390 platforms. Note that regular - regression testing does not occur on the s390 and it is not - considered a supported platform. -- Remove support for big endian PowerPC. -- Remove support for XL compilers older than v13.1. -- Remove support for atomic operations using MacOS atomics library. - -3.0.6 -- March, 2020 --------------------- - -- Fix one-sided shared memory window configuration bug. -- Fix support for PGI'18 compiler. -- Fix run-time linker issues with OMPIO on newer Linux distros. -- Allow the user to override modulefile_path in the Open MPI SRPM, - even if install_in_opt is set to 1. -- Properly detect ConnectX-6 HCAs in the openib BTL. -- Fix segfault in the MTL/OFI initialization for large jobs. -- Fix various portals4 control flow bugs. -- Fix communications ordering for alltoall and Cartesian neighborhood - collectives. -- Fix an infinite recursion crash in the memory patcher on systems - with glibc v2.26 or later (e.g., Ubuntu 18.04) when using certain - OS-bypass interconnects. - -3.0.5 -- November, 2019 ------------------------ - -- Fix OMPIO issue limiting file reads/writes to 2GB. Thanks to - Richard Warren for reporting the issue. -- At run time, automatically disable Linux cross-memory attach (CMA) - for vader BTL (shared memory) copies when running in user namespaces - (i.e., containers). Many thanks to Adrian Reber for raising the - issue and providing the fix. -- Sending very large MPI messages using the ofi MTL will fail with - some of the underlying Libfabric transports (e.g., PSM2 with - messages >=4GB, verbs with messages >=2GB). 
Prior version of Open - MPI failed silently; this version of Open MPI invokes the - appropriate MPI error handler upon failure. See - https://github.com/open-mpi/ompi/issues/7058 for more details. - Thanks to Emmanuel Thomé for raising the issue. -- Fix case where 0-extent datatypes might be eliminated during - optimization. Thanks to Github user @tjahns for raising the issue. -- Ensure that the MPIR_Breakpoint symbol is not optimized out on - problematic platforms. -- Fix OMPIO offset calculations with SEEK_END and SEEK_CUR in - MPI_FILE_GET_POSITION. Thanks to Wei-keng Liao for raising the - issue. -- Fix corner case for datatype extent computations. Thanks to David - Dickenson for raising the issue. -- Fix MPI buffered sends with the "cm" PML. -- Update to PMIx v2.2.3. -- Fix ssh-based tree-based spawning at scale. Many thanks to Github - user @zrss for the report and diagnosis. -- Fix the Open MPI RPM spec file to not abort when grep fails. Thanks - to Daniel Letai for bringing this to our attention. -- Handle new SLURM CLI options (SLURM 19 deprecated some options that - Open MPI was using). Thanks to Jordan Hayes for the report and the - initial fix. -- OMPI: fix division by zero with an empty file view. -- Also handle shmat()/shmdt() memory patching with OS-bypass networks. -- Add support for unwinding info to all files that are present in the - stack starting from MPI_Init, which is helpful with parallel - debuggers. Thanks to James Clark for the report and initial fix. -- Fixed inadvertant use of bitwise operators in the MPI C++ bindings - header files. Thanks to Bert Wesarg for the report and the fix. -- Added configure option --disable-wrappers-runpath (alongside the - already-existing --disable-wrappers-rpath option) to prevent Open - MPI's configure script from automatically adding runpath CLI options - to the wrapper compilers. 
- -3.0.4 -- April, 2019 --------------------- - -- Fix compile error when configured with --enable-mpi-java and - --with-devel-headers. Thanks to @g-raffy for reporting the issue. -- Fix possible floating point rounding and division issues in OMPIO - which led to crashes and/or data corruption with very large data. - Thanks to Axel Huebl and René Widera for identifing the issue, - supplying and testing the fix (** also appeared: v3.0.4). -- Use static_cast<> in mpi.h where appropriate. Thanks to @shadow-fx - for identifying the issue. -- Fix datatype issue with RMA accumulate. Thanks to Jeff Hammond for - raising the issue. -- Fix RMA accumulate of non-predefined datatypes with predefined - operators. Thanks to Jeff Hammond for raising the issue. -- Fix race condition when closing open file descriptors when launching - MPI processes. Thanks to Jason Williams for identifying the issue and - supplying the fix. -- Fix Valgrind warnings for some MPI_TYPE_CREATE_* functions. Thanks - to Risto Toijala for identifying the issue and supplying the fix. -- Fix MPI_TYPE_CREATE_F90_{REAL,COMPLEX} for r=38 and r=308. -- Fix assembly issues with old versions of gcc (<6.0.0) that affected - the stability of shared memory communications (e.g., with the vader - BTL). -- Fix the OFI MTL handling of MPI_ANY_SOURCE. -- Fix noisy errors in the openib BTL with regards to - ibv_exp_query_device(). Thanks to Angel Beltre and others who - reported the issue. - -3.0.3 -- October, 2018 ----------------------- - -- Fix race condition in MPI_THREAD_MULTIPLE support of non-blocking - send/receive path. -- Fix error handling SIGCHLD forwarding. -- Add support for CHARACTER and LOGICAL Fortran datatypes for MPI_SIZEOF. -- Fix compile error when using OpenJDK 11 to compile the Java bindings. -- Fix crash when using a hostfile with a 'user@host' line. -- Numerous Fortran '08 interface fixes. -- TCP BTL error message fixes. 
-- OFI MTL now will use any provider other than shm, sockets, tcp, udp, or - rstream, rather than only supporting gni, psm, and psm2. -- Disable async receive of CUDA buffers by default, fixing a hang - on large transfers. -- Support the BCM57XXX and BCM58XXX Broadcomm adapters. -- Fix minmax datatype support in ROMIO. -- Bug fixes in vader shared memory transport. -- Support very large buffers with MPI_TYPE_VECTOR. -- Fix hang when launching with mpirun on Cray systems. -- Bug fixes in OFI MTL. -- Assorted Portals 4.0 bug fixes. -- Fix for possible data corruption in MPI_BSEND. -- Move shared memory file for vader btl into /dev/shm on Linux. -- Fix for MPI_ISCATTER/MPI_ISCATTERV Fortran interfaces with MPI_IN_PLACE. -- Upgrade PMIx to v2.1.4. -- Fix for Power9 built-in atomics. -- Numerous One-sided bug fixes. -- Fix for race condition in uGNI BTL. -- Improve handling of large number of interfaces with TCP BTL. -- Numerous UCX bug fixes. -- Add support for QLogic and Broadcom Cumulus RoCE HCAs to Open IB BTL. -- Add patcher support for aarch64. -- Fix hang on Power and ARM when Open MPI was built with low compiler - optimization settings. - -3.0.2 -- June, 2018 -------------------- - -- Disable osc/pt2pt when using MPI_THREAD_MULTIPLE due to numerous - race conditions in the component. -- Fix dummy variable names for the mpi and mpi_f08 Fortran bindings to - match the MPI standard. This may break applications which use - name-based parameters in Fortran which used our internal names - rather than those documented in the MPI standard. -- Fixed MPI_SIZEOF in the "mpi" Fortran module for the NAG compiler. -- Fix RMA function signatures for use-mpi-f08 bindings to have the - asynchonous property on all buffers. -- Fix Fortran MPI_COMM_SPAWN_MULTIPLE to properly follow the count - length argument when parsing the array_of_commands variable. -- Revamp Java detection to properly handle new Java versions which do - not provide a javah wrapper. 
-- Improved configure logic for finding the UCX library. -- Add support for HDR InfiniBand link speeds. -- Disable the POWER 7/BE block in configure. Note that POWER 7/BE is - still not a supported platform, but it is no longer automatically - disabled. See - https://github.com/open-mpi/ompi/issues/4349#issuecomment-374970982 - for more information. - -3.0.1 -- March, 2018 ----------------------- - -- Fix ability to attach parallel debuggers to MPI processes. -- Fix a number of issues in MPI I/O found by the HDF5 test suite. -- Fix (extremely) large message transfers with shared memory. -- Fix out of sequence bug in multi-NIC configurations. -- Fix stdin redirection bug that could result in lost input. -- Disable the LSF launcher if CSM is detected. -- Plug a memory leak in MPI_Mem_free(). Thanks to Philip Blakely for reporting. -- Fix the tree spawn operation when the number of nodes is larger than the radix. - Thanks to Carlos Eduardo de Andrade for reporting. -- Fix Fortran 2008 macro in MPI extensions. Thanks to Nathan T. Weeks for - reporting. -- Add UCX to list of interfaces that OpenSHMEM will use by default. -- Add --{enable|disable}-show-load-errors-by-default to control - default behavior of the load errors option. -- OFI MTL improvements: handle empty completion queues properly, fix - incorrect error message around fi_getinfo(), use default progress - option for provider by default, Add support for reading multiple - CQ events in ofi_progress. -- PSM2 MTL improvements: Allow use of GPU buffers, thread fixes. -- Numerous corrections to memchecker behavior. -- Add a mca parameter ras_base_launch_orted_on_hn to allow for launching - MPI processes on the same node where mpirun is executing using a separate - orte daemon, rather than the mpirun process. This may be useful to set to - true when using SLURM, as it improves interoperability with SLURM's signal - propagation tools. By default it is set to false, except for Cray XC systems. 
-- Fix a problem reported on the mailing separately by Kevin McGrattan and Stephen - Guzik about consistency issues on NFS file systems when using OMPIO. This fix - also introduces a new mca parameter fs_ufs_lock_algorithm which allows to - control the locking algorithm used by ompio for read/write operations. By - default, ompio does not perfom locking on local UNIX file systems, locks the - entire file per operation on NFS file systems, and selective byte-range - locking on other distributed file systems. -- Add an mca parameter pmix_server_usock_connections to allow mpirun to - support applications statically built against the Open MPI v2.x release, - or installed in a container along with the Open MPI v2.x libraries. It is - set to false by default. - -3.0.0 -- September, 2017 ------------------------- - -Major new features: - -- Use UCX allocator for OSHMEM symmetric heap allocations to optimize intra-node - data transfers. UCX SPML only. -- Use UCX multi-threaded API in the UCX PML. Requires UCX 1.0 or later. -- Added support for Flux PMI -- Update embedded PMIx to version 2.1.0 -- Update embedded hwloc to version 1.11.7 - -Changes in behavior compared to prior versions: - -- Per Open MPI's versioning scheme (see the README), increasing the - major version number to 3 indicates that this version is not - ABI-compatible with prior versions of Open MPI. In addition, there may - be differences in MCA parameter names and defaults from previous releases. - Command line options for mpirun and other commands may also differ from - previous versions. You will need to recompile MPI and OpenSHMEM applications - to work with this version of Open MPI. -- With this release, Open MPI supports MPI_THREAD_MULTIPLE by default. -- New configure options have been added to specify the locations of libnl - and zlib. -- A new configure option has been added to request Flux PMI support. -- The help menu for mpirun and related commands is now context based. 
- "mpirun --help compatibility" generates the help menu in the same format - as previous releases. - -Removed legacy support: -- AIX is no longer supported. -- Loadlever is no longer supported. -- OpenSHMEM currently supports the UCX and MXM transports via the ucx and ikrit - SPMLs respectively. -- Remove IB XRC support from the OpenIB BTL due to lack of support. -- Remove support for big endian PowerPC. -- Remove support for XL compilers older than v13.1 - -Known issues: - -- MPI_Connect/accept between applications started by different mpirun - commands will fail, even if ompi-server is running. - -2.1.5 -- August 2018 --------------------- - -- A subtle race condition bug was discovered in the "vader" BTL - (shared memory communications) that, in rare instances, can cause - MPI processes to crash or incorrectly classify (or effectively drop) - an MPI message sent via shared memory. If you are using the "ob1" - PML with "vader" for shared memory communication (note that vader is - the default for shared memory communication with ob1), you need to - upgrade to v2.1.5 to fix this issue. You may also upgrade to the - following versions to fix this issue: - - Open MPI v3.0.1 (released March, 2018) or later in the v3.0.x - series - - Open MPI v3.1.2 (expected end of August, 2018) or later -- A link issue was fixed when the UCX library was not located in the - linker-default search paths. - -2.1.4 -- August, 2018 ---------------------- - -Bug fixes/minor improvements: -- Disable the POWER 7/BE block in configure. Note that POWER 7/BE is - still not a supported platform, but it is no longer automatically - disabled. See - https://github.com/open-mpi/ompi/issues/4349#issuecomment-374970982 - for more information. -- Fix bug with request-based one-sided MPI operations when using the - "rdma" component. -- Fix issue with large data structure in the TCP BTL causing problems - in some environments. Thanks to @lgarithm for reporting the issue. -- Minor Cygwin build fixes. 
-- Minor fixes for the openib BTL: - - Support for the QLogic RoCE HCA - - Support for the Boradcom Cumulus RoCE HCA - - Enable support for HDR link speeds -- Fix MPI_FINALIZED hang if invoked from an attribute destructor - during the MPI_COMM_SELF destruction in MPI_FINALIZE. Thanks to - @AndrewGaspar for reporting the issue. -- Java fixes: - - Modernize Java framework detection, especially on OS X/MacOS. - Thanks to Bryce Glover for reporting and submitting the fixes. - - Prefer "javac -h" to "javah" to support newer Java frameworks. -- Fortran fixes: - - Use conformant dummy parameter names for Fortran bindings. Thanks - to Themos Tsikas for reporting and submitting the fixes. - - Build the MPI_SIZEOF() interfaces in the "TKR"-style "mpi" module - whenever possible. Thanks to Themos Tsikas for reporting the - issue. - - Fix array of argv handling for the Fortran bindings of - MPI_COMM_SPAWN_MULTIPLE (and its associated man page). - - Make NAG Fortran compiler support more robust in configure. -- Disable the "pt2pt" one-sided MPI component when MPI_THREAD_MULTIPLE - is used. This component is simply not safe in MPI_THREAD_MULTIPLE - scenarios, and will not be fixed in the v2.1.x series. -- Make the "external" hwloc component fail gracefully if it is tries - to use an hwloc v2.x.y installation. hwloc v2.x.y will not be - supported in the Open MPI v2.1.x series. -- Fix "vader" shared memory support for messages larger than 2GB. - Thanks to Heiko Bauke for the bug report. -- Configure fixes for external PMI directory detection. Thanks to - Davide Vanzo for the report. - -2.1.3 -- March, 2018 --------------------- - -Bug fixes/minor improvements: -- Update internal PMIx version to 1.2.5. -- Fix a problem with ompi_info reporting using param option. - Thanks to Alexander Pozdneev for reporting. -- Correct PMPI_Aint_{add|diff} to be functions (not subroutines) - in the Fortran mpi_f08 module. 
-- Fix a problem when doing MPI I/O using data types with large - extents in conjunction with MPI_TYPE_CREATE_SUBARRAY. Thanks to - Christopher Brady for reporting. -- Fix a problem when opening many files using MPI_FILE_OPEN. - Thanks to William Dawson for reporting. -- Fix a problem with debuggers failing to attach to a running job. - Thanks to Dirk Schubert for reporting. -- Fix a problem when using madvise and the OpenIB BTL. Thanks to - Timo Bingmann for reporting. -- Fix a problem in the Vader BTL that resulted in failures of - IMB under certain circumstances. Thanks to Nicolas Morey- - Chaisemartin for reporting. -- Fix a problem preventing Open MPI from working under Cygwin. - Thanks to Marco Atzeri for reporting. -- Reduce some verbosity being emitted by the USNIC BTL under certain - circumstances. Thanks to Peter Forai for reporting. -- Fix a problem with misdirection of SIGKILL. Thanks to Michael Fern - for reporting. -- Replace use of posix_memalign with malloc for small allocations. Thanks - to Ben Menaude for reporting. -- Fix a problem with Open MPI's out of band TCP network for file descriptors - greater than 32767. Thanks to Wojtek Wasko for reporting and fixing. -- Plug a memory leak in MPI_Mem_free(). Thanks to Philip Blakely for reporting. - -2.1.2 -- September, 2017 ------------------------- - -Bug fixes/minor improvements: -- Update internal PMIx version to 1.2.3. -- Fix some problems when using the NAG Fortran compiler to build Open MPI - and when using the compiler wrappers. Thanks to Neil Carlson for reporting. -- Fix a compilation problem with the SM BTL. Thanks to Paul Hargrove for - reporting. -- Fix a problem with MPI_IALLTOALLW when using zero-length messages. - Thanks to Dahai Guo for reporting. -- Fix a problem with C11 generic type interface for SHMEM_G. Thanks - to Nick Park for reporting. -- Switch to using the lustreapi.h include file when building Open MPI - with Lustre support. 
-- Fix a problem in the OB1 PML that led to hangs with OSU collective tests. -- Fix a progression issue with MPI_WIN_FLUSH_LOCAL. Thanks to - Joseph Schuchart for reporting. -- Fix an issue with recent versions of PBSPro requiring libcrypto. - Thanks to Petr Hanousek for reporting. -- Fix a problem when using MPI_ANY_SOURCE with MPI_SENDRECV. -- Fix an issue that prevented signals from being propagated to ORTE - daemons. -- Ensure that signals are forwarded from ORTE daemons to all processes - in the process group created by the daemons. Thanks to Ted Sussman - for reporting. -- Fix a problem with launching a job under a debugger. Thanks to - Greg Lee for reporting. -- Fix a problem with Open MPI native I/O MPI_FILE_OPEN when using - a communicator having an associated topology. Thanks to - Wei-keng Liao for reporting. -- Fix an issue when using MPI_ACCUMULATE with derived datatypes. -- Fix a problem with Fortran bindings that led to compilation errors - for user defined reduction operations. Thanks to Nathan Weeks for - reporting. -- Fix ROMIO issues with large writes/reads when using NFS file systems. -- Fix definition of Fortran MPI_ARGV_NULL and MPI_ARGVS_NULL. -- Enable use of the head node of a SLURM allocation on Cray XC systems. -- Fix a problem with synchronous sends when using the UCX PML. -- Use default socket buffer size to improve TCP BTL performance. -- Add a mca parameter ras_base_launch_orted_on_hn to allow for launching - MPI processes on the same node where mpirun is executing using a separate - orte daemon, rather than the mpirun process. This may be useful to set to - true when using SLURM, as it improves interoperability with SLURM's signal - propagation tools. By default it is set to false, except for Cray XC systems. -- Fix --without-lsf when lsf is installed in the default search path. -- Remove support for big endian PowerPC. 
-- Remove support for XL compilers older than v13.1 -- Remove IB XRC support from the OpenIB BTL due to loss of maintainer. - -2.1.1 -- April, 2017 --------------------- - -Bug fixes/minor improvements: - -- Fix a problem with one of Open MPI's fifo data structures which led to - hangs in a make check test. Thanks to Nicolas Morey-Chaisemartin for - reporting. -- Add missing MPI_AINT_ADD/MPI_AINT_DIFF function definitions to mpif.h. - Thanks to Aboorva Devarajan for reporting. -- Fix the error return from MPI_WIN_LOCK when rank argument is invalid. - Thanks to Jeff Hammond for reporting and fixing this issue. -- Fix a problem with mpirun/orterun when started under a debugger. Thanks - to Gregory Leff for reporting. -- Add configury option to disable use of CMA by the vader BTL. Thanks - to Sascha Hunold for reporting. -- Add configury check for MPI_DOUBLE_COMPLEX datatype support. - Thanks to Alexander Klein for reporting. -- Fix memory allocated by MPI_WIN_ALLOCATE_SHARED to - be 64 bit aligned. Thanks to Joseph Schuchart for - reporting. -- Update MPI_WTICK man page to reflect possibly higher - resolution than 10e-6. Thanks to Mark Dixon for - reporting -- Add missing MPI_T_PVAR_SESSION_NULL definition to mpi.h - include file. Thanks to Omri Mor for this contribution. -- Enhance the Open MPI spec file to install modulefile in /opt - if installed in a non-default location. Thanks to Kevin - Buckley for reporting and supplying a fix. -- Fix a problem with conflicting PMI symbols when linking statically. - Thanks to Kilian Cavalotti for reporting. - -Known issues (to be addressed in v2.1.2): - -- See the list of fixes slated for v2.1.2 here: - https://github.com/open-mpi/ompi/milestone/28 - -2.1.0 -- March, 2017 --------------------- - -Major new features: - -- The main focus of the Open MPI v2.1.0 release was to update to PMIx - v1.2.1. 
When using PMIx (e.g., via mpirun-based launches, or via - direct launches with recent versions of popular resource managers), - launch time scalability is improved, and the run time memory - footprint is greatly decreased when launching large numbers of MPI / - OpenSHMEM processes. -- Update OpenSHMEM API conformance to v1.3. -- The usnic BTL now supports MPI_THREAD_MULTIPLE. -- General/overall performance improvements to MPI_THREAD_MULTIPLE. -- Add a summary message at the bottom of configure that tells you many - of the configuration options specified and/or discovered by Open - MPI. - -Changes in behavior compared to prior versions: - -- None. - -Removed legacy support: - -- The ptmalloc2 hooks have been removed from the Open MPI code base. - This is not really a user-noticable change; it is only mentioned - here because there was much rejoycing in the Open MPI developer - community. - -Bug fixes/minor improvements: - -- New MCA parameters: - - iof_base_redirect_app_stderr_to_stdout: as its name implies, it - combines MPI / OpenSHMEM applications' stderr into its stdout - stream. - - opal_event_include: allow the user to specify which FD selection - mechanism is used by the underlying event engine. - - opal_stacktrace_output: indicate where stacktraces should be sent - upon MPI / OpenSHMEM process crashes ("none", "stdout", "stderr", - "file:filename"). - - orte_timeout_for_stack_trace: number of seconds to wait for stack - traces to be reported (or <=0 to wait forever). - - mtl_ofi_control_prog_type/mtl_ofi_data_prog_type: specify libfabric - progress model to be used for control and data. -- Fix MPI_WTICK regression where the time reported may be inaccurate - on systems with processor frequency scalaing enabled. -- Fix regression that lowered the memory maximum message bandwidth for - large messages on some BTL network transports, such as openib, sm, - and vader. -- Fix a name collision in the shared file pointer MPI IO file locking - scheme. 
Thanks to Nicolas Joly for reporting the issue. -- Fix datatype extent/offset errors in MPI_PUT and MPI_RACCUMULATE - when using the Portals 4 one-sided component. -- Add support for non-contiguous datatypes to the Portals 4 one-sided - component. -- Various updates for the UCX PML. -- Updates to the following man pages: - - mpirun(1) - - MPI_COMM_CONNECT(3) - - MPI_WIN_GET_NAME(3). Thanks to Nicolas Joly for reporting the - typo. - - MPI_INFO_GET_[NKEYS|NTHKEY](3). Thanks to Nicolas Joly for - reporting the typo. -- Fixed a problem in the TCP BTL when using MPI_THREAD_MULTIPLE. - Thanks to Evgueni Petrov for reporting. -- Fixed external32 representation in the romio314 module. Note that - for now, external32 representation is not correctly supported by the - ompio module. Thanks to Thomas Gastine for bringing this to our - attention. -- Add note how to disable a warning message about when a high-speed - MPI transport is not found. Thanks to Susan Schwarz for reporting - the issue. -- Ensure that sending SIGINT when using the rsh/ssh launcher does not - orphan children nodes in the launch tree. -- Fix the help message when showing deprecated MCA param names to show - the correct (i.e., deprecated) name. -- Enable support for the openib BTL to use multiple different - InfiniBand subnets. -- Fix a minor error in MPI_AINT_DIFF. -- Fix bugs with MPI_IN_PLACE handling in: - - MPI_ALLGATHER[V] - - MPI_[I][GATHER|SCATTER][V] - - MPI_IREDUCE[_SCATTER] - - Thanks to all the users who helped diagnose these issues. -- Allow qrsh to tree spawn (if the back-end system supports it). -- Fix MPI_T_PVAR_GET_INDEX to return the correct index. -- Correctly position the shared file pointer in append mode in the - OMPIO component. -- Add some deprecated names into shmem.h for backwards compatibility - with legacy codes. -- Fix MPI_MODE_NOCHECK support. -- Fix a regression in PowerPC atomics support. Thanks to Orion - Poplawski for reporting the issue. 
-- Fixes for assembly code with aggressively-optimized compilers on - x86_64/AMD64 platforms. -- Fix one more place where configure was mangling custom CFLAGS. - Thanks to Phil Tooley (@Telemin) for reporting the issue. -- Better handle builds with external installations of hwloc. -- Fixed a hang with MPI_PUT and MPI_WIN_LOCK_ALL. -- Fixed a bug when using MPI_GET on non-contiguous datatypes and - MPI_LOCK/MPI_UNLOCK. -- Fixed a bug when using POST/START/COMPLETE/WAIT after a fence. -- Fix configure portability by cleaning up a few uses of "==" with - "test". Thanks to Kevin Buckley for pointing out the issue. -- Fix bug when using darrays with lib and extent of darray datatypes. -- Updates to make Open MPI binary builds more bit-for-bit - reproducable. Thanks to Alastair McKinstry for the suggestion. -- Fix issues regarding persistent request handling. -- Ensure that shmemx.h is a standalone OpenSHMEM header file. Thanks - to Nick Park (@nspark) for the report. -- Ensure that we always send SIGTERM prior to SIGKILL. Thanks to Noel - Rycroft for the report. -- Added ConnectX-5 and Chelsio T6 device defaults for the openib BTL. -- OpenSHMEM no longer supports MXM less than v2.0. -- Plug a memory leak in ompi_osc_sm_free. Thanks to Joseph Schuchart - for the report. -- The "self" BTL now uses less memory. -- The vader BTL is now more efficient in terms of memory usage when - using XPMEM. -- Removed the --enable-openib-failover configure option. This is not - considered backwards-incompatible because this option was stale and - had long-since stopped working, anyway. -- Allow jobs launched under Cray aprun to use hyperthreads if - opal_hwloc_base_hwthreads_as_cpus MCA parameter is set. -- Add support for 32-bit and floating point Cray Aries atomic - operations. -- Add support for network AMOs for MPI_ACCUMULATE, MPI_FETCH_AND_OP, - and MPI_COMPARE_AND_SWAP if the "ompi_single_intrinsic" info key is - set on the window or the "acc_single_intrinsic" MCA param is set. 
-- Automatically disqualify RDMA CM support in the openib BTL if - MPI_THREAD_MULTIPLE is used. -- Make configure smarter/better about auto-detecting Linux CMA - support. -- Improve the scalability of MPI_COMM_SPLIT_TYPE. -- Fix the mixing of C99 and C++ header files with the MPI C++ - bindings. Thanks to Alastair McKinstry for the bug report. -- Add support for ARM v8. -- Several MCA parameters now directly support MPI_T enumerator - semantics (i.e., they accept a limited set of values -- e.g., MCA - parameters that accept boolean values). -- Added --with-libmpi-name=STRING configure option for vendor releases - of Open MPI. See the README for more detail. -- Fix a problem with Open MPI's internal memory checker. Thanks to Yvan - Fournier for reporting. -- Fix a multi-threaded issue with MPI_WAIT. Thanks to Pascal Deveze for - reporting. - -Known issues (to be addressed in v2.1.1): - -- See the list of fixes slated for v2.1.1 here: - https://github.com/open-mpi/ompi/milestone/26 - -2.0.4 -- November, 2017 ------------------------ - -Bug fixes/minor improvements: -- Fix an issue with visibility of functions defined in the built-in PMIx. - Thanks to Siegmar Gross for reporting this issue. -- Add configure check to prevent trying to build this release of - Open MPI with an external hwloc 2.0 or newer release. -- Add ability to specify layered providers for OFI MTL. -- Fix a correctness issue with Open MPI's memory manager code - that could result in corrupted message data. Thanks to - Valentin Petrov for reporting. -- Fix issues encountered when using newer versions of PBS Pro. - Thanks to Petr Hanousek for reporting. -- Fix a problem with MPI_GET when using the vader BTL. Thanks - to Dahai Guo for reporting. -- Fix a problem when using MPI_ANY_SOURCE with MPI_SENDRECV_REPLACE. - Thanks to Dahai Guo for reporting. -- Fix a problem using MPI_FILE_OPEN with a communicator with an - attached cartesian topology. Thanks to Wei-keng Liao for reporting. 
-- Remove IB XRC support from the OpenIB BTL due to lack of support. -- Remove support for big endian PowerPC. -- Remove support for XL compilers older than v13.1 - -2.0.3 -- June 2017 ------------------- - -Bug fixes/minor improvements: - - - Fix a problem with MPI_IALLTOALLW when zero size messages are present. - Thanks to @mathbird for reporting. - - Add missing MPI_USER_FUNCTION definition to the mpi_f08 module. - Thanks to Nathan Weeks for reporting this issue. - - Fix a problem with MPI_WIN_LOCK not returning an error code when - a negative rank is supplied. Thanks to Jeff Hammond for reporting and - providing a fix. - - Fix a problem with make check that could lead to hangs. Thanks to - Nicolas Morey-Chaisemartin for reporting. - - Resolve a symbol conflict problem with PMI-1 and PMI-2 PMIx components. - Thanks to Kilian Cavalotti for reporting this issue. - - Insure that memory allocations returned from MPI_WIN_ALLOCATE_SHARED are - 64 byte aligned. Thanks to Joseph Schuchart for reporting this issue. - - Make use of DOUBLE_COMPLEX, if available, for Fortran bindings. Thanks - to Alexander Klein for reporting this issue. - - Add missing MPI_T_PVAR_SESSION_NULL definition to Open MPI mpi.h include - file. Thanks to Omri Mor for reporting and fixing. - - Fix a problem with use of MPI shared file pointers when accessing - a file from independent jobs. Thanks to Nicolas Joly for reporting - this issue. - - Optimize zero size MPI_IALLTOALL{V,W} with MPI_IN_PLACE. Thanks to - Lisandro Dalcín for the report. - - Fix a ROMIO buffer overflow problem for large transfers when using NFS - filesystems. - - Fix type of MPI_ARGV[S]_NULL which prevented it from being used - properly with MPI_COMM_SPAWN[_MULTIPLE] in the mpi_f08 module. - - Ensure to add proper linker flags to the wrapper compilers for - dynamic libraries on platforms that need it (e.g., RHEL 7.3 and - later). 
- - Get better performance on TCP-based networks 10Gbps and higher by - using OS defaults for buffer sizing. - - Fix a bug with MPI_[R][GET_]ACCUMULATE when using DARRAY datatypes. - - Fix handling of --with-lustre configure command line argument. - Thanks to Prentice Bisbal and Tim Mattox for reporting the issue. - - Added MPI_AINT_ADD and MPI_AINT_DIFF declarations to mpif.h. Thanks - to Aboorva Devarajan (@AboorvaDevarajan) for the bug report. - - Fix a problem in the TCP BTL when Open MPI is initialized with - MPI_THREAD_MULTIPLE support. Thanks to Evgueni Petro for analyzing and - reporting this issue. - - Fix yalla PML to properly handle underflow errors, and fixed a - memory leak with blocking non-contiguous sends. - - Restored ability to run autogen.pl on official distribution tarballs - (although this is still not recommended for most users!). - - Fix accuracy problems with MPI_WTIME on some systems by always using - either clock_gettime(3) or gettimeofday(3). - - Fix a problem where MPI_WTICK was not returning a higher time resolution - when available. Thanks to Mark Dixon for reporting this issue. - - Restore SGE functionality. Thanks to Kevin Buckley for the initial - report. - - Fix external hwloc compilation issues, and extend support to allow - using external hwloc installations as far back as v1.5.0. Thanks to - Orion Poplawski for raising the issue. - - Added latest Mellanox Connect-X and Chelsio T-6 adapter part IDs to - the openib list of default values. - - Do a better job of cleaning up session directories (e.g., in /tmp). - - Update a help message to indicate how to suppress a warning about - no high performance networks being detected by Open MPI. Thanks to - Susan Schwarz for reporting this issue. - - Fix a problem with mangling of custom CFLAGS when configuring Open MPI. - Thanks to Phil Tooley for reporting. - - Fix some minor memory leaks and remove some unused variables. - Thanks to Joshua Gerrard for reporting. 
- - Fix MPI_ALLGATHERV bug with MPI_IN_PLACE. - -Known issues (to be addressed in v2.0.4): - -- See the list of fixes slated for v2.0.4 here: - https://github.com/open-mpi/ompi/milestone/29 - -2.0.2 -- 26 January 2017 -------------------------- - -Bug fixes/minor improvements: - -- Fix a problem with MPI_FILE_WRITE_SHARED when using MPI_MODE_APPEND and - Open MPI's native MPI-IO implementation. Thanks to Nicolas Joly for - reporting. -- Fix a typo in the MPI_WIN_GET_NAME man page. Thanks to Nicolas Joly - for reporting. -- Fix a race condition with ORTE's session directory setup. Thanks to - @tbj900 for reporting this issue. -- Fix a deadlock issue arising from Open MPI's approach to catching calls to - munmap. Thanks to Paul Hargrove for reporting and helping to analyze this - problem. -- Fix a problem with PPC atomics which caused make check to fail unless builtin - atomics configure option was enabled. Thanks to Orion Poplawski for reporting. -- Fix a problem with use of x86_64 cpuid instruction which led to segmentation - faults when Open MPI was configured with -O3 optimization. Thanks to Mark - Santcroos for reporting this problem. -- Fix a problem when using built in atomics configure options on PPC platforms - when building 32 bit applications. Thanks to Paul Hargrove for reporting. -- Fix a problem with building Open MPI against an external hwloc installation. - Thanks to Orion Poplawski for reporting this issue. -- Remove use of DATE in the message queue version string reported to debuggers to - insure bit-wise reproducibility of binaries. Thanks to Alastair McKinstry - for help in fixing this problem. -- Fix a problem with early exit of a MPI process without calling MPI_FINALIZE - or MPI_ABORT that could lead to job hangs. Thanks to Christof Koehler for - reporting. -- Fix a problem with forwarding of SIGTERM signal from mpirun to MPI processes - in a job. 
Thanks to Noel Rycroft for reporting this problem -- Plug some memory leaks in MPI_WIN_FREE discovered using Valgrind. Thanks - to Joseph Schuchart for reporting. -- Fix a problem with MPI_NEIGHBOR_ALLTOALL when using a communicator with an empty topology - graph. Thanks to Daniel Ibanez for reporting. -- Fix a typo in a PMIx component help file. Thanks to @njoly for reporting this. -- Fix a problem with Valgrind false positives when using Open MPI's internal memchecker. - Thanks to Yvan Fournier for reporting. -- Fix a problem with MPI_FILE_DELETE returning MPI_SUCCESS when - deleting a non-existent file. Thanks to Wei-keng Liao for reporting. -- Fix a problem with MPI_IMPROBE that could lead to hangs in subsequent MPI - point to point or collective calls. Thanks to Chris Pattison for reporting. -- Fix a problem when configuring Open MPI for powerpc with --enable-mpi-cxx - enabled. Thanks to Alastair McKinstry for reporting. -- Fix a problem using MPI_IALLTOALL with MPI_IN_PLACE argument. Thanks to - Chris Ward for reporting. -- Fix a problem using MPI_RACCUMULATE with the Portals4 transport. Thanks to - @PDeveze for reporting. -- Fix an issue with static linking and duplicate symbols arising from PMIx - Slurm components. Thanks to Limin Gu for reporting. -- Fix a problem when using MPI dynamic memory windows. Thanks to - Christoph Niethammer for reporting. -- Fix a problem with Open MPI's pkgconfig files. Thanks to Alastair McKinstry - for reporting. -- Fix a problem with MPI_IREDUCE when the same buffer is supplied for the - send and recv buffer arguments. Thanks to Valentin Petrov for reporting. -- Fix a problem with atomic operations on PowerPC. Thanks to Paul - Hargrove for reporting. 
- -Known issues (to be addressed in v2.0.3): - -- See the list of fixes slated for v2.0.3 here: - https://github.com/open-mpi/ompi/milestone/23 - -2.0.1 -- 2 September 2016 ------------------------ - -Bug fixes/minor improvements: - -- Short message latency and message rate performance improvements for - all transports. -- Fix shared memory performance when using RDMA-capable networks. - Thanks to Tetsuya Mishima and Christoph Niethammer for reporting. -- Fix bandwidth performance degradation in the yalla (MXM) PML. Thanks - to Andreas Kempf for reporting the issue. -- Fix OpenSHMEM crash when running on non-Mellanox MXM-based networks. - Thanks to Debendra Das for reporting the issue. -- Fix a crash occurring after repeated calls to MPI_FILE_SET_VIEW with - predefined datatypes. Thanks to Eric Chamberland and Matthew - Knepley for reporting and helping chase down this issue. -- Fix stdin propagation to MPI processes. Thanks to Jingchao Zhang - for reporting the issue. -- Fix various runtime and portability issues by updating the PMIx - internal component to v1.1.5. -- Fix process startup failures on Intel MIC platforms due to very - large entries in /proc/mounts. -- Fix a problem with use of relative path for specifying executables to - mpirun/oshrun. Thanks to David Schneider for reporting. -- Various improvements when running over portals-based networks. -- Fix thread-based race conditions with GNI-based networks. -- Fix a problem with MPI_FILE_CLOSE and MPI_FILE_SET_SIZE. Thanks - to Cihan Altinay for reporting. -- Remove all use of rand(3) from within Open MPI so as not to perturb - applications' use of it. Thanks to Matias Cabral and Noel Rycroft - for reporting. -- Fix crash in MPI_COMM_SPAWN. -- Fix types for MPI_UNWEIGHTED and MPI_WEIGHTS_EMPTY. Thanks to - Lisandro Dalcín for reporting. -- Correctly report the name of MPI_INTEGER16. -- Add some missing MPI constants to the Fortran bindings. 
-- Fixed compile error when configuring Open MPI with --enable-timing. -- Correctly set the shared library version of libompitrace.so. Thanks - to Alastair McKinstry for reporting. -- Fix errors in the MPI_RPUT, MPI_RGET, MPI_RACCUMULATE, and - MPI_RGET_ACCUMULATE Fortran bindings. Thanks to Alfio Lazzaro and - Joost VandeVondele for tracking this down. -- Fix problems with use of derived datatypes in non-blocking - collectives. Thanks to Yuki Matsumoto for reporting. -- Fix problems with OpenSHMEM header files when using CMake. Thanks to - Paul Kapinos for reporting the issue. -- Fix problem with use of non-zero lower bound datatypes in - collectives. Thanks to Hristo Iliev for reporting. -- Fix a problem with memory allocation within MPI_GROUP_INTERSECTION. - Thanks to Lisandro Dalcín for reporting. -- Fix an issue with MPI_ALLGATHER for communicators that don't consist - of two ranks. Thanks to David Love for reporting. -- Various fixes for collectives when used with esoteric MPI datatypes. -- Fixed corner cases of handling DARRAY and HINDEXED_BLOCK datatypes. -- Fix a problem with filesystem type check for OpenBSD. - Thanks to Paul Hargrove for reporting. -- Fix some debug input within Open MPI internal functions. Thanks to - Durga Choudhury for reporting. -- Fix a typo in a configury help message. Thanks to Paul Hargrove for - reporting. -- Correctly support MPI_IN_PLACE in MPI_[I]ALLTOALL[V|W] and - MPI_[I]EXSCAN. -- Fix alignment issues on SPARC platforms. 
- -Known issues (to be addressed in v2.0.2): - -- See the list of fixes slated for v2.0.2 here: - https://github.com/open-mpi/ompi/milestone/20, and - https://github.com/open-mpi/ompi-release/milestone/19 - (note that the "ompi-release" Github repo will be folded/absorbed - into the "ompi" Github repo at some point in the future) - - -2.0.0 -- 12 July 2016 ---------------------- - - ********************************************************************** - * Open MPI is now fully MPI-3.1 compliant - ********************************************************************** - -Major new features: - -- Many enhancements to MPI RMA. Open MPI now maps MPI RMA operations - on to native RMA operations for those networks which support this - capability. -- Greatly improved support for MPI_THREAD_MULTIPLE (when configured - with --enable-mpi-thread-multiple). -- Enhancements to reduce the memory footprint for jobs at scale. A - new MCA parameter, "mpi_add_procs_cutoff", is available to set the - threshold for using this feature. -- Completely revamped support for memory registration hooks when using - OS-bypass network transports. -- Significant OMPIO performance improvements and many bug fixes. -- Add support for PMIx - Process Management Interface for Exascale. - Version 1.1.2 of PMIx is included internally in this release. -- Add support for PLFS file systems in Open MPI I/O. -- Add support for UCX transport. -- Simplify build process for Cray XC systems. Add support for - using native SLURM. -- Add a --tune mpirun command line option to simplify setting many - environment variables and MCA parameters. -- Add a new MCA parameter "orte_default_dash_host" to offer an analogue - to the existing "orte_default_hostfile" MCA parameter. -- Add the ability to specify the number of desired slots in the mpirun - --host option. 
- -Changes in behavior compared to prior versions: - -- In environments where mpirun cannot automatically determine the - number of slots available (e.g., when using a hostfile that does not - specify "slots", or when using --host without specifying a ":N" - suffix to hostnames), mpirun now requires the use of "-np N" to - specify how many MPI processes to launch. -- The MPI C++ bindings -- which were removed from the MPI standard in - v3.0 -- are no longer built by default and will be removed in some - future version of Open MPI. Use the --enable-mpi-cxx-bindings - configure option to build the deprecated/removed MPI C++ bindings. -- ompi_info now shows all components, even if they do not have MCA - parameters. The prettyprint output now separates groups with a - dashed line. -- OMPIO is now the default implementation of parallel I/O, with the - exception for Lustre parallel filesystems (where ROMIO is still the - default). The default selection of OMPI vs. ROMIO can be controlled - via the "--mca io ompi|romio" command line switch to mpirun. -- Per Open MPI's versioning scheme (see the README), increasing the - major version number to 2 indicates that this version is not - ABI-compatible with prior versions of Open MPI. You will need to - recompile MPI and OpenSHMEM applications to work with this version - of Open MPI. -- Removed checkpoint/restart code due to loss of maintainer. :-( -- Change the behavior for handling certain signals when using PSM and - PSM2 libraries. Previously, the PSM and PSM2 libraries would trap - certain signals in order to generate tracebacks. The mechanism was - found to cause issues with Open MPI's own error reporting mechanism. - If not already set, Open MPI now sets the IPATH_NO_BACKTRACE and - HFI_NO_BACKTRACE environment variables to disable PSM/PSM2's - handling these signals. - -Removed legacy support: - -- Removed support for OS X Leopard. -- Removed support for Cray XT systems. -- Removed VampirTrace. 
-- Removed support for Myrinet/MX. -- Removed legacy collective module: ML. -- Removed support for Alpha processors. -- Removed --enable-mpi-profiling configure option. - -Known issues (to be addressed in v2.0.1): - -- See the list of fixes slated for v2.0.1 here: - https://github.com/open-mpi/ompi/milestone/16, and - https://github.com/open-mpi/ompi-release/milestone/16 - (note that the "ompi-release" Github repo will be folded/absorbed - into the "ompi" Github repo at some point in the future) - -- ompi-release#986: Fix data size counter for large ops with fcoll/static -- ompi-release#987: Fix OMPIO performance on Lustre -- ompi-release#1013: Fix potential inconsistency in btl/openib default settings -- ompi-release#1014: Do not return MPI_ERR_PENDING from collectives -- ompi-release#1056: Remove dead profile code from oshmem -- ompi-release#1081: Fix MPI_IN_PLACE checking for IALLTOALL{V|W} -- ompi-release#1081: Fix memchecker in MPI_IALLTOALLW -- ompi-release#1081: Support MPI_IN_PLACE in MPI_(I)ALLTOALLW and MPI_(I)EXSCAN -- ompi-release#1107: Allow future PMIx support for RM spawn limits -- ompi-release#1108: Fix sparse group process reference counting -- ompi-release#1109: If specified to be oversubscribed, disable binding -- ompi-release#1122: Allow NULL arrays for empty datatypes -- ompi-release#1123: Fix signed vs. 
unsigned compiler warnings -- ompi-release#1123: Make max hostname length uniform across code base -- ompi-release#1127: Fix MPI_Compare_and_swap -- ompi-release#1127: Fix MPI_Win_lock when used with MPI_Win_fence -- ompi-release#1132: Fix typo in help message for --enable-mca-no-build -- ompi-release#1154: Ensure pairwise coll algorithms disqualify themselves properly -- ompi-release#1165: Fix typos in debugging/verbose message output -- ompi-release#1178: Fix ROMIO filesystem check on OpenBSD 5.7 -- ompi-release#1197: Fix Fortran pthread configure check -- ompi-release#1205: Allow using external PMIx 1.1.4 and 2.0 -- ompi-release#1215: Fix configure to support the NAG Fortran compiler -- ompi-release#1220: Fix combiner args for MPI_HINDEXED_BLOCK -- ompi-release#1225: Fix combiner args for MPI_DARRAY -- ompi-release#1226: Disable old memory hooks with recent gcc versions -- ompi-release#1231: Fix new "patcher" support for some XLC platforms -- ompi-release#1244: Fix Java error handling -- ompi-release#1250: Ensure TCP is not selected for RDMA operations -- ompi-release#1252: Fix verbose output in coll selection -- ompi-release#1253: Set a default name for user-defined MPI_Op -- ompi-release#1254: Add count==0 checks in some non-blocking colls -- ompi-release#1258: Fix "make distclean" when using external pmix/hwloc/libevent -- ompi-release#1260: Clean up/uniform mca/coll/base memory management -- ompi-release#1261: Remove "patcher" warning message for static builds -- ompi-release#1263: Fix IO MPI_Request for 0-size read/write -- ompi-release#1264: Add blocking fence for SLURM operations - -Bug fixes / minor enhancements: - -- Updated internal/embedded copies of third-party software: - - Update the internal copy of ROMIO to that which shipped in MPICH - 3.1.4. - - Update internal copy of libevent to v2.0.22. - - Update internal copy of hwloc to v1.11.2. 
-- Notable new MCA parameters: - - opal_progress_lp_call_ration: Control how often low-priority - callbacks are made during Open MPI's main progress loop. - - opal_common_verbs_want_fork_support: This replaces the - btl_openib_want_fork_support parameter. -- Add --with-platform-patches-dir configure option. -- Add --with-pmi-libdir configure option for environments that install - PMI libs in a non-default location. -- Various configure-related compatibility updates for newer versions - of libibverbs and OFED. -- Numerous fixes/improvements to orte-dvm. Special thanks to Mark - Santcroos for his help. -- Fix a problem with timer code on ia32 platforms. Thanks to - Paul Hargrove for reporting this and providing a patch. -- Fix a problem with use of a 64 bit atomic counter. Thanks to - Paul Hargrove for reporting. -- Fix a problem with singleton job launching. Thanks to Lisandro - Dalcín for reporting. -- Fix a problem with use of MPI_UNDEFINED with MPI_COMM_SPLIT_TYPE. - Thanks to Lisandro Dalcín for reporting. -- Silence a compiler warning in PSM MTL. Thanks to Adrian Reber for - reporting this. -- Properly detect Intel TrueScale and OmniPath devices in the ACTIVE - state. Thanks to Durga Choudhury for reporting the issue. -- Fix detection and use of Solaris Studio 12.5 (beta) compilers. - Thanks to Paul Hargrove for reporting and debugging. -- Fix various small memory leaks. -- Allow NULL arrays when creating empty MPI datatypes. -- Replace use of alloca with malloc for certain datatype creation - functions. Thanks to Bogdan Sataric for reporting this. -- Fix use of MPI_LB and MPI_UB in creation of of certain MPI datatypes. - Thanks to Gus Correa for helping to fix this. -- Implement a workaround for a GNU Libtool problem. Thanks to Eric - Schnetter for reporting and fixing. -- Improve hcoll library detection in configure. Thanks to David - Shrader and Åke Sandgren for reporting this. -- Miscellaneous minor bug fixes in the hcoll component. 
-- Miscellaneous minor bug fixes in the ugni component. -- Fix problems with XRC detection in OFED 3.12 and older releases. - Thanks to Paul Hargrove for his analysis of this problem. -- Update (non-standard/experimental) Java MPI interfaces to support - MPI-3.1 functionality. -- Fix an issue with MCA parameters for Java bindings. Thanks to - Takahiro Kawashima and Siegmar Gross for reporting this issue. -- Fix a problem when using persistent requests in the Java bindings. - Thanks to Nate Chambers for reporting. -- Fix problem with Java bindings on OX X 10.11. Thanks to Alexander - Daryin for reporting this issue. -- Fix a performance problem for large messages for Cray XC systems. - Thanks to Jerome Vienne for reporting this. -- Fix an issue with MPI_WIN_LOCK_ALL. Thanks to Thomas Jahns for - reporting. -- Fix an issue with passing a parameter to configure multiple times. - Thanks to QuesarVII for reporting and supplying a fix. -- Add support for ALPS resource allocation system on Cray CLE 5.2 and - later. Thanks to Mark Santcroos. -- Corrections to the HACKING file. Thanks to Maximilien Levesque. -- Fix an issue with user supplied reduction operator functions. - Thanks to Rupert Nash for reporting this. -- Fix an issue with an internal list management function. Thanks to - Adrian Reber for reporting this. -- Fix a problem with MPI-RMA PSCW epochs. Thanks to Berk Hess for - reporting this. -- Fix a problem in neighborhood collectives. Thanks to Lisandro - Dalcín for reporting. -- Fix MPI_IREDUCE_SCATTER_BLOCK for a one-process communicator. Thanks - to Lisandro Dalcín for reporting. -- Add (Open MPI-specific) additional flavors to MPI_COMM_SPLIT_TYPE. - See MPI_Comm_split_type(3) for details. Thanks to Nick Andersen for - supplying this enhancement. -- Improve closing of file descriptors during the job launch phase. - Thanks to Piotr Lesnicki for reporting and providing this - enhancement. 
-- Fix a problem in MPI_GET_ACCUMULATE and MPI_RGET_ACCUMULATE when - using Portals4. Thanks to Nicolas Chevalier for reporting. -- Use correct include file for lstat prototype in ROMIO. Thanks to - William Throwe for finding and providing a fix. -- Add missing Fortran bindings for MPI_WIN_ALLOCATE. Thanks to Christoph - Niethammer for reporting and fixing. -- Fortran related fixes to handle Intel 2016 compiler. Thanks to - Fabrice Roy for reporting this. -- Fix a Fortran linkage issue. Thanks to Macro Atzeri for finding and - suggesting a fix. -- Fix problem with using BIND(C) for Fortran bindings with logical - parameters. Thanks to Paul Romano for reporting. -- Fix an issue with use of DL-related macros in opal library. Thanks to - Scott Atchley for finding this. -- Fix an issue with parsing mpirun command line options which contain - colons. Thanks to Lev Given for reporting. -- Fix a problem with Open MPI's package configury files. Thanks to - Christoph Junghans for reporting. -- Fix a typo in the MPI_INTERCOMM_MERGE man page. Thanks To Harald - Servat for reporting and correcting. -- Update man pages for non-blocking sends per MPI 3.1 standard. - Thanks to Alexander Pozdneev for reporting. -- Fix problem when compiling against PVFS2. Thanks to Dave Love for - reporting. -- Fix problems with MPI_NEIGHBOR_ALLTOALL{V,W}. Thanks to Willem - Vermin for reporting this issue. -- Fix various compilation problems on Cygwin. Thanks to Marco Atzeri - for supplying these fixes. -- Fix problem with resizing of subarray and darray data types. Thanks - to Keith Bennett and Dan Garmann for reporting. -- Fix a problem with MPI_COMBINER_RESIZED. Thanks to James Ramsey for - the report. -- Fix an hwloc binding issue. Thanks to Ben Menadue for reporting. -- Fix a problem with the shared memory (sm) BTL. Thanks to Peter Wind - for the report. -- Fixes for heterogeneous support. Thanks to Siegmar Gross for reporting. -- Fix a problem with memchecker. 
Thanks to Clinton Simpson for reporting. -- Fix a problem with MPI_UNWEIGHTED in topology functions. Thanks to - Jun Kudo for reporting. -- Fix problem with a MCA parameter base filesystem types. Thanks to - Siegmar Gross for reporting. -- Fix a problem with some windows info argument types. Thanks to - Alastair McKinstry for reporting. - - -1.10.7 - 16 May 2017 ------- -- Fix bug in TCP BTL that impacted performance on 10GbE (and faster) - networks by not adjusting the TCP send/recv buffer sizes and using - system default values -- Add missing MPI_AINT_ADD and MPI_AINT_DIFF function declarations in - mpif.h -- Fixed time reported by MPI_WTIME; it was previously reported as - dependent upon the CPU frequency. -- Fix platform detection on FreeBSD -- Fix a bug in the handling of MPI_TYPE_CREATE_DARRAY in - MPI_(R)(GET_)ACCUMULATE -- Fix openib memory registration limit calculation -- Add missing MPI_T_PVAR_SESSION_NULL in mpi.h -- Fix "make distcheck" when using external hwloc and/or libevent packages -- Add latest ConnectX-5 vendor part id to OpenIB device params -- Fix race condition in the UCX PML -- Fix signal handling for rsh launcher -- Fix Fortran compilation errors by removing MPI_SIZEOF in the Fortran - interfaces when the compiler does not support it -- Fixes for the pre-ignore-TKR "mpi" Fortran module implementation - (i.e., for older Fortran compilers -- these problems did not exist - in the "mpi" module implementation for modern Fortran compilers): - - Add PMPI_* interfaces - - Fix typo in MPI_FILE_WRITE_AT_ALL_BEGIN interface name - - Fix typo in MPI_FILE_READ_ORDERED_BEGIN interface name -- Fixed the type of MPI_DISPLACEMENT_CURRENT in all Fortran interfaces - to be an INTEGER(KIND=MPI_OFFSET_KIND). -- Fixed typos in MPI_INFO_GET_* man pages. 
Thanks to Nicolas Joly for - the patch -- Fix typo bugs in wrapper compiler script - - -1.10.6 - 17 Feb 2017 ------- -- Fix bug in timer code that caused problems at optimization settings - greater than 2 -- OSHMEM: make mmap allocator the default instead of sysv or verbs -- Support MPI_Dims_create with dimension zero -- Update USNIC support -- Prevent 64-bit overflow on timer counter -- Add support for forwarding signals -- Fix bug that caused truncated messages on large sends over TCP BTL -- Fix potential infinite loop when printing a stacktrace - - -1.10.5 - 19 Dec 2016 ------- -- Update UCX APIs -- Fix bug in darray that caused MPI/IO failures -- Use a MPI_Get_library_version() like string to tag the debugger DLL. - Thanks to Alastair McKinstry for the report -- Fix multi-threaded race condition in coll/libnbc -- Several fixes to OSHMEM -- Fix bug in UCX support due to uninitialized field -- Fix MPI_Ialltoallv with MPI_IN_PLACE and without MPI param check -- Correctly reset receive request type before init. Thanks Chris Pattison - for the report and test case. -- Fix bug in iallgather[v] -- Fix concurrency issue with MPI_Comm_accept. Thanks to Pieter Noordhuis - for the patch -- Fix ompi_coll_base_{gather,scatter}_intra_binomial -- Fixed an issue with MPI_Type_get_extent returning the wrong extent - for distributed array datatypes. -- Re-enable use of rtdtsc instruction as a monotonic clock source if - the processor has a core-invariant tsc. This is a partial fix for a - performance regression introduced in Open MPI v1.10.3. - - -1.10.4 - 01 Sept 2016 ------- - -- Fix assembler support for MIPS -- Improve memory handling for temp buffers in collectives -- Fix [all]reduce with non-zero lower bound datatypes - Thanks Hristo Iliev for the report -- Fix non-standard ddt handling. Thanks Yuki Matsumoto for the report -- Various libnbc fixes. Thanks Yuki Matsumoto for the report -- Fix typos in request RMA bindings for Fortran. 
Thanks to @alazzaro - and @vondele for the assist -- Various bug fixes and enhancements to collective support -- Fix predefined types mapping in hcoll -- Revive the coll/sync component to resolve unexpected message issues - during tight loops across collectives -- Fix typo in wrapper compiler for Fortran static builds - - -1.10.3 - 15 June 2016 ------- - -- Fix zero-length datatypes. Thanks to Wei-keng Liao for reporting - the issue. -- Minor manpage cleanups -- Implement atomic support in OSHMEM/UCX -- Fix support of MPI_COMBINER_RESIZED. Thanks to James Ramsey - for the report -- Fix computation of #cpus when --use-hwthread-cpus is used -- Add entry points for Allgatherv, iAllgatherv, Reduce, and iReduce - for the HCOLL library -- Fix an HCOLL integration bug that could signal completion of request - while still being worked -- Fix computation of cores when SMT is enabled. Thanks to Ben Menadue - for the report -- Various USNIC fixes -- Create a datafile in the per-proc directory in order to make it - unique per communicator. Thanks to Peter Wind for the report -- Fix zero-size malloc in one-sided pt-to-pt code. Thanks to Lisandro - Dalcín for the report -- Fix MPI_Get_address when passed MPI_BOTTOM to not return an error. - Thanks to Lisandro Dalcín for the report -- Fix MPI_TYPE_SET_ATTR with NULL value. Thanks to Lisandro Dalcín for - the report -- Fix various Fortran08 binding issues -- Fix memchecker no-data case. Thanks to Clinton Stimpson for the report -- Fix CUDA support under OS-X -- Fix various OFI/MTL integration issues -- Add MPI_T man pages -- Fix one-sided pt-to-pt issue by preventing communication from happening - before a target enters a fence, even in the no-precede case -- Fix a bug that disabled Totalview for MPMD use-case -- Correctly support MPI_UNWEIGHTED in topo-graph-neighbors. 
Thanks to - Jun Kudo for the report -- Fix singleton operations under SLURM when PMI2 is enabled -- Do not use MPI_IN_PLACE in neighborhood collectives for non-blocking - collectives (libnbc). Thanks to Jun Kudo for the report -- Silence autogen deprecation warnings for newer versions of Perl -- Do not return MPI_ERR_PENDING from collectives -- Use type int* for MPI_WIN_DISP_UNIT, MPI_WIN_CREATE_FLAVOR, and MPI_WIN_MODEL. - Thanks to Alastair McKinstry for the report -- Fix register_datarep stub function in IO/OMPIO. Thanks to Eric - Chamberland for the report -- Fix a bus error on MPI_WIN_[POST,START] in the shared memory one-sided component -- Add several missing MPI_WIN_FLAVOR constants to the Fortran support -- Enable connecting processes from different subnets using the openib BTL -- Fix bug in basic/barrier algorithm in OSHMEM -- Correct process binding for the --map-by node case -- Include support for subnet-to-subnet routing over InfiniBand networks -- Fix usnic resource check -- AUTHORS: Fix an errant reference to Subversion IDs -- Fix affinity for MPMD jobs running under LSF -- Fix many Fortran binding bugs -- Fix `MPI_IN_PLACE`-related bugs -- Fix PSM/PSM2 support for singleton operations -- Ensure MPI transports continue to progress during RTE barriers -- Update HWLOC to 1.9.1 end-of-series -- Fix a bug in the Java command line parser when the - -Djava.library.path options was given by the user -- Update the MTL/OFI provider selection behavior -- Add support for clock_gettime on Linux. 
-- Correctly detect and configure for Solaris Studio 12.5 - beta compilers -- Correctly compute #slots when -host is used for MPMD case -- Fix a bug in the hcoll collectives due to an uninitialized field -- Do not set a binding policy when oversubscribing a node -- Fix hang in intercommunicator operations when oversubscribed -- Speed up process termination during MPI_Abort -- Disable backtrace support by default in the PSM/PSM2 libraries to - prevent unintentional conflicting behavior. - - - -1.10.2: 26 Jan 2016 -------------------- - - ********************************************************************** - * OSHMEM is now 1.2 compliant - ********************************************************************** - -- Fix NBC_Copy for legitimate zero-size messages -- Fix multiple bugs in OSHMEM -- Correctly handle mpirun --host @ -- Centralize two MCA params to avoid duplication between OMPI and - OSHMEM layers: opal_abort_delay and opal_abort_print_stack -- Add support for Fujitsu compilers -- Add UCX support for OMPI and OSHMEM -- Correctly handle oversubscription when not given directives - to permit it. Thanks to @ammore1 for reporting it -- Fix rpm spec file to not include the /usr directory -- Add Intel HFI1 default parameters for the openib BTL -- Resolve symbol conflicts in the PSM2 library -- Add ability to empty the rgpusm cache when full if requested -- Fix another libtool bug when -L requires a space between it - and the path. Thanks to Eric Schnetter for the patch. -- Add support for OSHMEM v1.2 APIs -- Improve efficiency of oshmem_preconnect_all algorithm -- Fix bug in buffered sends support -- Fix double free in edge case of mpirun. Thanks to @jsharpe for - the patch -- Multiple one-sided support fixes -- Fix integer overflow in the tuned "reduce" collective when - using buffers larger than INT_MAX in size -- Fix parse of user environment variables in mpirun. 
Thanks to - Stefano Garzarella for the patch -- Performance improvements in PSM2 support -- Fix NBS iBarrier for inter-communicators -- Fix bug in vader BTL during finalize -- Improved configure support for Fortran compilers -- Fix rank_file mapper to support default --slot-set. Thanks - to Matt Thompson for reporting it -- Update MPI_Testsome man page. Thanks to Eric Schnetter for - the suggestion -- Fix missing resize of the returned type for subarray and - darray types. Thanks to Keith Bennett and Dan Garmann for - reporting it -- Fix Java support on OSX 10.11. Thanks to Alexander Daryin - for reporting the problem -- Fix some compilation issues on Solaris 11.2. Thanks to - Paul Hargrove for his continued help in such areas - - -1.10.1: 4 Nov 2015 ------------------- - -- Workaround an optimization problem with gcc compilers >= 4.9.2 that - causes problems with memory registration, and forced - mpi_leave_pinned to default to 0 (i.e., off). Thanks to @oere for - the fix. -- Fix use of MPI_LB and MPI_UB in subarray and darray datatypes. - Thanks to Gus Correa and Dimitar Pashov for pointing out the issue. -- Minor updates to mpi_show_mpi_alloc_mem_leaks and - ompi_debug_show_handle_leaks functionality. -- Fix segv when invoking non-blocking reductions with a user-defined - operation. Thanks to Rupert Nash and Georg Geiser for identifying - the issue. -- No longer probe for PCI topology on Solaris (unless running as root). -- Fix for Intel Parallel Studio 2016 ifort partial support of the - !GCC$ pragma. Thanks to Fabrice Roy for reporting the problem. -- Bunches of Coverity / static analysis fixes. -- Fixed ROMIO to look for lstat in . Thanks to William - Throwe for submitting the patch both upstream and to Open MPI. -- Fixed minor memory leak when attempting to open plugins. -- Fixed type in MPI_IBARRIER C prototype. Thanks to Harald Servat for - reporting the issue. 
-- Add missing man pages for MPI_WIN_CREATE_DYNAMIC, MPI_WIN_ATTACH, - MPI_WIN_DETACH, MPI_WIN_ALLOCATE, MPI_WIN_ALLOCATE_SHARED. -- When mpirun-launching new applications, only close file descriptors - that are actually open (resulting in a faster launch in some - environments). -- Fix "test ==" issues in Open MPI's configure script. Thank to Kevin - Buckley for pointing out the issue. -- Fix performance issue in usnic BTL: ensure progress thread is - throttled back to not aggressively steal CPU cycles. -- Fix cache line size detection on POWER architectures. -- Add missing #include in a few places. Thanks to Orion Poplawski for - supplying the patch. -- When OpenSHMEM building is disabled, no longer install its header - files, help files, or man pages. Add man pages for oshrun, oshcc, - and oshfort. -- Fix mpi_f08 implementations of MPI_COMM_SET_INFO, and profiling - versions of MPI_BUFFER_DETACH, MPI_WIN_ALLOCATE, - MPI_WIN_ALLOCATE_SHARED, MPI_WTICK, and MPI_WTIME. -- Add orte_rmaps_dist_device MCA param, allowing users to map near a - specific device. -- Various updates/fixes to the openib BTL. -- Add missing defaults for the Mellanox ConnectX 3 card to the openib BTL. -- Minor bug fixes in the OFI MTL. -- Various updates to Mellanox's MXM, hcoll, and FCA components. -- Add OpenSHMEM man pages. Thanks to Tony Curtis for sharing the man - pages files from openshmem.org. -- Add missing "const" attributes to MPI_COMPARE_AND_SWAP, - MPI_FETCH_AND_OP, MPI_RACCUMULATE, and MPI_WIN_DETACH prototypes. - Thanks to Michael Knobloch and Takahiro Kawashima for bringing this - to our attention. -- Fix linking issues on some platforms (e.g., SLES 12). -- Fix hang on some corner cases when MPI applications abort. -- Add missing options to mpirun man page. Thanks to Daniel Letai - for bringing this to our attention. 
-- Add new --with-platform-patches-dir configure option -- Adjust relative selection priorities to ensure that MTL - support is favored over BTL support when both are available -- Use CUDA IPC for all sized messages for performance - - -1.10.0: 25 Aug 2015 -------------------- - -** NOTE: The v1.10.0 release marks the transition to Open MPI's new -** version numbering scheme. The v1.10.x release series is based on -** the v1.8.x series, but with a few new features. v2.x will be the -** next series after the v1.10.x series, and complete the transition -** to the new version numbering scheme. See README for more details -** on the new versioning scheme. -** -** NOTE: In accordance with OMPI version numbering, the v1.10 is *not* -** API compatible with the v1.8 release series. - -- Added libfabric support (see README for more details): - - usNIC BTL updated to use libfabric. - - Added OFI MTL (usable with PSM in libfabric v1.1.0). -- Added Intel Omni-Path support via new PSM2 MTL. -- Added "yalla" PML for faster MXM support. -- Removed support for MX -- Added persistent distributed virtual machine (pDVM) support for fast - workflow executions. -- Fixed typo in GCC inline assembly introduced in Open MPI v1.8.8. - Thanks to Paul Hargrove for pointing out the issue. -- Add missing man pages for MPI_Win_get|set_info(3). -- Ensure that session directories are cleaned up at the end of a run. -- Fixed linking issues on some OSs where symbols of dependent - libraries are not automatically publicly available. -- Improve hcoll and fca configury library detection. Thanks to David - Shrader for helping track down the issue. -- Removed the LAMA mapper (for use in setting affinity). Its - functionality has been largely superseded by other mpirun CLI - options. -- CUDA: Made the asynchronous copy mode be the default. -- Fix a malloc(0) warning in MPI_IREDUCE_SCATTER_BLOCK. Thanks to - Lisandro Dalcín for reporting the issue. -- Fix typo in MPI_Scatter(3) man page. 
Thanks to Akshay Venkatesh for - noticing the mistake. -- Add rudimentary protection from TCP port scanners. -- Fix typo in Open MPI error handling. Thanks to Åke Sandgren for - pointing out the error. -- Increased the performance of the CM PML (i.e., the Portals, PSM, - PSM2, MXM, and OFI transports). -- Restored visibility of blocking send requests in message queue - debuggers (e.g., TotalView, DDT). -- Fixed obscure IPv6-related bug in the TCP BTL. -- Add support for the "no_locks" MPI_Info key for one-sided - functionality. -- Fixed ibv_fork support for verbs-based networks. -- Fixed a variety of small bugs in OpenSHMEM. -- Fixed MXM configure with additional CPPFLAGS and LDFLAGS. Thanks to - David Shrader for the patch. -- Fixed incorrect memalign threshhold in the openib BTL. Thanks to - Xavier Besseron for pointing out the issue. - - -1.8.8: 5 Aug 2015 ------------------ - -- Fix a segfault in MPI_FINALIZE with the PSM MTL. -- Fix mpi_f08 sentinels (e.g., MPI_STATUS_IGNORE) handling. -- Set some additional MXM default values for OSHMEM. -- Fix an invalid memory access in MPI_MRECV and MPI_IMRECV. -- Include two fixes that were mistakenly left out of the official - v1.8.7 tarball: - - Fixed MPI_WIN_POST and MPI_WIN_START for zero-size messages - - Protect the OOB TCP ports from segfaulting when accessed by port - scanners - - -1.8.7: 15 Jul 2015 ------------------- - -** NOTE: v1.8.7 technically breaks ABI with prior versions -** in the 1.8 series because it repairs two incorrect API -** signatures. However, users will only need to recompile -** if they were using those functions - which they couldn't -** have been, because the signatures were wrong :-) - -- Plugged a memory leak that impacted blocking sends -- Fixed incorrect declaration for MPI_T_pvar_get_index and added - missing return code MPI_T_INVALID_NAME. 
-- Fixed an uninitialized variable in PMI2 support -- Added new vendor part id for Mellanox ConnectX4-LX -- Fixed NBC_Copy for legitimate zero-size messages -- Fixed MPI_Win_post and MPI_Win_start for zero-size messages -- Protect the OOB ports from segfaulting when accessed by port scanners -- Fixed several Fortran typos -- Fixed configure detection of XRC support -- Fixed support for highly heterogeneous systems to avoid - memory corruption when printing out the bindings - -1.8.6: 17 Jun 2015 ------------------- - -- Fixed memory leak on Mac OS-X exposed by TCP keepalive -- Fixed keepalive support to ensure that daemon/node failure - results in complete job cleanup -- Update Java binding support -- Fixed MPI_THREAD_MULTIPLE bug in vader shared memory BTL -- Fixed issue during shutdown when CUDA initialization wasn't complete -- Fixed orted environment when no prefix given -- Fixed trivial typo in MPI_Neighbor_allgather manpage -- Fixed tree-spawn support for sh and ksh shells -- Several data type fixes -- Fixed IPv6 support bug -- Cleaned up an unlikely build issue -- Fixed PMI2 process map parsing for cyclic mappings -- Fixed memalign threshold in openib BTL -- Fixed debugger access to message queues for blocking send/recv - - -1.8.5: 5 May 2015 ------------------ - -- Fixed configure problems in some cases when using an external hwloc - installation. Thanks to Erick Schnetter for reporting the error and - helping track down the source of the problem. -- Fixed linker error on OS X when using the clang compiler. Thanks to - Erick Schnetter for reporting the error and helping track down the - source of the problem. -- Fixed MPI_THREAD_MULTIPLE deadlock error in the vader BTL. Thanks - to Thomas Klimpel for reporting the issue. -- Fixed several Valgrind warnings. Thanks for Lisandro Dalcín for - contributing a patch fixing some one-sided code paths. -- Fixed version compatibility test in OOB that broke ABI within the - 1.8 series. 
NOTE: this will not resolve the problem between pre-1.8.5 - versions, but will fix it going forward. -- Fix some issues related to running on Intel Xeon Phi coprocessors. -- Opportunistically switch away from using GNU Libtool's libltdl - library when possible (by default). -- Fix some VampirTrace errors. Thanks to Paul Hargrove for reporting - the issues. -- Correct default binding patterns when --use-hwthread-cpus was - specified and nprocs <= 2. -- Fix warnings about -finline-functions when compiling with clang. -- Updated the embedded hwloc with several bug fixes, including the - "duplicate Lhwloc1 symbol" that multiple users reported on some - platforms. -- Do not error when mpirun is invoked with with default bindings - (i.e., no binding was specified), and one or more nodes do not - support bindings. Thanks to Annu Desari for pointing out the - problem. -- Let root invoke "mpirun --version" to check the version without - printing the "Don't run as root!" warnings. Thanks to Robert McLay - for the suggestion. -- Fixed several bugs in OpenSHMEM support. -- Extended vader shared memory support to 32-bit architectures. -- Fix handling of very large datatypes. Thanks to Bogdan Sataric for - the bug report. -- Fixed a bug in handling subarray MPI datatypes, and a bug when using - MPI_LB and MPI_UB. Thanks to Gus Correa for pointing out the issue. -- Restore user-settable bandwidth and latency PML MCA variables. -- Multiple bug fixes for cleanup during MPI_FINALIZE in unusual - situations. -- Added support for TCP keepalive signals to ensure timely termination - when sockets between daemons cannot be created (e.g., due to a - firewall). -- Added MCA parameter to allow full use of a SLURM allocation when - started from a tool (supports LLNL debugger). -- Fixed several bugs in the configure logic for PMI and hwloc. -- Fixed incorrect interface index in TCP communications setup. Thanks - to Mark Kettenis for spotting the problem and providing a patch. 
-- Fixed MPI_IREDUCE_SCATTER with single-process communicators when - MPI_IN_PLACE was not used. -- Added XRC support for OFED v3.12 and higher. -- Various updates and bug fixes to the Mellanox hcoll collective - support. -- Fix problems with Fortran compilers that did not support - REAL*16/COMPLEX*32 types. Thanks to Orion Poplawski for identifying - the issue. -- Fixed problem with rpath/runpath support in pkg-config files. - Thanks to Christoph Junghans for notifying us of the issue. -- Man page fixes: - - Removed erroneous "color" discussion from MPI_COMM_SPLIT_TYPE. - Thanks to Erick Schnetter for spotting the outdated text. - - Fixed prototypes for MPI_IBARRIER. Thanks to Maximilian for - finding the issue. - - Updated docs about buffer usage in non-blocking communications. - Thanks to Alexander Pozdneev for citing the outdated text. - - Added documentation about the 'ompi_unique' MPI_Info key with - MPI_PUBLISH_NAME. - - Fixed typo in MPI_INTERCOMM_MERGE. Thanks to Harald Servat for - noticing and sending a patch. - - Updated configure paths in HACKING. Thanks to Maximilien Levesque - for the fix. - - Fixed Fortran typo in MPI_WIN_LOCK_ALL. Thanks to Thomas Jahns - for pointing out the issue. -- Fixed a number of MPI one-sided bugs. -- Fixed MPI_COMM_SPAWN when invoked from a singleton job. -- Fixed a number of minor issues with CUDA support, including - registering of shared memory and supporting reduction support for - GPU buffers. -- Improved support for building OMPI on Cray platforms. -- Fixed performance regression introduced by the inadvertent default - enabling of MPI_THREAD_MULTIPLE support. - - -1.8.4: 19 Dec 2014 ------------------- - -- Fix MPI_SIZEOF; now available in mpif.h for modern Fortran compilers - (see README for more details). Also fixed various compiler/linker - errors. -- Fixed inadvertant Fortran ABI break between v1.8.1 and v1.8.2 in the - mpi interface module when compiled with gfortran >= v4.9. 
-- Fix various MPI_THREAD_MULTIPLE issues in the TCP BTL. -- mpirun no longer requires the --hetero-nodes switch; it will - automatically detect when running in heterogeneous scenarios. -- Update LSF support, to include revamped affinity functionality. -- Update embedded hwloc to v1.9.1. -- Fixed max registerable memory computation in the openib BTL. -- Updated error message when debuggers are unable to find various - symbols/types to be more clear. Thanks to Dave Love for raising the - issue. -- Added proper support for LSF and PBS/Torque libraries in static builds. -- Rankfiles now support physical processor IDs. -- Fixed potential hang in MPI_ABORT. -- Fixed problems with the PSM MTL and "re-connect" scenarios, such as - MPI_INTERCOMM_CREATE. -- Fix MPI_IREDUCE_SCATTER with a single process. -- Fix (rare) race condition in stdout/stderr funneling to mpirun where - some trailing output could get lost when a process terminated. -- Removed inadvertent change that set --enable-mpi-thread-multiple "on" - by default, thus impacting performance for non-threaded apps. -- Significantly reduced startup time by optimizing internal hash table - implementation. -- Fixed OS X linking with the Fortran mpi module when used with - gfortran >= 4.9. Thanks to Github user yafshar for raising the - issue. -- Fixed memory leak on Cygwin platforms. Thanks for Marco Atzeri for - reporting the issue. -- Fixed seg fault in neighborhood collectives when the degree of the - topology is higher than the communicator size. Thanks to Lisandro - Dalcín for reporting the issue. -- Fixed segfault in neighborhood collectives under certain use-cases. -- Fixed various issues regarding Solaris support. Thanks to Siegmar - Gross for patiently identifying all the issues. -- Fixed PMI configure tests for certain Slurm installation patterns. -- Fixed param registration issue in Java bindings. Thanks to Takahiro - Kawashima and Siegmar Gross for identifying the issue. -- Several man page fixes. 
-- Silence several warnings and close some memory leaks (more remain, - but it's better than it was). -- Re-enabled the use of CMA and knem in the shared memory BTL. -- Updated mpirun manpage to correctly explain new map/rank/binding options. -- Fixed MPI_IALLGATHER problem with intercommunicators. Thanks for - Takahiro Kawashima for the patch. -- Numerous updates and performance improvements to OpenSHMEM. -- Turned off message coalescing in the openib BTL until a proper fix - for that capability can be provided (tentatively expected for 1.8.5) -- Fix a bug in iof output that dates back to the dinosaurs which would - output extra bytes if the system was very heavily loaded -- Fix a bug where specifying mca_component_show_load_errors=0 could - cause ompi_info to segfault -- Updated valgrind suppression file - - -1.8.3: 26 Sep 2014 ------------------- - -- Fixed application abort bug to ensure that MPI_Abort exits appropriately - and returns the provided exit status -- Fixed some alignment (not all) issues identified by Clang -- Allow CUDA-aware to work with nonblocking collectives. Forces packing to - happen when using GPU buffers. -- Fixed configure test issue with Intel 2015 Fortran compiler -- Fixed some PGI-related errors -- Provide better help message when encountering a firewall -- Fixed MCA parameter quoting to protect multi-word params and params - that contain special characters -- Improved the bind-to help message to clarify the defaults -- Add new MPI-3.1 tools interface -- Several performance optimizations and memory leak cleanups -- Turn off the coll/ml plugin unless specifically requested as it - remains in an experimental state -- Fix LSF support by adding required libraries for the latest LSF - releases. Thanks to Joshua Randal for supplying the initial - patches. 
- - -1.8.2: 25 Aug 2014 ------------------- - -- Fix auto-wireup of OOB, allowing ORTE to automatically - test all available NICs -- "Un-deprecate" pernode, npernode, and npersocket options - by popular demand -- Add missing Fortran bindings for MPI_WIN_LOCK_ALL, - MPI_WIN_UNLOCK_ALL, and MPI_WIN_SYNC. -- Fix cascading/over-quoting in some cases with the rsh/ssh-based - launcher. Thanks to multiple users for raising the issue. -- Properly add support for gfortran 4.9 ignore TKR pragma (it was - erroneously only partially added in v1.7.5). Thanks to Marcus - Daniels for raising the issue. -- Update/improve help messages in the usnic BTL. -- Resolve a race condition in MPI_Abort. -- Fix obscure cases where static linking from wrapper compilers would - fail. -- Clarify the configure --help message about when OpenSHMEM is - enabled/disabled by default. Thanks to Paul Hargrove for the - suggestion. -- Align pages properly where relevant. Thanks to Paul Hargrove for - identifying the issue. -- Various compiler warning and minor fixes for OpenBSD, FreeBSD, and - Solaris/SPARC. Thanks to Paul Hargrove for the patches. -- Properly pass function pointers from Fortran to C in the mpi_f08 - module, thereby now supporting gfortran 4.9. Thanks to Tobias - Burnus for assistance and testing with this issue. -- Improve support for Cray CLE 5. -- Fix mpirun regression: ensure exit status is non-zero if mpirun is - terminated due to signal. -- Improved CUDA efficiency of asynchronous copies. -- Fix to parameter type in MPI_Type_indexed.3. Thanks to Bastian - Beischer for reporting the mistake. -- Fix NUMA distance calculations in the openib BTL. -- Decrease time required to shut down mpirun at the end of a job. -- More RMA fixes. -- More hostfile fixes from Tetsuya Mishima. -- Fix darray issue where UB was not computed correctly. -- Fix mpi_f08 parameter name for MPI_GET_LIBRARY_VERSION. Thanks to - Junchao Zhang for pointing out the issue. 
-- Ensure mpirun aborts properly when unable to map processes in - scheduled environments. -- Ensure that MPI RMA error codes show up properly. Thanks to - Lisandro Dalcín for reporting the issue. -- Minor bug fixes and improvements to the bash and zsh mpirun - autocompletion scripts. -- Fix sequential mpirun process mapper. Thanks to Bill Chen for - reporting the issue. -- Correct SLURM stdout/stderr redirection. -- Added missing portals 4 files. -- Performance improvements for blocking sends and receives. -- Lots of cleanup to the ml collective component -- Added new Java methods to provide full MPI coverage -- Many OSHMEM cleanups -- Prevent comm_spawn from automatically launching a VM across - all available nodes -- Close many memory leaks to achieve valgrind-clean operation -- Better handling of TCP connection discovery for mismatched networks - where we don't have a direct 1:1 subnet match between nodes -- Prevent segfault when OMPI info tools are used in pipes and user - exits one step of that pipe before completing output - - -1.8.1: 23 Apr 2014 ------------------- - -- Fix for critical bug: mpirun removed files (but not directories) - from / when run as root. Thanks to Jay Fenlason and Orion Poplawski - for bringing the issue to our attention and helping identify the - fix. - - -1.8: 31 Mar 2014 ----------------- - -- Commit upstream ROMIO fix for mixed NFS+local filesystem environments. -- Several fixes for MPI-3 one-sided support. For example, - arbitrary-length datatypes are now supported. -- Add config support for the Mellanox ConnectX 4 card. -- Add missing MPI_COMM_GET|SET_INFO functions, and missing - MPI_WEIGHTS_EMPTY and MPI_ERR_RMA_SHARED constants. Thanks to - Lisandro Dalcín for pointing out the issue. -- Update some help messages in OSHMEM, the usnic BTL, the TCP BTL, and - ORTE, and update documentation about ompi_info's --level option. -- Fix some compiler warnings. 
-- Ensure that ORTE daemons are not bound to a single processor - if TaskAffinity is set on by default in Slurm. Thanks to Artem Polyakov - for identifying the problem and providing a patch - - -1.7.5 20 Mar 2014 ------------------ - - ********************************************************************** - * Open MPI is now fully MPI-3.0 compliant - ********************************************************************** - -- Add Linux OpenSHMEM support built on top of Open MPI's MPI - layer. Thanks to Mellanox for contributing this new feature. -- Allow restricting ORTE daemons to specific cores using the - orte_daemon_cores MCA param. -- Ensure to properly set "locality" flags for processes launched via - MPI dynamic functions such as MPI_COMM_SPAWN. -- Fix MPI_GRAPH_CREATE when nnodes is smaller than the size of the old - communicator. -- usnic BTL now supports underlying UDP transport. -- usnic BTL now checks for common connectivty errors at first send to - a remote server. -- Minor scalability improvements in the usnic BTL. -- ompi_info now lists whether the Java MPI bindings are available or not. -- MPI-3: mpi.h and the Fortran interfaces now report MPI_VERSION==3 - and MPI_SUBVERSION==0. -- MPI-3: Added support for new RMA functions and functionality. -- Fix MPI_Info "const buglet. Thanks to Orion Poplawski for - identifying the issue. -- Multiple fixes to mapping/binding options. Thanks to Tetsuya Mishima - for his assistance. -- Multiple fixes for normal and abnormal process termination, - including singleton MPI_Abort and ensuring to kill entire process - groups when abnormally terminating a job. -- Fix DESTDIR install for javadocs. Thanks to Orion Poplawski for - pointing out the issue. -- Various performance improvements for the MPI Java bindings. -- OMPI now uses its own internal random number generator and will not - perturb srand() and friends. -- Some cleanups for Cygwin builds. Thanks to Marco Atzeri for the - patches. 
-- Add a new collective component (coll/ml) that provides substantially - improved performance. It is still experimental, and requires - setting coll_ml_priority > 0 to become active. -- Add version check during startup to ensure you are using the same - version of Open MPI on all nodes in a job. -- Significantly improved the performance of MPI_DIMS_CREATE for large - values. Thanks to Andreas Schäfer for the contribution. -- Removed ASYNCHRONOUS keyword from the "ignore TKR" mpi_f08 module. -- Deprecated the following mpirun options: - --bynode, --bycore, --byslot: replaced with --map-by node|core|slot. - --npernode, --npersocket: replaced with --map-by ppr:N:node and - --map-by ppr:N:socket, respectively -- Pick NFS "infinitely stale" fix from ROMIO upstream. -- Various PMI2 fixes and extension to support broader range of mappings. -- Improve launch performance at large scale. -- Add support for PBS/Torque environments that set environment - variables to indicate the number of slots available on each nodes. - Set the ras_tm_smp MCA parameter to "1" to enable this mode. -- Add new, more scalable endpoint exchange (commonly called "modex") - method that only exchanges endpoint data on a per-peer basis - on first message. Not all transports have been updated to use - this feature. Set the rte_orte_direct_modex parameter to "1" - to enable this mode. 
- - -1.7.4: 5 Feb 2014 ------------------ - - ********************************************************************** - * CRITICAL CHANGE - * - * As of release 1.7.4, OpenMPI's default mapping, ranking, and binding - * settings have changed: - * - * Mapping: - * if #procs <= 2, default to map-by core - * if #procs > 2, default to map-by socket - * Ranking: - * if default mapping is used, then default to rank-by slot - * if map-by is given, then default to rank-by , - * where is whatever object we mapped against - * Binding: - * default to bind-to core - * - * Users can override any of these settings individually using the - * corresponding MCA parameter. Note that multi-threaded applications - * in particular may want to override at least the binding default - * to allow threads to use multiple cores. - ********************************************************************** - -- Restore version number output in "ompi_info --all". -- Various bug fixes for the mpi_f08 Fortran bindings. -- Fix ROMIO compile error with Lustre 2.4. Thanks to Adam Moody for - reporting the issue. -- Various fixes for 32 bit platforms. -- Add ability to selectively disable building the mpi or mpi_f08 - module. See the README file for details. -- Fix MX MTL finalization issue. -- Fix ROMIO issue when opening a file with MPI_MODE_EXCL. -- Fix PowerPC and MIPS assembly issues. -- Various fixes to the hcoll and FCA collective offload modules. -- Prevent integer overflow when creating datatypes. Thanks to - original patch from Gilles Gouaillardet. -- Port some upstream hwloc fixes to Open MPI's embedded copy for - working around buggy NUMA node cpusets and including mising header - files. Thanks to Jeff Becker and Paul Hargrove for reporting the - issues. -- Fix recursive invocation issues in the MXM MTL. -- Various bug fixes to the new MCA parameter back-end system. -- Have the posix fbtl module link against -laio on NetBSD platforms. - Thanks to Paul Hargrove for noticing the issue. 
-- Various updates and fixes to network filesystem detection to support - more operating systems. -- Add gfortran v4.9 "ignore TKR" syntax to the mpi Fortran module. -- Various compiler fixes for several BSD-based platforms. Thanks to - Paul Hargrove for reporting the issues. -- Fix when MPI_COMM_SPAWN[_MULTIPLE] is used on oversubscribed - systems. -- Change the output from --report bindings to simply state that a - process is not bound, instead of reporting that it is bound to all - processors. -- Per MPI-3.0 guidance, remove support for all MPI subroutines with - choice buffers from the TKR-based mpi Fortran module. Thanks to Jed - Brown for raising the issue. -- Only allow the usnic BTL to build on 64 bit platforms. -- Various bug fixes to SLURM support, to include ensuring proper - exiting on abnormal termination. -- Ensure that MPI_COMM_SPAWN[_MULTIPLE] jobs get the same mapping - directives that were used with mpirun. -- Fixed the application of TCP_NODELAY. -- Change the TCP BTL to not warn if a non-existent interface is - ignored. -- Restored the "--bycore" mpirun option for backwards compatibility. -- Fixed debugger attach functionality. Thanks to Ashley Pittman for - reporting the issue and suggesting the fix. -- Fixed faulty MPI_IBCAST when invoked on a communicator with only - one process. -- Add new Mellanox device IDs to the openib BTL. -- Progress towards cleaning up various internal memory leaks as - reported by Valgrind. -- Fixed some annoying flex-generated warnings that have been there for - years. Thanks to Tom Fogal for the initial patch. -- Support user-provided environment variables via the "env" info key - to MPI_COMM_SPAWN[_MULTIPLE]. Thanks to Tom Fogal for the feature - request. -- Fix uninitialized variable in MPI_DIST_GRAPH_CREATE. -- Fix a variety of memory errors on SPARC platforms. Thanks to - Siegmar Gross for reporting and testing all the issues. -- Remove Solaris threads support. When building on Solaris, pthreads - will be used. 
-- Correctly handle the convertor internal stack for persistent - receives. Thanks to Guillaume Gouaillardet for identifying the - problem. -- Add support for using an external libevent via --with-libevent. See - the README for more details. -- Various OMPIO updates and fixes. -- Add support for the MPIEXEC_TIMEOUT environment variable. If set, - mpirun will terminate the job after this many seconds. -- Update the internal copy of ROMIO to that which shipped in MPICH - 3.0.4. -- Various performance tweaks and improvements in the usnic BTL, - including now reporting MPI_T performance variables for each usnic - device. -- Fix to not access send datatypes for non-root processes with - MPI_ISCATTER[V] and MPI_IGATHER[V]. Thanks to Pierre Jolivet for - supplying the initial patch. -- Update VampirTrace to 5.14.4.9. -- Fix ptmalloc2 hook disable when used with ummunotify. -- Change the default connection manager for the openib BTL to be based - on UD verbs data exchanges instead of ORTE OOB data exchanges. -- Fix Fortran compile error when compiling with 8-byte INTEGERs and - 4-byte ints. -- Fix C++11 issue identified by Jeremiah Willcock. -- Many changes, updates, and bug fixes to the ORTE run-time layer. -- Correctly handle MPI_REDUCE_SCATTER with recvcounts of 0. -- Update man pages for MPI-3, and add some missing man pages for - MPI-2.x functions. -- Updated mpi_f08 module in accordance with post-MPI-3.0 errata which - basically removed BIND(C) from all interfaces. -- Fixed MPI_IN_PLACE detection for MPI_SCATTER[V] in Fortran - routines. Thanks to Charles Gerlach for identifying the issue. -- Added support for routable RoCE to the openib BTL. -- Update embedded hwloc to v1.7.2. -- ErrMgr framework redesigned to better support fault tolerance development - activities. 
See the following RFC for details: - https://www.open-mpi.org/community/lists/devel/2010/03/7589.php -- Added database framework to OPAL and changed all modex operations - to flow thru it, also included additional system info in the - available data -- Added staged state machine to support sequential work flows -- Added distributed file system support for accessing files across - nodes that do not have networked file systems -- Extended filem framework to support scalable pre-positioning of - files for use by applications, adding new "raw" component that - transmits files across the daemon network -- Native Windows support has been removed. A cygwin package is - available from that group for Windows-based use. -- Added new MPI Java bindings. See the Javadocs for more details on - the API. -- Wrapper compilers now add rpath support by default to generated - executables on systems that support it. This behavior can be - disabled via --disable-wrapper-rpath. See note in README about ABI - issues when using rpath in MPI applications. -- Added a new parallel I/O component and multiple new frameworks to - support parallel I/O operations. -- Fixed MPI_STATUS_SIZE Fortran issue when used with 8-byte Fortran - INTEGERs and 4-byte C ints. Since this issue affects ABI, it is - only enabled if Open MPI is configured with - --enable-abi-breaking-fortran-status-i8-fix. Thanks to Jim Parker - for supplying the initial patch. -- Add support for Intel Phi SCIF transport. -- For CUDA-aware MPI configured with CUDA 6.0, use new pointer - attribute to avoid extra synchronization in stream 0 when using - CUDA IPC between GPUs on the same node. -- For CUDA-aware MPI configured with CUDA 6.0, compile in support - of GPU Direct RDMA in openib BTL to improve small message latency. -- Updated ROMIO from MPICH v3.0.4. -- MPI-3: Added support for remaining non-blocking collectives. -- MPI-3: Added support for neighborhood collectives. -- MPI-3: Updated C bindings with consistent use of []. 
-- MPI-3: Added the const keyword to read-only buffers. -- MPI-3: Added support for non-blocking communicator duplication. -- MPI-3: Added support for non-collective communicator creation. - - -1.7.3: 17 Oct 2013 ------------------- - -- Make CUDA-aware support dynamically load libcuda.so so CUDA-aware - MPI library can run on systems without CUDA software. -- Fix various issues with dynamic processes and intercommunicator - operations under Torque. Thanks to Suraj Prabhakaran for reporting - the problem. -- Enable support for the Mellanox MXM2 library by default. -- Improve support for Portals 4. -- Various Solaris fixes. Many thanks to Siegmar Gross for his - incredible patience in reporting all the issues. -- MPI-2.2: Add reduction support for MPI_C_*COMPLEX and MPI::*COMPLEX. -- Fixed internal accounting when openpty() fails. Thanks to Michal - Peclo for reporting the issue and providing a patch. -- Fixed too-large memory consumption in XRC mode of the openib BTL. - Thanks to Alexey Ryzhikh for the patch. -- Add bozo check for negative np values to mpirun to prevent a - deadlock. Thanks to Upinder Malhi for identifying the issue. -- Fixed MPI_IS_THREAD_MAIN behavior. Thanks to Lisandro Dalcín for - pointing out the problem. -- Various rankfile fixes. -- Fix functionality over iWARP devices. -- Various memory and performance optimizations and tweaks. -- Fix MPI_Cancel issue identified by Fujitsu. -- Add missing support for MPI_Get_address in the "use mpi" TKR - implementation. Thanks to Hugo Gagnon for identifying the issue. -- MPI-3: Add support for MPI_Count. -- MPI-2.2: Add missing MPI_IN_PLACE support for MPI_ALLTOALL. -- Added new usnic BTL to support the Cisco usNIC device. -- Minor VampirTrace update to 5.14.4.4. -- Removed support for ancient OS X systems (i.e., prior to 10.5). -- Fixed obscure packing/unpacking datatype bug. Thanks to Takahiro - Kawashima for identifying the issue. -- Add run-time support for PMI2 environments. 
-- Update openib BTL default parameters to include support for Mellanox - ConnectX3-Pro devices. -- Update libevent to v2.0.21. -- "ompi_info --param TYPE PLUGIN" now only shows a small number of MCA - parameters by default. Add "--level 9" or "--all" to see *all* MCA - parameters. See README for more details. -- Add support for asynchronous CUDA-aware copies. -- Add support for Mellanox MPI collective operation offload via the - "hcoll" library. -- MPI-3: Add support for the MPI_T interface. Open MPI's MCA - parameters are now accessible via the MPI_T control variable - interface. Support has been added for a small number of MPI_T - performance variables. -- Add Gentoo memory hooks override. Thanks to Justin Bronder for the - patch. -- Added new "mindist" process mapper, allowing placement of processes - via PCI locality information reported by the BIOS. -- MPI-2.2: Add support for MPI_Dist_graph functionality. -- Enable generic, client-side support for PMI2 implementations. Can - be leveraged by any resource manager that implements PMI2; e.g. SLURM, - versions 2.6 and higher. - -1.7.2: 26 Jun 2013 ------------------- - -- Major VampirTrace update to 5.14.4.2. - (** also appeared: 1.6.5) -- Fix to set flag==1 when MPI_IPROBE is called with MPI_PROC_NULL. - (** also appeared: 1.6.5) -- Set the Intel Phi device to be ignored by default by the openib BTL. - (** also appeared: 1.6.5) -- Decrease the internal memory storage used by intrinsic MPI datatypes - for Fortran types. Thanks to Takahiro Kawashima for the initial - patch. - (** also appeared: 1.6.5) -- Fix total registered memory calculation for Mellanox ConnectIB and - OFED 2.0. - (** also appeared: 1.6.5) -- Fix possible data corruption in the MXM MTL component. - (** also appeared: 1.6.5) -- Remove extraneous -L from hwloc's embedding. Thanks to Stefan - Friedel for reporting the issue. - (** also appeared: 1.6.5) -- Fix contiguous datatype memory check. Thanks to Eric Chamberland - for reporting the issue. 
- (** also appeared: 1.6.5) -- Make the openib BTL more friendly to ignoring verbs devices that are - not RC-capable. - (** also appeared: 1.6.5) -- Fix some MPI datatype engine issues. Thanks to Thomas Jahns for - reporting the issue. - (** also appeared: 1.6.5) -- Add INI information for Chelsio T5 device. - (** also appeared: 1.6.5) -- Integrate MXM STREAM support for MPI_ISEND and MPI_IRECV, and other - minor MXM fixes. - (** also appeared: 1.6.5) -- Fix to not show amorphous "MPI was already finalized" error when - failing to MPI_File_close an open file. Thanks to Brian Smith for - reporting the issue. - (** also appeared: 1.6.5) -- Add a distance-based mapping component to find the socket "closest" - to the PCI bus. -- Fix an error that caused epoll to automatically be disabled - in libevent. -- Upgrade hwloc to 1.5.2. -- *Really* fixed XRC compile issue in Open Fabrics support. -- Fix MXM connection establishment flow. -- Fixed parallel debugger ability to attach to MPI jobs. -- Fixed some minor memory leaks. -- Fixed datatype corruption issue when combining datatypes of specific - formats. -- Added Location Aware Mapping Algorithm (LAMA) mapping component. -- Fixes for MPI_STATUS handling in corner cases. -- Add a distance-based mapping component to find the socket "closest" - to the PCI bus. - - -1.7.1: 16 Apr 2013 ------------------- - -- Fixed compile error when --without-memory-manager was specified - on Linux -- Fixed XRC compile issue in Open Fabrics support. - - -1.7: 1 Apr 2013 ---------------- - -- Added MPI-3 functionality: - - MPI_GET_LIBRARY_VERSION - - Matched probe - - MPI_TYPE_CREATE_HINDEXED_BLOCK - - Non-blocking collectives - - MPI_INFO_ENV support - - Fortran '08 bindings (see below) -- Dropped support for checkpoint/restart due to loss of maintainer :-( -- Enabled compile-time warning of deprecated MPI functions by default - (in supported compilers). 
-- Revamped Fortran MPI bindings (see the README for details): - - "mpifort" is now the preferred wrapper compiler for Fortran - - Added "use mpi_f08" bindings (for compilers that support it) - - Added better "use mpi" support (for compilers that support it) - - Removed incorrect MPI_SCATTERV interface from "mpi" module that - was added in the 1.5.x series for ABI reasons. -- Lots of VampirTrace upgrades and fixes; upgrade to v5.14.3. -- Modified process affinity system to provide warning when bindings - result in being "bound to all", which is equivalent to not being - bound. -- Removed maffinity, paffinity, and carto frameworks (and associated - MCA params). -- Upgraded to hwloc v1.5.1. -- Added performance improvements to the OpenIB (OpenFabrics) BTL. -- Made malloc hooks more friendly to IO interprosers. Thanks to the - bug report and suggested fix from Darshan maintainer Phil Carns. -- Added support for the DMTCP checkpoint/restart system. -- Added support for the Cray uGNI interconnect. -- Fixed header file problems on OpenBSD. -- Fixed issue with MPI_TYPE_CREATE_F90_REAL. -- Wrapper compilers now explicitly list/link all Open MPI libraries if - they detect static linking CLI arguments. -- Open MPI now requires a C99 compiler to build. Please upgrade your - C compiler if you do not have a C99-compliant compiler. -- Fix MPI_GET_PROCESSOR_NAME Fortran binding to set ierr properly. - Thanks to LANL for spotting the error. -- Many MXM and FCA updates. -- Fixed erroneous free of putenv'ed string that showed up in Valgrind - reports. -- Fixed MPI_IN_PLACE case for MPI_ALLGATHER. -- Fixed a bug that prevented MCA params from being forwarded to - daemons upon launch. -- Fixed issues with VT and CUDA --with-cuda[-libdir] configuration CLI - parameters. -- Entirely new implementation of many MPI collective routines focused - on better performance. -- Revamped autogen / build system. 
-- Add new sensor framework to ORTE that includes modules for detecting - stalled applications and processes that consume too much memory. -- Added new state machine framework to ORTE that converts ORTE into an - event-driven state machine using the event library. -- Added a new MCA parameter (ess_base_stream_buffering) that allows the user - to override the system default for buffering of stdout/stderr streams - (via setvbuf). Parameter is not visible via ompi_info. -- Revamped the launch system to allow consideration of node hardware - in assigning process locations and bindings. -- Added the -novm option to preserve the prior launch behavior. -- Revamped the process mapping system to utilize node hardware by adding - new map-by, rank-by, and bind-to cmd line options. -- Added new MCA parameter to provide protection against IO forwarding - backlog. -- Dropped support for native Windows due to loss of maintainers. :-( -- Added a new parallel I/O component and multiple new frameworks to - support parallel I/O operations. -- Fix typo in orte_setup_hadoop.m4. Thanks to Aleksej Saushev for - reporting it -- Fix a very old error in opal_path_access(). Thanks to Marco Atzeri - for chasing it down. - - -1.6.6: Not released -------------------- - -- Prevent integer overflow in datatype creation. Thanks to Gilles - Gouaillardet for identifying the problem and providing a preliminary - version of the patch. -- Ensure help-opal-hwloc-base.txt is included in distribution - tarballs. Thanks to Gilles Gouaillardet for supplying the patch. -- Correctly handle the invalid status for NULL and inactive requests. - Thanks to KAWASHIMA Takahiro for submitting the initial patch. -- Fixed MPI_STATUS_SIZE Fortran issue when used with 8-byte Fortran - INTEGERs and 4-byte C ints. Since this issue affects ABI, it is - only enabled if Open MPI is configured with - --enable-abi-breaking-fortran-status-i8-fix. Thanks to Jim Parker - for supplying the initial patch. 
-- Fix datatype issue for sending from the middle of non-contiguous - data. -- Fixed failure error with pty support. Thanks to Michal Pecio for - the patch. -- Fixed debugger support for direct-launched jobs. -- Fix MPI_IS_THREAD_MAIN to return the correct value. Thanks to - Lisandro Dalcín for pointing out the issue. -- Update VT to 5.14.4.4: - - Fix C++-11 issue. - - Fix support for building RPMs on Fedora with CUDA libraries. -- Add openib part number for ConnectX3-Pro HCA. -- Ensure to check that all resolved IP addresses are local. -- Fix MPI_COMM_SPAWN via rsh when mpirun is on a different server. -- Add Gentoo "sandbox" memory hooks override. - - -1.6.5: 26 Jun 2013 ------------------- - -- Updated default SRQ parameters for the openib BTL. - (** also to appear: 1.7.2) -- Major VampirTrace update to 5.14.4.2. - (** also to appear: 1.7.2) -- Fix to set flag==1 when MPI_IPROBE is called with MPI_PROC_NULL. - (** also to appear: 1.7.2) -- Set the Intel Phi device to be ignored by default by the openib BTL. - (** also to appear: 1.7.2) -- Decrease the internal memory storage used by intrinsic MPI datatypes - for Fortran types. Thanks to Takahiro Kawashima for the initial - patch. - (** also to appear: 1.7.2) -- Fix total registered memory calculation for Mellanox ConnectIB and - OFED 2.0. - (** also to appear: 1.7.2) -- Fix possible data corruption in the MXM MTL component. - (** also to appear: 1.7.2) -- Remove extraneous -L from hwloc's embedding. Thanks to Stefan - Friedel for reporting the issue. - (** also to appear: 1.7.2) -- Fix contiguous datatype memory check. Thanks to Eric Chamberland - for reporting the issue. - (** also to appear: 1.7.2) -- Make the openib BTL more friendly to ignoring verbs devices that are - not RC-capable. - (** also to appear: 1.7.2) -- Fix some MPI datatype engine issues. Thanks to Thomas Jahns for - reporting the issue. - (** also to appear: 1.7.2) -- Add INI information for Chelsio T5 device. 
- (** also to appear: 1.7.2) -- Integrate MXM STREAM support for MPI_ISEND and MPI_IRECV, and other - minor MXM fixes. - (** also to appear: 1.7.2) -- Improved alignment for OpenFabrics buffers. -- Fix to not show amorphous "MPI was already finalized" error when - failing to MPI_File_close an open file. Thanks to Brian Smith for - reporting the issue. - (** also to appear: 1.7.2) - - -1.6.4: 21 Feb 2013 ------------------- - -- Fix Cygwin shared memory and debugger plugin support. Thanks to - Marco Atzeri for reporting the issue and providing initial patches. -- Fix to obtaining the correct available nodes when a rankfile is - providing the allocation. Thanks to Siegmar Gross for reporting the - problem. -- Fix process binding issue on Solaris. Thanks to Siegmar Gross for - reporting the problem. -- Updates for MXM 2.0. -- Major VT update to 5.14.2.3. -- Fixed F77 constants for Cygwin/Cmake build. -- Fix a linker error when configuring --without-hwloc. -- Automatically provide compiler flags that compile properly on some - types of ARM systems. -- Fix slot_list behavior when multiple sockets are specified. Thanks - to Siegmar Gross for reporting the problem. -- Fixed memory leak in one-sided operations. Thanks to Victor - Vysotskiy for letting us know about this one. -- Added performance improvements to the OpenIB (OpenFabrics) BTL. -- Improved error message when process affinity fails. -- Fixed MPI_MINLOC on man pages for MPI_REDUCE(_LOCAL). Thanks to Jed - Brown for noticing the problem and supplying a fix. -- Made malloc hooks more friendly to IO interposers. Thanks to the - bug report and suggested fix from Darshan maintainer Phil Carns. -- Restored ability to direct launch under SLURM without PMI support. -- Fixed MPI datatype issues on OpenBSD. -- Major VT update to 5.14.2.3. -- Support FCA v3.0+. -- Fixed header file problems on OpenBSD. -- Fixed issue with MPI_TYPE_CREATE_F90_REAL. -- Fix an issue with using external libltdl installations. 
Thanks to - opolawski for identifying the problem. -- Fixed MPI_IN_PLACE case for MPI_ALLGATHER for FCA. -- Allow SLURM PMI support to look in lib64 directories. Thanks to - Guillaume Papaure for the patch. -- Restore "use mpi" ABI compatibility with the rest of the 1.5/1.6 - series (except for v1.6.3, where it was accidentally broken). -- Fix a very old error in opal_path_access(). Thanks to Marco Atzeri - for chasing it down. - - -1.6.3: 30 Oct 2012 ------------------- - -- Fix mpirun --launch-agent behavior when a prefix is specified. - Thanks to Reuti for identifying the issue. -- Fixed memchecker configury. -- Brought over some compiler warning squashes from the development trunk. -- Fix spawning from a singleton to multiple hosts when the "add-host" - MPI_Info key is used. Thanks to Brian Budge for pointing out the - problem. -- Add Mellanox ConnectIB IDs and max inline value. -- Fix rankfile when no -np is given. -- FreeBSD detection improvement. Thanks to Brooks Davis for the - patch. -- Removed TCP warnings on Windows. -- Improved collective algorithm selection for very large messages. -- Fix PSM MTL affinity settings. -- Fix issue with MPI_OP_COMMUTATIVE in the mpif.h bindings. Thanks to - Åke Sandgren for providing a patch to fix the issue. -- Fix issue with MPI_SIZEOF when using CHARACTER and LOGICAL types in - the mpi module. Thanks to Åke Sandgren for providing a patch to fix - the issue. - - -1.6.2: 25 Sep 2012 ------------------- - -- Fix issue with MX MTL. Thanks to Doug Eadline for raising the issue. -- Fix singleton MPI_COMM_SPAWN when the result job spans multiple nodes. -- Fix MXM hang, and update for latest version of MXM. -- Update to support Mellanox FCA 2.5. -- Fix startup hang for large jobs. -- Ensure MPI_TESTANY / MPI_WAITANY properly set the empty status when - count==0. -- Fix MPI_CART_SUB behavior of not copying periods to the new - communicator properly. Thanks to John Craske for the bug report. 
-- Add btl_openib_abort_not_enough_reg_mem MCA parameter to cause Open - MPI to abort MPI jobs if there is not enough registered memory - available on the system (vs. just printing a warning). Thanks to - Brock Palen for raising the issue. -- Minor fix to Fortran MPI_INFO_GET: only copy a value back to the - user's buffer if the flag is .TRUE. -- Fix VampirTrace compilation issue with the PGI compiler suite. - - -1.6.1: 22 Aug 2012 ------------------- - -- A bunch of changes to eliminate hangs on OpenFabrics-based networks. - Users with Mellanox hardware are ***STRONGLY ENCOURAGED*** to check - their registered memory kernel module settings to ensure that the OS - will allow registering more than 8GB of memory. See this FAQ item - for details: - - https://www.open-mpi.org/faq/?category=openfabrics#ib-low-reg-mem - - - Fall back to send/receive semantics if registered memory is - unavailable for RDMA. - - Fix two fragment leaks when registered memory is exhausted. - - Heuristically determine how much registered memory is available - and warn if it's significantly less than all of RAM. - - Artificially limit the amount of registered memory each MPI process - can use to about 1/Nth of total registered memory available. - - Improve error messages when events occur that are likely due to - unexpected registered memory exhaustion. - -- Fix double semicolon error in the C++ in <mpi.h>. Thanks to John - Foster for pointing out the issue. -- Allow -Xclang to be specified multiple times in CFLAGS. Thanks to - P. Martin for raising the issue. -- Break up a giant "print *" statement in the ABI-preserving incorrect - MPI_SCATTER interface in the "large" Fortran "mpi" module. Thanks - to Juan Escobar for the initial patch. -- Switch the MPI_ALLTOALLV default algorithm to a pairwise exchange. -- Increase the openib BTL default CQ length to handle more types of - OpenFabrics devices. -- Lots of VampirTrace fixes; upgrade to v5.13.0.4. 
-- Map MPI_2INTEGER to underlying MPI_INTEGERs, not MPI_INTs. -- Ensure that the OMPI version number is tolerant of handling spaces. - Thanks to dragonboy for identifying the issue. -- Fixed IN parameter marking on Fortran "mpi" module - MPI_COMM_TEST_INTER interface. -- Various MXM improvements. -- Make the output of "mpirun --report-bindings" much more friendly / - human-readable. -- Properly handle MPI_COMPLEX8|16|32. -- More fixes for mpirun's processor affinity options (--bind-to-core - and friends). -- Use aligned memory for OpenFabrics registered memory. -- Multiple fixes for parameter checking in MPI_ALLGATHERV, - MPI_REDUCE_SCATTER, MPI_SCATTERV, and MPI_GATHERV. Thanks to the - mpi4py community (Bennet Fauber, Lisandro Dalcín, Jonathan Dursi). -- Fixed file positioning overflows in MPI_FILE_GET_POSITION, - MPI_FILE_GET_POSITION_SHARED, FILE_GET_SIZE, FILE_GET_VIEW. -- Removed the broken --cpu-set mpirun option. -- Fix cleanup of MPI errorcodes. Thanks to Alexey Bayduraev for the - patch. -- Fix default hostfile location. Thanks to Götz Waschk for noticing - the issue. -- Improve several error messages. - - -1.6: 14 May 2012 ----------------- - -- Fix some process affinity issues. When binding a process, Open MPI - will now bind to all available hyperthreads in a core (or socket, - depending on the binding options specified). - --> Note that "mpirun --bind-to-socket ..." does not work on POWER6- - and POWER7-based systems with some Linux kernel versions. See - the FAQ on the Open MPI web site for more information. -- Add support for ARM5 and ARM6 (in addition to the existing ARM7 - support). Thanks to Evan Clinton for the patch. -- Minor Mellanox MXM fixes. -- Properly detect FDR10, FDR, and EDR OpenFabrics devices. -- Minor fixes to the mpirun(1) and MPI_Comm_create(3) man pages. -- Prevent segv if COMM_SPAWN_MULTIPLE fails. Thanks to Fujitsu for - the patch. -- Disable interposed memory management in fakeroot environments. 
This - fixes a problem in some build environments. -- Minor hwloc updates. -- Array versions of MPI_TEST and MPI_WAIT with a count==0 will now - return immediately with MPI_SUCCESS. Thanks to Jeremiah Willcock - for the suggestion. -- Update VampirTrace to v5.12.2. -- Properly handle forwarding stdin to all processes when "mpirun - --stdin all" is used. -- Workaround XLC assembly bug. -- OS X Tiger (10.4) has not been supported for a while, so forcibly - abort configure if we detect it. -- Fix segv in the openib BTL when running on SPARC 64 systems. -- Fix some include file ordering issues on some BSD-based platforms. - Thanks to Paul Hargrove for this (and many, many other) fixes. -- Properly handle .FALSE. return parameter value to attribute copy - callback functions. -- Fix a bunch of minor C++ API issues; thanks to Fujitsu for the patch. -- Fixed the default hostfile MCA parameter behavior. -- Per the MPI spec, ensure not to touch the port_name parameter to - MPI_CLOSE_PORT (it's an IN parameter). - - -1.5.5: 27 Mar 2012 ------------------- - -- Many, many portability configure/build fixes courtesy of Paul - Hargrove. Thanks, Paul! -- Fixed shared memory fault tolerance support compiler errors. -- Removed not-production-quality rshd and tmd PLM launchers. -- Minor updates to the Open MPI SRPM spec file. -- Fixed mpirun's --bind-to-socket option. -- A few MPI_THREAD_MULTIPLE fixes in the shared memory BTL. -- Upgrade the GNU Autotools used to bootstrap the 1.5/1.6 series to - all the latest versions at the time of this release. -- Categorically state in the README that if you're having a problem - with Open MPI with the Linux Intel 12.1 compilers, *upgrade your - Intel Compiler Suite to the latest patch version*, and the problems - will go away. :-) -- Fix the --without-memory-manager configure option. -- Fixes for Totalview/DDT MPI-capable debuggers. -- Update rsh/ssh support to properly handle the Mac OS X library path - (i.e., DYLD_LIBRARY_PATH). 
-- Make warning about shared memory backing files on a networked file - system be optional (i.e., can be disabled via MCA parameter). -- Several fixes to processor and memory affinity. -- Various shared memory infrastructure improvements. -- Various checkpoint/restart fixes. -- Fix MPI_IN_PLACE (and other MPI sentinel values) on OS X. Thanks to - Dave Goodell for providing the magic OS X gcc linker flags necessary. -- Various man page corrections and typo fixes. Thanks to Fujitsu for - the patch. -- Updated wrapper compiler man pages to list the various --showme - options that are available. -- Add PMI direct-launch support (e.g., "srun mpi_application" under - SLURM). -- Correctly compute the aligned address when packing the - datatype description. Thanks to Fujitsu for the patch. -- Fix MPI obscure corner case handling in packing MPI datatypes. - Thanks to Fujitsu for providing the patch. -- Workaround an Intel compiler v12.1.0 2011.6.233 vector optimization - bug. -- Output the MPI API in ompi_info output. -- Major VT update to 5.12.1.4. -- Upgrade embedded Hardware Locality (hwloc) v1.3.2, plus some - post-1.3.2-release bug fixes. All processor and memory binding is - now done through hwloc. Woo hoo! Note that this fixes core binding - on AMD Opteron 6200 and 4200 series-based systems (sometimes known - as Interlagos, Valencia, or other Bulldozer-based chips). -- New MCA parameters to control process-wide memory binding policy: - hwloc_base_mem_alloc_policy, hwloc_base_mem_bind_failure_action (see - ompi_info --param hwloc base). -- Removed direct support for libnuma. Libnuma support may now be - picked up through hwloc. -- Added MPI_IN_PLACE support to MPI_EXSCAN. -- Various fixes for building on Windows, including MinGW support. -- Removed support for the OpenFabrics IBCM connection manager. -- Updated Chelsio T4 and Intel NE OpenFabrics default buffer settings. -- Increased the default RDMA CM timeout to 30 seconds. 
-- Issue a warning if both btl_tcp_if_include and btl_tcp_if_exclude - are specified. -- Many fixes to the Mellanox MXM transport. - - -1.5.4: 18 Aug 2011 ------------------- - -- Add support for the (as yet unreleased) Mellanox MXM transport. -- Add support for dynamic service levels (SLs) in the openib BTL. -- Fixed C++ bindings cosmetic/warnings issue with - MPI::Comm::NULL_COPY_FN and MPI::Comm::NULL_DELETE_FN. Thanks to - Júlio Hoffimann for identifying the issues. -- Also allow the word "slots" in rankfiles (i.e., not just "slot"). - (** also to appear in 1.4.4) -- Add Mellanox ConnectX 3 device IDs to the openib BTL defaults. - (** also to appear in 1.4.4) -- Various FCA updates. -- Fix 32 bit SIGBUS errors on Solaris SPARC platforms. -- Add missing ARM assembly code files. -- Update to allow more than 128 entries in an appfile. - (** also to appear in 1.4.4) -- Various VT updates and bug fixes. -- Update description of btl_openib_cq_size to be more accurate. - (** also to appear in 1.4.4) -- Various assembly "clobber" fixes. -- Fix a hang in carto selection in obscure situations. -- Guard the inclusion of execinfo.h since not all platforms have it. Thanks - to Aleksej Saushev for identifying this issue. - (** also to appear in 1.4.4) -- Support Solaris legacy munmap prototype changes. - (** also to appear in 1.4.4) -- Updated to Automake 1.11.1 per - https://www.open-mpi.org/community/lists/devel/2011/07/9492.php. -- Fix compilation of LSF support. -- Update MPI_Comm_spawn_multiple.3 man page to reflect what it - actually does. -- Fix for possible corruption of the environment. Thanks to Peter - Thompson for the suggestion. (** also to appear in 1.4.4) -- Enable use of PSM on direct-launch SLURM jobs. -- Update paffinity hwloc to v1.2, and to fix minor affinity - assignment bugs on PPC64/Linux platforms. -- Let the openib BTL auto-detect its bandwidth. -- Support new MPI-2.2 datatypes. 
-- Updates to support more datatypes in MPI one-sided communication. -- Fix recursive locking bug when MPI-IO was used with - MPI_THREAD_MULTIPLE. (** also to appear in 1.4.4) -- Fix mpirun handling of prefix conflicts. -- Ensure mpirun's --xterm options leaves sessions attached. - (** also to appear in 1.4.4) -- Fixed type of sendcounts and displs in the "use mpi" F90 module. - ABI is preserved, but applications may well be broken. See the - README for more details. Thanks to Stanislav Sazykin for - identifying the issue. (** also to appear in 1.4.4) -- Fix indexed datatype leaks. Thanks to Pascal Deveze for supplying - the initial patch. (** also to appear in 1.4.4) -- Fix debugger mapping when mpirun's -npernode option is used. -- Fixed support for configure's --disable-dlopen option when used with - "make distclean". -- Fix segv associated with MPI_Comm_create with MPI_GROUP_EMPTY. - Thanks to Dominik Goeddeke for finding this. - (** also to appear in 1.4.4) -- Improved LoadLeveler ORTE support. -- Add new WinVerbs BTL plugin, supporting native OpenFabrics verbs on - Windows (the "wv" BTL). -- Add new btl_openib_gid_index MCA parameter to allow selecting which - GID to use on an OpenFabrics device's GID table. -- Add support for PCI relaxed ordering in the OpenFabrics BTL (when - available). -- Update rsh logic to allow correct SGE operation. -- Ensure that the mca_paffinity_alone MCA parameter only appears once - in the ompi_info output. Thanks to Gus Correa for identifying the - issue. -- Fixed return codes from MPI_PROBE and MPI_IPROBE. - (** also to appear in 1.4.4) -- Remove --enable-progress-thread configure option; it doesn't work on - the v1.5 branch. Rename --enable-mpi-threads to - --enable-mpi-thread-multiple. Add new --enable-opal-multi-threads - option. -- Updates for Intel Fortran compiler version 12. -- Remove bproc support. Farewell bproc! 
-- If something goes wrong during MPI_INIT, fix the error - message to say that it's illegal to invoke MPI_INIT before - MPI_INIT. - - -1.5.3: 16 Mar 2011 ------------------- - -- Add missing "affinity" MPI extension (i.e., the OMPI_Affinity_str() - API) that was accidentally left out of the 1.5.2 release. - - -1.5.2: 9 Mar 2011 ------------------ - -- Replaced all custom topology / affinity code with initial support - for hwloc v1.1.1 (PLPA has been removed -- long live hwloc!). Note - that hwloc is bundled with Open MPI, but an external hwloc can be - used, if desired. See README for more details. -- Many CMake updates for Windows builds. -- Updated opal_cr_thread_sleep_wait MCA param default value to make it - less aggressive. -- Updated debugger support to allow Totalview attaching from jobs - launched directly via srun (not mpirun). Thanks to Nikolay Piskun - for the patch. -- Added more FTB/CIFTS support. -- Fixed compile error with the PGI compiler. -- Portability fixes to allow the openib BTL to run on the Solaris - verbs stack. -- Fixed multi-token command-line issues when using the mpirun - --debug switch. For example: - mpirun --debug -np 2 a.out "foo bar" - Thanks to Gabriele Fatigati for reporting the issue. -- Added ARM support. -- Added the MPI_ROOT environment variable in the Open MPI Linux SRPM - for customers who use the BPS and LSF batch managers. -- Updated ROMIO from MPICH v1.3.1 (plus one additional patch). -- Fixed some deprecated MPI API function notification messages. -- Added new "bfo" PML that provides failover on OpenFabrics networks. -- Fixed some buffer memcheck issues in MPI_*_init. -- Added Solaris-specific chip detection and performance improvements. -- Fix some compile errors on Solaris. -- Updated the "rmcast" framework with bug fixes, new functionality. -- Updated the Voltaire FCA component with bug fixes, new - functionality. Support for FCA version 2.1. 
-- Fix gcc 4.4.x and 4.5.x over-aggressive warning notifications on - possibly freeing stack variables. Thanks to the Gentoo packagers - for reporting the issue. -- Make the openib component be verbose when it disqualifies itself due - to MPI_THREAD_MULTIPLE. -- Minor man page fixes. -- Various checkpoint / restart fixes. -- Fix race condition in the one-sided unlock code. Thanks to - Guillaume Thouvenin for finding the issue. -- Improve help message aggregation. -- Add OMPI_Affinity_str() optional user-level API function (i.e., the - "affinity" MPI extension). See README for more details. -- Added btl_tcp_if_seq MCA parameter to select a different ethernet - interface for each MPI process on a node. This parameter is only - useful when used with virtual ethernet interfaces on a single - network card (e.g., when using virtual interfaces give dedicated - hardware resources on the NIC to each process). -- Changed behavior of mpirun to terminate if it receives 10 (or more) - SIGPIPEs. -- Fixed oversubscription detection. -- Added new mtl_mx_board and mtl_mx_endpoint MCA parameters. -- Added ummunotify support for OpenFabrics-based transports. See the - README for more details. - - -1.5.1: 15 Dec 2010 ------------------- - -- Fixes for the Oracle Studio 12.2 Fortran compiler. -- Fix SPARC and SPARCv9 atomics. Thanks to Nicola Stange for the - initial patch. -- Fix Libtool issues with the IBM XL compiler in 64-bit mode. -- Restore the reset of the libevent progress counter to avoid - over-sampling the event library. -- Update memory barrier support. -- Use memmove (instead of memcpy) when necessary (e.g., source and - destination overlap). -- Fixed ompi-top crash. -- Fix to handle Autoconf --program-transforms properly and other - m4/configury updates. Thanks to the GASNet project for the - --program transforms fix. -- Allow hostfiles to specify usernames on a per-host basis. 
-- Update wrapper compiler scripts to search for perl during configure, - per request from the BSD maintainers. -- Minor man page fixes. -- Added --with-libltdl option to allow building Open MPI with an - external installation of libltdl. -- Fixed various issues with -D_FORTIFY_SOURCE=2. -- Various VT fixes and updates. - - -1.5: 10 Oct 2010 ----------------- - -- Added "knem" support: direct process-to-process copying for shared - memory message passing. See https://runtime.bordeaux.inria.fr/knem/ - and the README file for more details. -- Updated shared library versioning scheme and linking style of MPI - applications. The MPI application ABI has been broken from the - v1.3/v1.4 series. MPI applications compiled against any prior - version of Open MPI will need to, at a minimum, re-link. See the - README file for more details. -- Added "fca" collective component, enabling MPI collective offload - support for Voltaire switches. -- Fixed MPI one-sided operations with large target displacements. - Thanks to Brian Price and Jed Brown for reporting the issue. -- Fixed MPI_GET_COUNT when used with large counts. Thanks to Jed - Brown for reporting the issue. -- Made the openib BTL safer if extremely low SRQ settings are used. -- Fixed handling of the array_of_argv parameter in the Fortran - binding of MPI_COMM_SPAWN_MULTIPLE (** also to appear: 1.4.3). -- Fixed malloc(0) warnings in some collectives. -- Fixed a problem with the Fortran binding for - MPI_FILE_CREATE_ERRHANDLER. Thanks to Secretan Yves for identifying - the issue (** also to appear: 1.4.3). -- Updates to the LSF PLM to ensure that the path is correctly passed. - Thanks to Teng Lin for the patch (** also to appear: 1.4.3). -- Fixes for the F90 MPI_COMM_SET_ERRHANDLER and MPI_WIN_SET_ERRHANDLER - bindings. Thanks to Paul Kapinos for pointing out the issue - (** also to appear: 1.4.3). 
-- Fixed extra_state parameter types in F90 prototypes for - MPI_COMM_CREATE_KEYVAL, MPI_GREQUEST_START, MPI_REGISTER_DATAREP, - MPI_TYPE_CREATE_KEYVAL, and MPI_WIN_CREATE_KEYVAL. -- Fixes for Solaris oversubscription detection. -- If the PML determines it can't reach a peer process, print a - slightly more helpful message. Thanks to Nick Edmonds for the - suggestion. -- Make btl_openib_if_include/exclude function the same way - btl_tcp_if_include/exclude works (i.e., supplying an _include list - overrides supplying an _exclude list). -- Apply more scalable reachability algorithm on platforms with more - than 8 TCP interfaces. -- Various assembly code updates for more modern platforms / compilers. -- Relax restrictions on using certain kinds of MPI datatypes with - one-sided operations. Users beware; not all MPI datatypes are valid - for use with one-sided operations! -- Improve behavior of MPI_COMM_SPAWN with regards to --bynode. -- Various threading fixes in the openib BTL and other core pieces of - Open MPI. -- Various help file and man pages updates. -- Various FreeBSD and NetBSD updates and fixes. Thanks to Kevin - Buckley and Aleksej Saushev for their work. -- Fix case where freeing communicators in MPI_FINALIZE could cause - process failures. -- Print warnings if shared memory state files are opened on what look - like networked filesystems. -- Update libevent to v1.4.13. -- Allow propagating signals to processes that call fork(). -- Fix bug where MPI_GATHER was sometimes incorrectly examining the - datatype on non-root processes. Thanks to Michael Hofmann for - investigating the issue. -- Various Microsoft Windows fixes. -- Various Catamount fixes. -- Various checkpoint / restart fixes. -- Xgrid support has been removed until it can be fixed (patches - would be welcome). -- Added simplistic "libompitrace" contrib package. Using the MPI - profiling interface, it essentially prints out to stderr when select - MPI functions are invoked. 
-- Update bundled VampirTrace to v5.8.2. -- Add pkg-config(1) configuration files for ompi, ompi-c, ompi-cxx, - ompi-f77, ompi-f90. See the README for more details. -- Removed the libopenmpi_malloc library (added in the v1.3 series) - since it is no longer necessary -- Add several notifier plugins (generally used when Open MPI detects - system/network administrator-worthy problems); each have their own - MCA parameters to govern their usage. See "ompi_info --param - notifier " for more details. - - command to execute arbitrary commands (e.g., run a script). - - file to send output to a file. - - ftb to send output to the Fault Tolerant Backplane (see - https://wiki.mcs.anl.gov/cifts/index.php/CIFTS) - - hnp to send the output to mpirun. - - smtp (requires libesmtp) to send an email. - -1.4.5: 12 Feb 2012 ------------------- - -- Fixed the --disable-memory-manager configure switch. - (** also to appear in 1.5.5) -- Fix typos in code and man pages. Thanks to Fujitsu for these fixes. - (** also to appear in 1.5.5) -- Improve management of the registration cache; when full, try freeing - old entries and attempt to re-register. -- Fixed a data packing pointer alignment issue. Thanks to Fujitsu - for the patch. - (** also to appear in 1.5.5) -- Add ability to turn off warning about having the shared memory backing - store over a networked filesystem. Thanks to Chris Samuel for this - suggestion. - (** also to appear in 1.5.5) -- Removed an unnecessary memmove() and plugged a couple of small memory leaks - in the openib OOB connection setup code. -- Fixed some QLogic bugs. Thanks to Mark Debbage from QLogic for the patches. -- Fixed problem with MPI_IN_PLACE and other sentinel Fortran constants - on OS X. - (** also to appear in 1.5.5) -- Fix SLURM cpus-per-task allocation. - (** also to appear in 1.5.5) -- Fix the datatype engine for when data left over from the previous - pack was larger than the allowed space in the pack buffer. 
Thanks to - Yuki Matsumoto and Takahiro Kawashima for the bug report and the - patch. -- Fix Fortran value for MPI_MAX_PORT_NAME. Thanks to Enzo Dari for - raising the issue. -- Workaround an Intel compiler v12.1.0 2011.6.233 vector optimization - bug. -- Fix issues on Solaris with the openib BTL. -- Fixes for the Oracle Studio 12.2 Fortran compiler. -- Update iWARP parameters for the Intel NICs. - (** also to appear in 1.5.5) -- Fix obscure cases where MPI_ALLGATHER could crash. Thanks to Andrew - Senin for reporting the problem. - (** also to appear in 1.5.5) - - -1.4.4: 11 Oct 2011 ------------------- - -- Modified a memcpy() call in the openib btl connection setup to use - memmove() instead because of the possibility of an overlapping - copy (as identified by valgrind). -- Changed use of sys_timer_get_cycles() to the more appropriate - wrapper: opal_timer_base_get_cycles(). Thanks to Jani Monoses - for this fix. -- Corrected the reported default value of btl_openib_ib_timeout - in the "IB retries exceeded" error message. Thanks to Kevin Buckley - for this correction. -- Increased rdmacm address resolution timeout from 1s to 30s & - updated Chelsio T4 openib BTL defaults. Thanks to Steve Wise - for these updates. - (** also to appear in 1.5.5) -- Ensure that MPI_Accumulate error return in 1.4 is consistent with - 1.5.x and trunk. -- Allow the word "slots" in rankfiles (i.e., not just "slot"). - (** also appeared in 1.5.4) -- Add Mellanox ConnectX 3 device IDs to the openib BTL defaults. - (** also appeared in 1.5.4) -- Update description of btl_openib_cq_size to be more accurate. -- Ensure mpirun's --xterm options leaves sessions attached. - (** also appeared in 1.5.4) -- Update to allow more than 128 entries in an appfile. - (** also appeared in 1.5.4) -- Update description of btl_openib_cq_size to be more accurate. 
- (** also appeared in 1.5.4)
-- Fix for deadlock when handling recursive attribute keyval deletions
- (e.g., when using ROMIO with MPI_THREAD_MULTIPLE).
-- Fix indexed datatype leaks. Thanks to Pascal Deveze for supplying
- the initial patch. (** also appeared in 1.5.4)
-- Fixed the F90 types of the sendcounts and displs parameters to
- MPI_SCATTERV. Thanks to Stanislav Sazykin for identifying the issue.
- (** also appeared in 1.5.4)
-- Exclude opal/libltdl from "make distclean" when --disable-dlopen is
- used. Thanks to David Gunter for reporting the issue.
-- Fixed a segv in MPI_Comm_create when called with GROUP_EMPTY.
- Thanks to Dominik Goeddeke for finding this.
- (** also appeared in 1.5.4)
-- Fixed return codes from MPI_PROBE and MPI_IPROBE.
- (** also appeared in 1.5.4)
-- Fixed undefined symbol error when using the vtf90 profiling tool.
-- Fix for referencing an uninitialized variable in DPM ORTE. Thanks
- to Avinash Malik for reporting the issue.
-- Fix for correctly handling multi-token args when using debuggers.
-- Eliminated the unneeded u_int*_t datatype definitions.
-- Change in ORTE DPM to get around gcc 4.[45].x compiler warnings
- about possibly calling free() on a non-heap variable, even though it
- will never happen because the refcount will never go to zero.
-- Fixed incorrect text in MPI_File_set_view man page.
-- Fix in MPI_Init_thread for checkpoint/restart.
-- Fix for libtool issue when using pgcc to compile ompi in conjunction
- with the -tp option.
-- Fixed a race condition in osc_rdma_sync. Thanks to Guillaume
- Thouvenin for finding this issue.
-- Clarification of MPI_Init_thread man page.
-- Fixed an indexing problem in precondition_transports.
-- Fixed a problem in which duplicated libs were being specified for
- linking. Thanks to Hicham Mouline for noticing it.
-- Various autogen.sh fixes.
-- Fix for memchecking buffers during MPI_*INIT.
-- Man page cleanups. Thanks to Jeremiah Willcock and Jed Brown.
-- Fix for VT rpmbuild on RHEL5. -- Support Solaris legacy munmap prototype changes. - (** also appeared in 1.5.4) -- Expands app_idx to int32_t to allow more than 127 app_contexts. -- Guard the inclusion of execinfo.h since not all platforms have it. Thanks - to Aleksej Saushev for identifying this issue. - (** also appeared in 1.5.4) -- Fix to avoid possible environment corruption. Thanks to Peter Thompson - for identifying the issue and supplying a patch. - (** also appeared in 1.5.4) -- Fixed paffinity base MCA duplicate registrations. Thanks to Gus - Correa for bringing this to our attention. -- Fix recursive locking bug when MPI-IO was used with - MPI_THREAD_MULTIPLE. (** also appeared in 1.5.4) -- F90 MPI API fixes. -- Fixed a misleading MPI_Bcast error message. Thanks to Jeremiah - Willcock for reporting this. -- Added to ptmalloc's hooks.c (it's not always included - by default on some systems). -- Libtool patch to get around a build problem when using the IBM XL - compilers. -- Fix to detect and avoid overlapping memcpy(). Thanks to - Francis Pellegrini for identifying the issue. -- Fix to allow ompi to work on top of RoCE vLANs. -- Restored a missing debugger flag to support TotalView. Thanks to - David Turner and the TV folks for supplying the fix. -- Updated SLURM support to 1.5.1. -- Removed an extraneous #include from the TCP BTL. -- When specifying OOB ports, fix to convert the ports into network - byte order before binding. -- Fixed use of memory barriers in the SM BTL. This fixed segv's when - compiling with Intel 10.0.025 or PGI 9.0-3. -- Fix to prevent the SM BTL from creating its mmap'd file in - directories that are remotely mounted. - - -1.4.3: 6 Sep 2010 ------------------ - -- Fixed handling of the array_of_argv parameter in the Fortran - binding of MPI_COMM_SPAWN_MULTIPLE (** also to appear: 1.5). -- Fixed a problem with the Fortran binding for - MPI_FILE_CREATE_ERRHANDLER. 
Thanks to Secretan Yves for identifying
- the issue (** also to appear: 1.5).
-- Updates to the LSF PLM to ensure that the path is correctly passed.
- Thanks to Teng Lin for the patch (** also to appear: 1.5).
-- Fixes for the F90 MPI_COMM_SET_ERRHANDLER and MPI_WIN_SET_ERRHANDLER
- bindings. Thanks to Paul Kapinos for pointing out the issue.
- (** also to appear: 1.5).
-- Fixed various MPI_THREAD_MULTIPLE race conditions.
-- Fixed an issue with an undeclared variable from ptmalloc2 munmap on
- BSD systems.
-- Fixes for BSD interface detection.
-- Various other BSD fixes. Thanks to Kevin Buckley helping to track
- all of this down.
-- Fixed issues with the use of the -nper* mpirun command line arguments.
-- Fixed an issue with coll tuned dynamic rules.
-- Fixed an issue with the use of OPAL_DESTDIR being applied too aggressively.
-- Fixed an issue with one-sided xfers when the displacement exceeds 2GBytes.
-- Change to ensure TotalView works properly on Darwin.
-- Added support for Visual Studio 2010.
-- Fix to ensure proper placement of VampirTrace header files.
-- Needed to add volatile keyword to a variable used in debugging
- (MPIR_being_debugged).
-- Fixed a bug in inter-allgather.
-- Fixed malloc(0) warnings.
-- Corrected a typo in the MPI_Comm_size man page (intra -> inter). Thanks
- to Simon number.cruncher for pointing this out.
-- Fixed a SegV in orted when given more than 127 app_contexts.
-- Removed xgrid source code from the 1.4 branch since it is no longer
- supported in the 1.4 series.
-- Removed the --enable-opal-progress-threads config option since
- opal progress thread support does not work in 1.4.x.
-- Fixed a defect in VampirTrace's vtfilter.
-- Fixed wrong Windows path in hnp_contact.
-- Removed the requirement for a paffinity component.
-- Removed a hardcoded limit of 64 interconnected jobs.
-- Fix to allow singletons to use ompi-server for rendezvous.
-- Fixed bug in output-filename option.
-- Fix to correctly handle failures in mx_init().
-- Fixed a potential Fortran memory leak. -- Fixed an incorrect branch in some ppc32 assembly code. Thanks - to Matthew Clark for this fix. -- Remove use of undocumented AS_VAR_GET macro during configuration. -- Fixed an issue with VampirTrace's wrapper for MPI_init_thread. -- Updated mca-btl-openib-device-params.ini file with various new vendor id's. -- Configuration fixes to ensure CPPFLAGS in handled properly if a non-standard - valgrind location was specified. -- Various man page updates - - -1.4.2: 4 May 2010 ------------------ - -- Fixed problem when running in heterogeneous environments. Thanks to - Timur Magomedov for helping to track down this issue. -- Update LSF support to ensure that the path is passed correctly. - Thanks to Teng Lin for submitting a patch. -- Fixed some miscellaneous oversubscription detection bugs. -- IBM re-licensed its LoadLeveler code to be BSD-compliant. -- Various OpenBSD and NetBSD build and run-time fixes. Many thanks to - the OpenBSD community for their time, expertise, and patience - getting these fixes incorporated into Open MPI's main line. -- Various fixes for multithreading deadlocks, race conditions, and - other nefarious things. -- Fixed ROMIO's handling of "nearly" contiguous issues (e.g., with - non-zero true_lb). Thanks for Pascal Deveze for the patch. -- Bunches of Windows build fixes. Many thanks to several Windows - users for their help in improving our support on Windows. -- Now allow the graceful failover from MTLs to BTLs if no MTLs can - initialize successfully. -- Added "clobber" information to various atomic operations, fixing - erroneous behavior in some newer versions of the GNU compiler suite. -- Update various iWARP and InfiniBand device specifications in the - OpenFabrics .ini support file. -- Fix the use of hostfiles when a username is supplied. -- Various fixes for rankfile support. -- Updated the internal version of VampirTrace to 5.4.12. 
-- Fixed OS X TCP wireup issues having to do with IPv4/IPv6 confusion - (see https://svn.open-mpi.org/trac/ompi/changeset/22788 for more - details). -- Fixed some problems in processor affinity support, including when - there are "holes" in the processor namespace (e.g., offline - processors). -- Ensure that Open MPI's "session directory" (usually located in /tmp) - is cleaned up after process termination. -- Fixed some problems with the collective "hierarch" implementation - that could occur in some obscure conditions. -- Various MPI_REQUEST_NULL, API parameter checking, and attribute - error handling fixes. Thanks to Lisandro Dalcín for reporting the - issues. -- Fix case where MPI_GATHER erroneously used datatypes on non-root - nodes. Thanks to Michael Hofmann for investigating the issue. -- Patched ROMIO support for PVFS2 > v2.7 (patch taken from MPICH2 - version of ROMIO). -- Fixed "mpirun --report-bindings" behavior when used with - mpi_paffinity_alone=1. Also fixed mpi_paffinity_alone=1 behavior - with non-MPI applications. Thanks to Brice Goglin for noticing the - problem. -- Ensure that all OpenFabrics devices have compatible receive_queues - specifications before allowing them to communicate. See the lengthy - comment in https://svn.open-mpi.org/trac/ompi/changeset/22592 for - more details. -- Fix some issues with checkpoint/restart. -- Improve the pre-MPI_INIT/post-MPI_FINALIZE error messages. -- Ensure that loopback addresses are never advertised to peer - processes for RDMA/OpenFabrics support. -- Fixed a CSUM PML false positive. -- Various fixes for Catamount support. -- Minor update to wrapper compilers in how user-specific argv is - ordered on the final command line. Thanks to Jed Brown for the - suggestions. -- Removed flex.exe binary from Open MPI tarballs; now generate flex - code from a newer (Windows-friendly) flex when we make official - tarballs. 
- - -1.4.1: 15 Jan 2010 ------------------- - -- Update to PLPA v1.3.2, addressing a licensing issue identified by - the Fedora project. See - https://svn.open-mpi.org/trac/plpa/changeset/262 for details. -- Add check for malformed checkpoint metadata files (Ticket #2141). -- Fix error path in ompi-checkpoint when not able to checkpoint - (Ticket #2138). -- Cleanup component release logic when selecting checkpoint/restart - enabled components (Ticket #2135). -- Fixed VT node name detection for Cray XT platforms, and fixed some - broken VT documentation files. -- Fix a possible race condition in tearing down RDMA CM-based - connections. -- Relax error checking on MPI_GRAPH_CREATE. Thanks to David Singleton - for pointing out the issue. -- Fix a shared memory "hang" problem that occurred on x86/x86_64 - platforms when used with the GNU >=4.4.x compiler series. -- Add fix for Libtool 2.2.6b's problems with the PGI 10.x compiler - suite. Inspired directly from the upstream Libtool patches that fix - the issue (but we need something working before the next Libtool - release). - - -1.4: 8 Dec 2009 ---------------- - -The *only* change in the Open MPI v1.4 release (as compared to v1.3.4) -was to update the embedded version of Libtool's libltdl to address a -potential security vulnerability. Specifically: Open MPI v1.3.4 was -created with GNU Libtool 2.2.6a; Open MPI v1.4 was created with GNU -Libtool 2.2.6b. There are no other changes between Open MPI v1.3.4 -and v1.4. - - -1.3.4: 13 Feb 2010 ------------------- - -- Fix some issues in OMPI's SRPM with regard to shell_scripts_basename - and its use with mpi-selector. Thanks to Bill Johnstone for - pointing out the problem. -- Added many new MPI job process affinity options to mpirun. See the - newly-updated mpirun(1) man page for details. -- Several updates to mpirun's XML output. -- Update to fix a few Valgrind warnings with regards to the ptmalloc2 - allocator and Open MPI's use of PLPA. 
-- Many updates and fixes to the (non-default) "sm" collective
- component (i.e., native shared memory MPI collective operations).
-- Updates and fixes to some MPI_COMM_SPAWN_MULTIPLE corner cases.
-- Fix some internal copying functions in Open MPI's use of PLPA.
-- Correct some SLURM nodelist parsing logic that may have interfered
- with large jobs. Additionally, per advice from the SLURM team,
- change the environment variable that we use for obtaining the job's
- allocation.
-- Revert to an older, safer (but slower) communicator ID allocation
- algorithm.
-- Fixed minimum distance finding for OpenFabrics devices in the openib
- BTL.
-- Relax the parameter checking MPI_CART_CREATE a bit.
-- Fix MPI_COMM_SPAWN[_MULTIPLE] to only error-check the info arguments
- on the root process. Thanks to Federico Golfre Andreasi for
- reporting the problem.
-- Fixed some BLCR configure issues.
-- Fixed a potential deadlock when the openib BTL was used with
- MPI_THREAD_MULTIPLE.
-- Fixed dynamic rules selection for the "tuned" coll component.
-- Added a launch progress meter to mpirun (useful for large jobs; set
- the orte_report_launch_progress MCA parameter to 1 to see it).
-- Reduced the number of file descriptors consumed by each MPI process.
-- Add new device IDs for Chelsio T3 RNICs to the openib BTL config file.
-- Fix some CRS self component issues.
-- Added some MCA parameters to the PSM MTL to tune its run-time
- behavior.
-- Fix some VT issues with MPI_BOTTOM/MPI_IN_PLACE.
-- Man page updates from the Debian Open MPI package maintainers.
-- Add cycle counter support for the Alpha and Sparc platforms.
-- Pass visibility flags to libltdl's configure script, resulting in
- those symbols being hidden. This appears to mainly solve the
- problem of applications attempting to use different versions of
- libltdl from that used to build Open MPI.
- - -1.3.3: 14 Jul 2009 ------------------- - -- Fix a number of issues with the openib BTL (OpenFabrics) RDMA CM, - including a memory corruption bug, a shutdown deadlock, and a route - timeout. Thanks to David McMillen and Hal Rosenstock for help in - tracking down the issues. -- Change the behavior of the EXTRA_STATE parameter that is passed to - Fortran attribute callback functions: this value is now stored - internally in MPI -- it no longer references the original value - passed by MPI_*_CREATE_KEYVAL. -- Allow the overriding RFC1918 and RFC3330 for the specification of - "private" networks, thereby influencing Open MPI's TCP - "reachability" computations. -- Improve flow control issues in the sm btl, by both tweaking the - shared memory progression rules and by enabling the "sync" collective - to barrier every 1,000th collective. -- Various fixes for the IBM XL C/C++ v10.1 compiler. -- Allow explicit disabling of ptmalloc2 hooks at runtime (e.g., enable - support for Debian's builtroot system). Thanks to Manuel Prinz and - the rest of the Debian crew for helping identify and fix this issue. -- Various minor fixes for the I/O forwarding subsystem. -- Big endian iWARP fixes in the Open Fabrics RDMA CM support. -- Update support for various OpenFabrics devices in the openib BTL's - .ini file. -- Fixed undefined symbol issue with Open MPI's parallel debugger - message queue support so it can be compiled by Sun Studio compilers. -- Update MPI_SUBVERSION to 1 in the Fortran bindings. -- Fix MPI_GRAPH_CREATE Fortran 90 binding. -- Fix MPI_GROUP_COMPARE behavior with regards to MPI_IDENT. Thanks to - Geoffrey Irving for identifying the problem and supplying the fix. -- Silence gcc 4.1 compiler warnings about type punning. Thanks to - Number Cruncher for the fix. -- Added more Valgrind and other memory-cleanup fixes. Thanks to - various Open MPI users for help with these issues. -- Miscellaneous VampirTrace fixes. 
-- More fixes for openib credits in heavy-congestion scenarios. -- Slightly decrease the latency in the openib BTL in some conditions - (add "send immediate" support to the openib BTL). -- Ensure to allow MPI_REQUEST_GET_STATUS to accept an - MPI_STATUS_IGNORE parameter. Thanks to Shaun Jackman for the bug - report. -- Added Microsoft Windows support. See README.WINDOWS file for - details. - - -1.3.2: 22 Apr 2009 ------------------- - -- Fixed a potential infinite loop in the openib BTL that could occur - in senders in some frequent-communication scenarios. Thanks to Don - Wood for reporting the problem. -- Add a new checksum PML variation on ob1 (main MPI point-to-point - communication engine) to detect memory corruption in node-to-node - messages -- Add a new configuration option to add padding to the openib - header so the data is aligned -- Add a new configuration option to use an alternative checksum algo - when using the checksum PML -- Fixed a problem reported by multiple users on the mailing list that - the LSF support would fail to find the appropriate libraries at - run-time. -- Allow empty shell designations from getpwuid(). Thanks to Sergey - Koposov for the bug report. -- Ensure that mpirun exits with non-zero status when applications die - due to user signal. Thanks to Geoffroy Pignot for suggesting the - fix. -- Ensure that MPI_VERSION / MPI_SUBVERSION match what is returned by - MPI_GET_VERSION. Thanks to Rob Egan for reporting the error. -- Updated MPI_*KEYVAL_CREATE functions to properly handle Fortran - extra state. -- A variety of ob1 (main MPI point-to-point communication engine) bug - fixes that could have caused hangs or seg faults. -- Do not install Open MPI's signal handlers in MPI_INIT if there are - already signal handlers installed. Thanks to Kees Verstoep for - bringing the issue to our attention. -- Fix GM support to not seg fault in MPI_INIT. -- Various VampirTrace fixes. -- Various PLPA fixes. 
-- No longer create BTLs for invalid (TCP) devices. -- Various man page style and lint cleanups. -- Fix critical OpenFabrics-related bug noted here: - https://www.open-mpi.org/community/lists/announce/2009/03/0029.php. - Open MPI now uses a much more robust memory intercept scheme that is - quite similar to what is used by MX. The use of "-lopenmpi-malloc" - is no longer necessary, is deprecated, and is expected to disappear - in a future release. -lopenmpi-malloc will continue to work for the - duration of the Open MPI v1.3 and v1.4 series. -- Fix some OpenFabrics shutdown errors, both regarding iWARP and SRQ. -- Allow the udapl BTL to work on Solaris platforms that support - relaxed PCI ordering. -- Fix problem where the mpirun would sometimes use rsh/ssh to launch on - the localhost (instead of simply forking). -- Minor SLURM stdin fixes. -- Fix to run properly under SGE jobs. -- Scalability and latency improvements for shared memory jobs: convert - to using one message queue instead of N queues. -- Automatically size the shared-memory area (mmap file) to match - better what is needed; specifically, so that large-np jobs will start. -- Use fixed-length MPI predefined handles in order to provide ABI - compatibility between Open MPI releases. -- Fix building of the posix paffinity component to properly get the - number of processors in loosely tested environments (e.g., - FreeBSD). Thanks to Steve Kargl for reporting the issue. -- Fix --with-libnuma handling in configure. Thanks to Gus Correa for - reporting the problem. - - -1.3.1: 19 Mar 2009 ------------------- - -- Added "sync" coll component to allow users to synchronize every N - collective operations on a given communicator. -- Increased the default values of the IB and RNR timeout MCA parameters. -- Fix a compiler error noted by Mostyn Lewis with the PGI 8.0 compiler. -- Fix an error that prevented stdin from being forwarded if the - rsh launcher was in use. 
Thanks to Branden Moore for pointing out
- the problem.
-- Correct a case where the added datatype is considered as contiguous but
- has gaps in the beginning.
-- Fix an error that limited the number of comm_spawns that could
- simultaneously be running in some environments
-- Correct a corner case in OB1's GET protocol for long messages; the
- error could sometimes cause MPI jobs using the openib BTL to hang.
-- Fix a bunch of bugs in the IO forwarding (IOF) subsystem and add some
- new options to output to files and redirect output to xterm. Thanks to
- Jody Weissmann for helping test out many of the new fixes and
- features.
-- Fix SLURM race condition.
-- Fix MPI_File_c2f(MPI_FILE_NULL) to return 0, not -1. Thanks to
- Lisandro Dalcín for the bug report.
-- Fix the DSO build of tm PLM.
-- Various fixes for size disparity between C int's and Fortran
- INTEGER's. Thanks to Christoph van Wullen for the bug report.
-- Ensure that mpirun exits with a non-zero exit status when daemons or
- processes abort or fail to launch.
-- Various fixes to work around Intel (NetEffect) RNIC behavior.
-- Various fixes for mpirun's --preload-files and --preload-binary
- options.
-- Fix the string name in MPI::ERRORS_THROW_EXCEPTIONS.
-- Add ability to forward SIGTSTP and SIGCONT to MPI processes if you
- set the MCA parameter orte_forward_job_control to 1.
-- Allow the sm BTL to allocate larger amounts of shared memory if
- desired (helpful for very large multi-core boxen).
-- Fix a few places where we used PATH_MAX instead of OPAL_PATH_MAX,
- leading to compile problems on some platforms. Thanks to Andrea Iob
- for the bug report.
-- Fix mca_btl_openib_warn_no_device_params_found MCA parameter; it
- was accidentally being ignored.
-- Fix some run-time issues with the sctp BTL.
-- Ensure that RTLD_NEXT exists before trying to use it (e.g., it
- doesn't exist on Cygwin). Thanks to Gustavo Seabra for reporting
- the issue.
-- Various fixes to VampirTrace, including fixing compile errors on - some platforms. -- Fixed missing MPI_Comm_accept.3 man page; fixed minor issue in - orterun.1 man page. Thanks to Dirk Eddelbuettel for identifying the - problem and submitting a patch. -- Implement the XML formatted output of stdout/stderr/stddiag. -- Fixed mpirun's -wdir switch to ensure that working directories for - multiple app contexts are properly handled. Thanks to Geoffroy - Pignot for reporting the problem. -- Improvements to the MPI C++ integer constants: - - Allow MPI::SEEK_* constants to be used as constants - - Allow other MPI C++ constants to be used as array sizes -- Fix minor problem with orte-restart's command line options. See - ticket #1761 for details. Thanks to Gregor Dschung for reporting - the problem. - - -1.3: 19 Jan 2009 ----------------- - -- Extended the OS X 10.5.x (Leopard) workaround for a problem when - assembly code is compiled with -g[0-9]. Thanks to Barry Smith for - reporting the problem. See ticket #1701. -- Disabled MPI_REAL16 and MPI_COMPLEX32 support on platforms where the - bit representation of REAL*16 is different than that of the C type - of the same size (usually long double). Thanks to Julien Devriendt - for reporting the issue. See ticket #1603. -- Increased the size of MPI_MAX_PORT_NAME to 1024 from 36. See ticket #1533. -- Added "notify debugger on abort" feature. See tickets #1509 and #1510. - Thanks to Seppo Sahrakropi for the bug report. -- Upgraded Open MPI tarballs to use Autoconf 2.63, Automake 1.10.1, - Libtool 2.2.6a. -- Added missing MPI::Comm::Call_errhandler() function. Thanks to Dave - Goodell for bringing this to our attention. -- Increased MPI_SUBVERSION value in mpi.h to 1 (i.e., MPI 2.1). -- Changed behavior of MPI_GRAPH_CREATE, MPI_TOPO_CREATE, and several - other topology functions per MPI-2.1. -- Fix the type of the C++ constant MPI::IN_PLACE. 
-- Various enhancements to the openib BTL:
- - Added btl_openib_if_[in|ex]clude MCA parameters for
- including/excluding comma-delimited lists of HCAs and ports.
- - Added RDMA CM support, including btl_openib_cpc_[in|ex]clude MCA
- parameters
- - Added NUMA support to only use "near" network adapters
- - Added "Bucket SRQ" (BSRQ) support to better utilize registered
- memory, including btl_openib_receive_queues MCA parameter
- - Added ConnectX XRC support (and integrated with BSRQ)
- - Added btl_openib_ib_max_inline_data MCA parameter
- - Added iWARP support
- - Revamped flow control mechanisms to be more efficient
- - "mpi_leave_pinned=1" is now the default when possible,
- automatically improving performance for large messages when
- application buffers are re-used
-- Eliminated duplicated error messages when multiple MPI processes fail
- with the same error.
-- Added NUMA support to the shared memory BTL.
-- Add Valgrind-based memory checking for MPI-semantic checks.
-- Add support for some optional Fortran datatypes (MPI_LOGICAL1,
- MPI_LOGICAL2, MPI_LOGICAL4 and MPI_LOGICAL8).
-- Remove the use of the STL from the C++ bindings.
-- Added support for Platform/LSF job launchers. Must be Platform LSF
- v7.0.2 or later.
-- Updated ROMIO with the version from MPICH2 1.0.7.
-- Added RDMA capable one-sided component (called rdma), which
- can be used with BTL components that expose a full one-sided
- interface.
-- Added the optional datatype MPI_REAL2. As this is added to the "end of"
- predefined datatypes in the fortran header files, there will not be
- any compatibility issues.
-- Added Portable Linux Processor Affinity (PLPA) for Linux.
-- Addition of a finer symbols export control via the visibility feature
- offered by some compilers.
-- Added checkpoint/restart process fault tolerance support. Initially
- support a LAM/MPI-like protocol.
-- Removed "mvapi" BTL; all InfiniBand support now uses the OpenFabrics
- driver stacks ("openib" BTL).
-- Added more stringent MPI API parameter checking to help user-level -   debugging. -- The ptmalloc2 memory manager component is now by default built as -   a standalone library named libopenmpi-malloc.  Users wanting to -   use leave_pinned with ptmalloc2 will now need to link the library -   into their application explicitly.  All other users will use the -   libc-provided allocator instead of Open MPI's ptmalloc2.  This change -   may be overridden with the configure option enable-ptmalloc2-internal -- The leave_pinned options will now default to using mallopt on -   Linux in the cases where ptmalloc2 was not linked in.  mallopt -   will also only be available if munmap can be intercepted (the -   default whenever Open MPI is not compiled with --without-memory- -   manager). -- Open MPI will now complain and refuse to use leave_pinned if -   no memory intercept / mallopt option is available. -- Add option of using Perl-based wrapper compilers instead of the -   C-based wrapper compilers.  The Perl-based version does not -   have the features of the C-based version, but does work better -   in cross-compile environments. - - 1.2.9: 14 Feb 2009 ------------------ - -- Fix a segfault when using one-sided communications on some forms of derived -   datatypes.  Thanks to Dorian Krause for reporting the bug. See #1715. -- Fix an alignment problem affecting one-sided communications on -   some architectures (e.g., SPARC64). See #1738. -- Fix compilation on Solaris when thread support is enabled in Open MPI -   (e.g., when using --with-threads).  See #1736. -- Correctly take into account the MTU that an OpenFabrics device port -   is using.  See #1722 and -   https://bugs.openfabrics.org/show_bug.cgi?id=1369. -- Fix two datatype engine bugs.  See #1677. -   Thanks to Peter Kjellstrom for the bugreport. -- Fix the bml r2 help filename so the help message can be found.  See #1623. -- Fix a compilation problem on RHEL4U3 with the PGI 32 bit compiler -   caused by <sched.h>.  See ticket #1613.
-- Fix the --enable-cxx-exceptions configure option. See ticket #1607. -- Properly handle when the MX BTL cannot open an endpoint. See ticket #1621. -- Fix a double free of events on the tcp_events list. See ticket #1631. -- Fix a buffer overrun in opal_free_list_grow (called by MPI_Init). -   Thanks to Patrick Farrell for the bugreport and Stephan Kramer for -   the bugfix.  See ticket #1583. -- Fix a problem setting OPAL_PREFIX for remote sh-based shells. -   See ticket #1580. - - 1.2.8: 14 Oct 2008 ------------------ - -- Tweaked one memory barrier in the openib component to be more conservative. -   May fix a problem observed on PPC machines.  See ticket #1532. -- Fix OpenFabrics IB partition support. See ticket #1557. -- Restore v1.1 feature that sourced .profile on remote nodes if the default -   shell will not do so (e.g. /bin/sh and /bin/ksh).  See ticket #1560. -- Fix segfault in MPI_Init_thread() if ompi_mpi_init() fails. See ticket #1562. -- Adjust SLURM support to first look for $SLURM_JOB_CPUS_PER_NODE instead of -   the deprecated $SLURM_TASKS_PER_NODE environment variable.  This change -   may be *required* when using SLURM v1.2 and above.  See ticket #1536. -- Fix the MPIR_Proctable to be in process rank order. See ticket #1529. -- Fix a regression introduced in 1.2.6 for the IBM eHCA. See ticket #1526. - - 1.2.7: 28 Aug 2008 ------------------ - -- Add some Sun HCA vendor IDs.  See ticket #1461. -- Fixed a memory leak in MPI_Alltoallw when called from Fortran. -   Thanks to Dave Grote for the bugreport.  See ticket #1457. -- Only link in libutil when it is needed/desired.  Thanks to -   Brian Barret for diagnosing and fixing the problem.  See ticket #1455. -- Update some QLogic HCA vendor IDs.  See ticket #1453. -- Fix F90 binding for MPI_CART_GET.  Thanks to Scott Beardsley for -   bringing it to our attention. See ticket #1429. -- Remove a spurious warning message generated in/by ROMIO. See ticket #1421.
-- Fix a bug where command-line MCA parameters were not overriding - MCA parameters set from environment variables. See ticket #1380. -- Fix a bug in the AMD64 atomics assembly. Thanks to Gabriele Fatigati - for the bug report and bugfix. See ticket #1351. -- Fix a gather and scatter bug on intercommunicators when the datatype - being moved is 0 bytes. See ticket #1331. -- Some more man page fixes from the Debian maintainers. - See tickets #1324 and #1329. -- Have openib BTL (OpenFabrics support) check for the presence of - /sys/class/infiniband before allowing itself to be used. This check - prevents spurious "OMPI did not find RDMA hardware!" notices on - systems that have the software drivers installed, but no - corresponding hardware. See tickets #1321 and #1305. -- Added vendor IDs for some ConnectX openib HCAs. See ticket #1311. -- Fix some RPM specfile inconsistencies. See ticket #1308. - Thanks to Jim Kusznir for noticing the problem. -- Removed an unused function prototype that caused warnings on - some systems (e.g., OS X). See ticket #1274. -- Fix a deadlock in inter-communicator scatter/gather operations. - Thanks to Martin Audet for the bug report. See ticket #1268. - - -1.2.6: 7 Apr 2008 ------------------ - -- Fix a bug in the inter-allgather for asymmetric inter-communicators. - Thanks to Martin Audet for the bug report. See ticket #1247. -- Fix a bug in the openib BTL when setting the CQ depth. Thanks - to Jon Mason for the bug report and fix. See ticket #1245. -- On Mac OS X Leopard, the execinfo component will be used for - backtraces, making for a more durable solution. See ticket #1246. -- Added vendor IDs for some QLogic DDR openib HCAs. See ticket #1227. -- Updated the URL to get the latest config.guess and config.sub files. - Thanks to Ralf Wildenhues for the bug report. See ticket #1226. -- Added shared contexts support to PSM MTL. See ticket #1225. 
-- Added pml_ob1_use_early_completion MCA parameter to allow users - to turn off the OB1 early completion semantic and avoid "stall" - problems seen on InfiniBand in some cases. See ticket #1224. -- Sanitized some #define macros used in mpi.h to avoid compiler warnings - caused by MPI programs built with different autoconf versions. - Thanks to Ben Allan for reporting the problem, and thanks to - Brian Barrett for the fix. See ticket #1220. -- Some man page fixes from the Debian maintainers. See ticket #1219. -- Made the openib BTL a bit more resilient in the face of driver - errors. See ticket #1217. -- Fixed F90 interface for MPI_CART_CREATE. See ticket #1208. - Thanks to Michal Charemza for reporting the problem. -- Fixed some C++ compiler warnings. See ticket #1203. -- Fixed formatting of the orterun man page. See ticket #1202. - Thanks to Peter Breitenlohner for the patch. - - -1.2.5: 8 Jan 2008 ------------------ - -- Fixed compile issue with open() on Fedora 8 (and newer) platforms. - Thanks to Sebastian Schmitzdorff for noticing the problem. -- Added run-time warnings during MPI_INIT when MPI_THREAD_MULTIPLE - and/or progression threads are used (the OMPI v1.2 series does not - support these well at all). -- Better handling of ECONNABORTED from connect on Linux. Thanks to - Bob Soliday for noticing the problem; thanks to Brian Barrett for - submitting a patch. -- Reduce extraneous output from OOB when TCP connections must - be retried. Thanks to Brian Barrett for submitting a patch. -- Fix for ConnectX devices and OFED 1.3. See ticket #1190. -- Fixed a configure problem for Fortran 90 on Cray systems. Ticket #1189. -- Fix an uninitialized variable in the error case in opal_init.c. - Thanks to Åke Sandgren for pointing out the mistake. -- Fixed a hang in configure if $USER was not defined. Thanks to - Darrell Kresge for noticing the problem. See ticket #900. -- Added support for parallel debuggers even when we have an optimized build. - See ticket #1178. 
-- Worked around a bus error in the Mac OS X 10.5.X (Leopard) linker when - compiling Open MPI with -g. See ticket #1179. -- Removed some warnings about 'rm' from Mac OS X 10.5 (Leopard) builds. -- Fix the handling of mx_finalize(). See ticket #1177. - Thanks to Åke Sandgren for bringing this issue to our attention. -- Fixed minor file descriptor leak in the Altix timer code. Thanks to - Paul Hargrove for noticing the problem and supplying the fix. -- Fix a problem when using a different compiler for C and Objective C. - See ticket #1153. -- Fix segfault in MPI_COMM_SPAWN when the user specified a working - directory. Thanks to Murat Knecht for reporting this and suggesting - a fix. -- A few manpage fixes from the Debian Open MPI maintainers. Thanks to - Tilman Koschnick, Sylvestre Ledru, and Dirk Eddelbuettel. -- Fixed issue with pthread detection when compilers are not all - from the same vendor. Thanks to Åke Sandgren for the bug - report. See ticket #1150. -- Fixed vector collectives in the self module. See ticket #1166. -- Fixed some data-type engine bugs: an indexing bug, and an alignment bug. - See ticket #1165. -- Only set the MPI_APPNUM attribute if it is defined. See ticket - #1164. - - -1.2.4: 26 Sep 2007 ------------------- - -- Really added support for TotalView/DDT parallel debugger message queue - debugging (it was mistakenly listed as "added" in the 1.2 release). -- Fixed a build issue with GNU/kFreeBSD. Thanks to Petr Salinger for - the patch. -- Added missing MPI_FILE_NULL constant in Fortran. Thanks to - Bernd Schubert for bringing this to our attention. -- Change such that the UDAPL BTL is now only built in Linux when - explicitly specified via the --with-udapl configure command line - switch. -- Fixed an issue with umask not being propagated when using the TM - launcher. -- Fixed behavior if number of slots is not the same on all bproc nodes. -- Fixed a hang on systems without GPR support (ex. Cray XT3/4). 
-- Prevent users of 32-bit MPI apps from requesting >= 2GB of shared - memory. -- Added a Portals MTL. -- Fix 0 sized MPI_ALLOC_MEM requests. Thanks to Lisandro Dalcín for - pointing out the problem. -- Fixed a segfault crash on large SMPs when doing collectives. -- A variety of fixes for Cray XT3/4 class of machines. -- Fixed which error handler is used when MPI_COMM_SELF is passed - to MPI_COMM_FREE. Thanks to Lisandro Dalcín for the bug report. -- Fixed compilation on platforms that don't have hton/ntoh. -- Fixed a logic problem in the fortran binding for MPI_TYPE_MATCH_SIZE. - Thanks to Jeff Dusenberry for pointing out the problem and supplying - the fix. -- Fixed a problem with MPI_BOTTOM in various places of the f77-interface. - Thanks to Daniel Spangberg for bringing this up. -- Fixed problem where MPI-optional Fortran datatypes were not - correctly initialized. -- Fixed several problems with stdin/stdout forwarding. -- Fixed overflow problems with the sm mpool MCA parameters on large SMPs. -- Added support for the DDT parallel debugger via orterun's --debug - command line option. -- Added some sanity/error checks to the openib MCA parameter parsing - code. -- Updated the udapl BTL to use RDMA capabilities. -- Allow use of the BProc head node if it was allocated to the user. - Thanks to Sean Kelly for reporting the problem and helping debug it. -- Fixed a ROMIO problem where non-blocking I/O errors were not properly - reported to the user. -- Made remote process launch check the $SHELL environment variable if - a valid shell was not otherwise found for the user. - Thanks to Alf Wachsmann for the bugreport and suggested fix. -- Added/updated some vendor IDs for a few openib HCAs. -- Fixed a couple of failures that could occur when specifying devices - for use by the OOB. -- Removed dependency on sysfsutils from the openib BTL for - libibverbs >=v1.1 (i.e., OFED 1.2 and beyond). 
- - -1.2.3: 20 Jun 2007 ------------------- - -- Fix a regression in comm_spawn functionality that inadvertently - caused the mapping of child processes to always start at the same - place. Thanks to Prakash Velayutham for helping discover the - problem. -- Fix segfault when a user's home directory is unavailable on a remote - node. Thanks to Guillaume Thomas-Collignon for bringing the issue - to our attention. -- Fix MPI_IPROBE to properly handle MPI_STATUS_IGNORE on mx and psm - MTLs. Thanks to Sophia Corwell for finding this and supplying a - reproducer. -- Fix some error messages in the tcp BTL. -- Use _NSGetEnviron instead of environ on Mac OS X so that there - are no undefined symbols in the shared libraries. -- On OS X, when MACOSX_DEPLOYMENT_TARGET is 10.3 or higher, support - building the Fortran 90 bindings as a shared library. Thanks to - Jack Howarth for his advice on making this work. -- No longer require extra include flag for the C++ bindings. -- Fix detection of weak symbols support with Intel compilers. -- Fix issue found by Josh England: ompi_info would not show framework - MCA parameters set in the environment properly. -- Rename the oob_tcp_include/exclude MCA params to oob_tcp_if_include/exclude - so that they match the naming convention of the btl_tcp_if_include/exclude - params. The old names are depreciated, but will still work. -- Add -wd as a synonym for the -wdir orterun/mpirun option. -- Fix the mvapi BTL to compile properly with compilers that do not support - anonymous unions. Thanks to Luis Kornblueh for reporting the bug. - - -1.2.2: 16 May 2007 ------------------- - -- Fix regression in 1.2.1 regarding the handling of $CC with both - absolute and relative path names. -- Fix F90 array of status dimensions. Thanks to Randy Bramley for - noticing the problem. -- Add btl_openib_ib_pkey_value MCA parameter for controlling IB port selection. -- Fixed a variety of threading/locking bugs. 
-- Fixed some compiler warnings associated with ROMIO, OS X, and gridengine. -- If pbs-config can be found, use it to look for TM support. Thanks - to Bas van der Vlies for the inspiration and preliminary work. -- Fixed a deadlock in orterun when the rsh PLS encounters some errors. - - -1.2.1: 25 Apr 2007 ------------------- - -- Fixed a number of connection establishment errors in the TCP out- - of-band messaging system. -- Fixed a memory leak when using mpi_comm calls. - Thanks to Bas van der Vlies for reporting the problem. -- Fixed various memory leaks in OPAL and ORTE. -- Improved launch times when using TM (PBS Pro, Torque, Open PBS). -- Fixed mpi_leave_pinned to work for all datatypes. -- Fix functionality allowing users to disable sbrk() (the - mpool_base_disable_sbrk MCA parameter) on platforms that support it. -- Fixed a pair of problems with the TCP "listen_thread" mode for the - oob_tcp_listen_mode MCA parameter that would cause failures when - attempting to launch applications. -- Fixed a segfault if there was a failure opening a BTL MX endpoint. -- Fixed a problem with mpirun's --nolocal option introduced in 1.2. -- Re-enabled MPI_COMM_SPAWN_MULTIPLE from singletons. -- LoadLeveler and TM configure fixes, Thanks to Martin Audet for the - bug report. -- Various C++ MPI attributes fixes. -- Fixed issues with backtrace code on 64 bit Intel & PPC OS X builds. -- Fixed issues with multi-word CC variables and libtool. - Thanks to Bert Wesarg for the bug reports. -- Fix issue with non-uniform node naming schemes in SLURM. -- Fix file descriptor leak in the Grid Engine/N1GE support. -- Fix compile error on OS X 10.3.x introduced with Open MPI 1.1.5. -- Implement MPI_TYPE_CREATE_DARRAY function (was in 1.1.5 but not 1.2). -- Recognize zsh shell when using rsh/ssh for launching MPI jobs. -- Ability to set the OPAL_DESTDIR or OPAL_PREFIX environment - variables to "re-root" an existing Open MPI installation. 
-- Always include -I for Fortran compiles, even if the prefix is - /usr/local. -- Support for "fork()" in MPI applications that use the - OpenFabrics stack (OFED v1.2 or later). -- Support for setting specific limits on registered memory. - - -1.2: 15 Mar 2007 ----------------- - -- Fixed race condition in the shared memory fifo's, which led to - orphaned messages. -- Corrected the size of the shared memory file - subtracted out the - space the header was occupying. -- Add support for MPI_2COMPLEX and MPI_2DOUBLE_COMPLEX. -- Always ensure to create $(includedir)/openmpi, even if the C++ - bindings are disabled so that the wrapper compilers don't point to - a directory that doesn't exist. Thanks to Martin Audet for - identifying the problem. -- Fixes for endian handling in MPI process startup. -- Openib BTL initialization fixes for cases where MPI processes in the - same job has different numbers of active ports on the same physical - fabric. -- Print more descriptive information when displaying backtraces on - OS's that support this functionality, such as the hostname and PID - of the process in question. -- Fixes to properly handle MPI exceptions in C++ on communicators, - windows, and files. -- Much more reliable runtime support, particularly with regards to MPI - job startup scalability, BProc support, and cleanup in failure - scenarios (e.g., MPI_ABORT, MPI processes abnormally terminating, - etc.). -- Significant performance improvements for MPI collectives, - particularly on high-speed networks. -- Various fixes in the MX BTL component. -- Fix C++ typecast problems with MPI_ERRCODES_IGNORE. Thanks to - Satish Balay for bringing this to our attention. -- Allow run-time specification of the maximum amount of registered - memory for OpenFabrics and GM. 
-- Users who utilize the wrapper compilers (e.g., mpicc and mpif77) - will not notice, but the underlying library names for ORTE and OPAL - have changed to libopen-rte and libopen-pal, respectively (listed - here because there are undoubtedly some users who are not using the - wrapper compilers). -- Many bug fixes to MPI-2 one-sided support. -- Added support for TotalView message queue debugging. -- Fixes for MPI_STATUS_SET_ELEMENTS. -- Print better error messages when mpirun's "-nolocal" is used when - there is only one node available. -- Added man pages for several Open MPI executables and the MPI API - functions. -- A number of fixes for Alpha platforms. -- A variety of Fortran API fixes. -- Build the Fortran MPI API as a separate library to allow these - functions to be profiled properly. -- Add new --enable-mpirun-prefix-by-default configure option to always - imply the --prefix option to mpirun, preventing many rsh/ssh-based - users from needing to modify their shell startup files. -- Add a number of missing constants in the C++ bindings. -- Added tight integration with Sun N1 Grid Engine (N1GE) 6 and the - open source Grid Engine. -- Allow building the F90 MPI bindings as shared libraries for most - compilers / platforms. Explicitly disallow building the F90 - bindings as shared libraries on OS X because of complicated - situations with Fortran common blocks and lack of support for - unresolved common symbols in shared libraries. -- Added stacktrace support for Solaris and Mac OS X. -- Update event library to libevent-1.1b. -- Fixed standards conformance issues with MPI_ERR_TRUNCATED and - setting MPI_ERROR during MPI_TEST/MPI_WAIT. -- Addition of "cm" PML to better support library-level matching - interconnects, with support for Myrinet/MX, and QLogic PSM-based - networks. -- Addition of "udapl" BTL for transport across uDAPL interconnects. 
-- Really check that the $CXX given to configure is a C++ compiler - (not a C compiler that "sorta works" as a C++ compiler). -- Properly check for local host only addresses properly, looking - for 127.0.0.0/8, rather than just 127.0.0.1. - - -1.1.5: 19 Mar 2007 ------------------- - -- Implement MPI_TYPE_CREATE_DARRAY function. -- Fix race condition in shared memory BTL startup that could cause MPI - applications to hang in MPI_INIT. -- Fix syntax error in a corner case of the event library. Thanks to - Bert Wesarg for pointing this out. -- Add new MCA parameter (mpi_preconnect_oob) for pre-connecting the - "out of band" channels between all MPI processes. Most helpful for - MPI applications over InfiniBand where process A sends an initial - message to process B, but process B does not enter the MPI library - for a long time. -- Fix for a race condition in shared memory locking semantics. -- Add major, minor, and release version number of Open MPI to mpi.h. - Thanks to Martin Audet for the suggestion. -- Fix the "restrict" compiler check in configure. -- Fix a problem with argument checking in MPI_TYPE_CREATE_SUBARRAY. -- Fix a problem with compiling the XGrid components with non-gcc - compilers. - - -1.1.4: 30 Jan 2007 ------------------- - -- Fixed 64-bit alignment issues with TCP interface detection on - intel-based OS X machines. -- Adjusted TCP interface selection to automatically ignore Linux - channel-bonded slave interfaces. -- Fixed the type of the first parameter to the MPI F90 binding for - MPI_INITIALIZED. Thanks to Tim Campbell for pointing out the - problem. -- Fix a bunch of places in the Fortran MPI bindings where (MPI_Fint*) - was mistakenly being used instead of (MPI_Aint*). -- Fixes for fortran MPI_STARTALL, which could sometimes return - incorrect request values. Thanks to Tim Campbell for pointing out - the problem. -- Include both pre- and post-MPI-2 errata bindings for - MPI::Win::Get_attr. 
-- Fix math error on Intel OS X platforms that would greatly increase - shared memory latency. -- Fix type casting issue with MPI_ERRCODES_IGNORE that would cause - errors when using a C++ compiler. Thanks to Barry Smith for - bringing this to our attention. -- Fix possible segmentation fault during shutdown when using the - MX BTL. - - -1.1.3: 26 Jan 2007 ------------------- - -- Remove the "hierarch" coll component; it was not intended to be - included in stable releases yet. -- Fix a race condition with stdout/stderr not appearing properly from - all processes upon termination of an MPI job. -- Fix internal accounting errors with the self BTL. -- Fix typos in the code path for when sizeof(int) != sizeof(INTEGER) - in the MPI F77 bindings functions. Thanks to Pierre-Matthieu - Anglade for bringing this problem to our attention. -- Fix for a memory leak in the derived datatype function - ompi_ddt_duplicate(). Thanks to Andreas Schäfer for reporting, - diagnosing, and patching the leak. -- Used better performing basic algorithm for MPI_ALLGATHERV. -- Added a workaround for a bug in the Intel 9.1 C++ compiler (all - versions up to and including 20060925) in the MPI C++ bindings that - caused run-time failures. Thanks to Scott Weitzenkamp for reporting - this problem. -- Fix MPI_SIZEOF implementation in the F90 bindings for COMPLEX - variable types. -- Fixes for persistent requests involving MPI_PROC_NULL. Thanks to - Lisandro Dalcín for reporting the problem. -- Fixes to MPI_TEST* and MPI_WAIT* for proper MPI exception reporting. - Thanks to Lisandro Dalcín for finding the issue. -- Various fixes for MPI generalized request handling; addition of - missing MPI::Grequest functionality to the C++ bindings. -- Add "mpi_preconnect_all" MCA parameter to force wireup of all MPI - connections during MPI_INIT (vs. making connections lazily whenever - the first MPI communication occurs between a pair of peers). 
-- Fix a problem for when $FC and/or $F77 were specified as multiple -   tokens.  Thanks to Orion Poplawski for identifying the problem and -   to Ralf Wildenhues for suggesting the fix. -- Fix several MPI_*ERRHANDLER* functions and MPI_GROUP_TRANSLATE_RANKS -   with respect to what arguments they allowed and the behavior that -   they effected.  Thanks to Lisandro Dalcín for reporting the -   problems. - - 1.1.2: 18 Oct 2006 ------------------ - -- Really fix Fortran status handling in MPI_WAITSOME and MPI_TESTSOME. -- Various datatype fixes, reported by several users as causing -   failures in the BLACS testing suite.  Thanks to Harald Forbert, Åke -   Sandgren, and Michael Kluskens for reporting the problem. -- Correctness and performance fixes for heterogeneous environments. -- Fixed an error in command line parsing on some platforms (causing -   mpirun to crash without doing anything). -- Fix for initialization hangs on 64 bit Mac OS X PowerPC systems. -- Fixed some memory allocation problems in mpirun that could cause -   random problems if "-np" was not specified on the command line. -- Add Kerberos authentication support for XGrid. -- Added LoadLeveler support for jobs larger than 128 tasks. -- Fix for large-sized Fortran LOGICAL datatypes. -- Fix various error checking in MPI_INFO_GET_NTHKEY and -   MPI_GROUP_TRANSLATE_RANKS, and some collective operations -   (particularly with regards to MPI_IN_PLACE).  Thanks to Lisandro -   Dalcín for reporting the problems. -- Fix receiving messages to buffers allocated by MPI_ALLOC_MEM. -- Fix a number of race conditions with the MPI-2 Onesided -   interface. -- Fix the "tuned" collective component where in some cases -   MPI_BCAST could hang. -- Update TCP support to support non-uniform TCP environments. -- Allow the "poe" RAS component to be built on AIX or Linux. -- Only install mpif.h if the rest of the Fortran bindings are -   installed. -- Fixes for BProc node selection. -- Add some missing Fortran MPI-2 IO constants.
- - -1.1.1: 28 Aug 2006 ------------------- - -- Fix for Fortran string handling in various MPI API functions. -- Fix for Fortran status handling in MPI_WAITSOME and MPI_TESTSOME. -- Various fixes for the XL compilers. -- Automatically disable using mallot() on AIX. -- Memory fixes for 64 bit platforms with registering MCA parameters in - the self and MX BTL components. -- Fixes for BProc to support oversubscription and changes to the - mapping algorithm so that mapping processes "by slot" works as - expected. -- Fixes for various abort cases to not hang and clean up nicely. -- If using the Intel 9.0 v20051201 compiler on an IA64 platform, the - ptmalloc2 memory manager component will automatically disable - itself. Other versions of the Intel compiler on this platform seem - to work fine (e.g., 9.1). -- Added "host" MPI_Info key to MPI_COMM_SPAWN and - MPI_COMM_SPAWN_MULTIPLE. -- Add missing C++ methods: MPI::Datatype::Create_indexed_block, - MPI::Datatype::Create_resized, MPI::Datatype::Get_true_extent. -- Fix OSX linker issue with Fortran bindings. -- Fixed MPI_COMM_SPAWN to start spawning new processes in slots that - (according to Open MPI) are not already in use. -- Added capability to "mpirun a.out" (without specifying -np) that - will run on all currently-allocated resources (e.g., within a batch - job such as SLURM, Torque, etc.). -- Fix a bug with one particular case of MPI_BCAST. Thanks to Doug - Gregor for identifying the problem. -- Ensure that the shared memory mapped file is only created when there - is more than one process on a node. -- Fixed problems with BProc stdin forwarding. -- Fixed problem with MPI_TYPE_INDEXED datatypes. Thanks to Yven - Fournier for identifying this problem. -- Fix some thread safety issues in MPI attributes and the openib BTL. -- Fix the BProc allocator to not potentially use the same resources - across multiple ORTE universes. -- Fix gm resource leak. -- More latency reduction throughout the code base. 
-- Make the TM PLS (PBS Pro, Torque, Open PBS) more scalable, and fix -   some latent bugs that crept in v1.1.  Thanks to the Thunderbird crew -   at Sandia National Laboratories and Martin Schaffoner for access to -   testing facilities to make this happen. -- Added new command line options to mpirun: -   --nolocal: Do not run any MPI processes on the same node as mpirun -     (compatibility with the OSC mpiexec launcher) -   --nooversubscribe: Abort if the number of processes requested would -     cause oversubscription -   --quiet / -q: do not show spurious status messages -   --version / -V: show the version of Open MPI -- Fix bus error in XGrid process starter.  Thanks to Frank from the -   Open MPI user's list for identifying the problem. -- Fix data size mismatches that caused memory errors on PPC64 -   platforms during the startup of the openib BTL. -- Allow propagation of SIGUSR1 and SIGUSR2 signals from mpirun to -   back-end MPI processes. -- Add missing MPI::Is_finalized() function. - - 1.1: 23 Jun 2006 ---------------- - -- Various MPI datatype fixes, optimizations. -- Fixed various problems on the SPARC architecture (e.g., not -   correctly aligning addresses within structs). -- Improvements in various run-time error messages to be more clear -   about what they mean and where the errors are occurring. -- Various fixes to mpirun's handling of --prefix. -- Updates and fixes for Cray/Red Storm support. -- Major improvements to the Fortran 90 MPI bindings: -   - General improvements in compile/linking time and portability -     between different F90 compilers. -   - Addition of "trivial", "small" (the default), and "medium" -     Fortran 90 MPI module sizes (v1.0.x's F90 module was -     equivalent to "medium").  See the README file for more -     explanation. -   - Fix various MPI F90 interface functions and constant types to -     match.  Thanks to Michael Kluskens for pointing out the problems -     to us. -- Allow short messages to use RDMA (vs.
send/receive semantics) to a -   limited number of peers in both the mvapi and openib BTL components. -   This reduces communication latency over IB channels. -- Numerous performance improvements throughout the entire code base. -- Many minor threading fixes. -- Add a define OMPI_SKIP_CXX to allow the user to skip the mpicxx.h from -   being included in mpi.h. It allows the user to compile C code with a CXX -   compiler without including the CXX bindings. -- PERUSE support has been added. In order to activate it add -   --enable-peruse to the configure options. All events described in -   the PERUSE 2.0 draft are supported, plus one Open MPI -   extension. PERUSE_COMM_REQ_XFER_CONTINUE allows one to see how the data -   is segmented internally, using multiple interfaces or the pipeline -   engine. However, this version only supports one event of each type -   simultaneously attached to a communicator. -- Add support for running jobs in heterogeneous environments. -   Currently supports environments with different endianness and -   different representations of C++ bool and Fortran LOGICAL. -   Mismatched sizes for other datatypes is not supported. -- Open MPI now includes an implementation of the MPI-2 One-Sided -   Communications specification. -- Open MPI is now configurable in cross-compilation environments. -   Several Fortran 77 and Fortran 90 tests need to be pre-seeded with -   results from a config.cache-like file. -- Add --debug option to mpirun to generically invoke a parallel debugger. - - -1.0.3: Not released (all fixes included in 1.1) ----------------------------------------------- - -- Fix a problem noted by Chris Hennes where MPI_INFO_SET incorrectly -   disallowed long values. -- Fix a problem in the launch system that could cause inconsistent -   launch behavior, particularly when launching large jobs. -- Require that the openib BTL find <sysfs/libsysfs.h>.  Thanks to Josh -   Aune for the suggestion. -- Include updates to support the upcoming Autoconf 2.60 and Libtool -   2.0.
Thanks to Ralf Wildenhues for all the work! -- Fix bug with infinite loop in the "round robin" process mapper. -   Thanks to Paul Donohue for reporting the problem. -- Ensure that memory hooks are removed properly during MPI_FINALIZE. -   Thanks to Neil Ludban for reporting the problem. -- Various fixes to the included support for ROMIO. -- Fix to ensure that MPI_LONG_LONG and MPI_LONG_LONG_INT are actually -   synonyms, as defined by the MPI standard.  Thanks to Martin Audet -   for reporting this. -- Fix Fortran 90 configure tests to properly utilize LDFLAGS and LIBS. -   Thanks to Terry Reeves for reporting the problem. -- Fix shared memory progression in asynchronous progress scenarios. -   Thanks to Mykael Bouquey for reporting the problem. -- Fixed back-end operations for predefined MPI_PROD for some -   datatypes.  Thanks to Bert Wesarg for reporting this. -- Adapted configure to be able to handle Torque 2.1.0p0's (and above) -   new library name.  Thanks to Brock Palen for pointing this out and -   providing access to a Torque 2.1.0p0 cluster to test with. -- Fixed situation where mpirun could set a shell pipeline's stdout -   to non-blocking, causing the shell pipeline to prematurely fail. -   Thanks to Darrell Kresge for figuring out what was happening. -- Fixed problems with leave_pinned that could cause Badness with the -   mvapi BTL. -- Fixed problems with MPI_FILE_OPEN and non-blocking MPI-2 IO access. -- Fixed various InfiniBand port matching issues during startup. -   Thanks to Scott Weitzenkamp for identifying these problems. -- Fixed various configure, build and run-time issues with ROMIO. -   Thanks to Dries Kimpe for bringing them to our attention. -- Fixed error in MPI_COMM_SPLIT when dealing with intercommunicators. -   Thanks to Bert Wesarg for identifying the problem. -- Fixed backwards handling of "high" parameter in MPI_INTERCOMM_MERGE. -   Thanks to Michael Kluskens for pointing this out to us.
-- Fixed improper handling of string arguments in Fortran bindings - for MPI-IO functionality -- Fixed segmentation fault with 64 bit applications on Solaris when - using the shared memory transports. -- Fixed MPI_COMM_SELF attributes to free properly at the beginning of - MPI_FINALIZE. Thanks to Martin Audet for bringing this to our - attention. -- Fixed alignment tests for cross-compiling to not cause errors with - recent versions of GCC. - - -1.0.2: 7 Apr 2006 ------------------ - -- Fixed assembly race condition on AMD64 platforms. -- Fixed residual .TRUE. issue with copying MPI attributes set from - Fortran. -- Remove unnecessary logic from Solaris pty I/O forwarding. Thanks to - Francoise Roch for bringing this to our attention. -- Fixed error when count = 0 was given for multiple completion MPI - functions (MPI_TESTSOME, MPI_TESTANY, MPI_TESTALL, MPI_WAITSOME, - MPI_WAITANY, MPI_WAITALL). -- Better handling in MPI_ABORT for when peer processes have already - died, especially under some resource managers. -- Random updates to README file, to include notes about the Portland - compilers. -- Random, small threading fixes to prevent deadlock. -- Fixed a problem with handling long mpirun app files. Thanks to Ravi - Manumachu for identifying the problem. -- Fix handling of strings in several of the Fortran 77 bindings. -- Fix LinuxPPC assembly issues. Thanks to Julian Seward for reporting - the problem. -- Enable pty support for standard I/O forwarding on platforms that - have ptys but do not have openpty(). Thanks to Pierre Valiron for - bringing this to our attention. -- Disable inline assembly for PGI compilers to avoid compiler errors. - Thanks to Troy Telford for bringing this to our attention. -- Added MPI_UNSIGNED_CHAR and MPI_SIGNED_CHAR to the allowed reduction - types. -- Fix a segv in variable-length message displays on Opterons running - Solaris. Thanks to Pierre Valiron for reporting the issue. 
-- Added MPI_BOOL to the intrinsic reduction operations MPI_LAND, - MPI_LOR, MPI_LXOR. Thanks to Andy Selle for pointing this out to us. -- Fixed TCP BTL network matching logic during MPI_INIT; in some cases - on multi-NIC nodes, a NIC could get paired with a NIC on another - network (typically resulting in deadlock). Thanks to Ken Mighell - for pointing this out to us. -- Change the behavior of orterun (mpirun, mpirexec) to search for - argv[0] and the cwd on the target node (i.e., the node where the - executable will be running in all systems except BProc, where the - searches are run on the node where orterun is invoked). -- Fix race condition in shared memory transport that could cause - crashes on machines with weak memory consistency models (including - POWER/PowerPC machines). -- Fix warnings about setting read-only MCA parameters on bproc systems. -- Change the exit status set by mpirun when an application process is - killed by a signal. The exit status is now set to signo + 128, which - conforms with the behavior of (almost) all shells. -- Correct a datatype problem with the convertor when partially - unpacking data. Now we can position the convertor to any position - not only on the predefined types boundaries. Thanks to Yvan Fournier - for reporting this to us. -- Fix a number of standard I/O forwarding issues, including the - ability to background mpirun and a loss of data issue when - redirecting mpirun's standard input from a file. -- Fixed bug in ompi_info where rcache and bml MCA parameters would not - be displayed. -- Fixed umask issues in the session directory. Thanks to Glenn Morris - for reporting this to us. -- Fixed tcsh-based LD_LIBRARY_PATH issues with --prefix. Thanks to - Glen Morris for identifying the problem and suggesting the fix. -- Removed extraneous \n's when setting PATH and LD_LIBRARY_PATH in the - rsh startup. Thanks to Glen Morris for finding these typos. -- Fixed missing constants in MPI C++ bindings. 
-- Fixed some errors caused by threading issues. -- Fixed openib BTL flow control logic to not overrun the number of - send wqes available. -- Update to match newest OpenIB user-level library API. Thanks to - Roland Dreier for submitting this patch. -- Report errors properly when failing to register memory in the openib - BTL. -- Reduce memory footprint of openib BTL. -- Fix parsing problem with mpirun's "-tv" switch. Thanks to Chris - Gottbrath for supplying the fix. -- Fix Darwin net/if.h configure warning. -- The GNU assembler unbelievably defaults to making stacks executable. - So when using gas, add flags to explicitly tell it to not make - stacks executable (lame but necessary). -- Add missing MPI::Request::Get_status() methods. Thanks to Bill - Saphir for pointing this out to us. -- Improved error messages on memory registration errors (e.g., when - using high-speed networks). -- Open IB support now checks firmware for how many outstanding RDMA - requests are supported. Thanks to Mellanox for pointing this out to - us. -- Enable printing of stack traces in MPI processes upon SIGBUS, - SIGSEGV, and SIGFPE if the platform supports it. -- Fixed F90 compilation support for the Lahey compiler. -- Fixed issues with ROMIO shared library support. -- Fixed internal accounting problems with rsh support. -- Update to GNU Libtool 1.5.22. -- Fix error in configure script when setting CCAS to ias (the Intel - assembler). -- Added missing MPI::Intercomm collectives. -- Fixed MPI_IN_PLACE handling for Fortran collectives. -- Fixed some more C++ const_cast<> issues. Thanks for Martin Audet - (again) for bringing this to our attention. -- Updated ROMIO with the version from MPICH 1.2.7p1, marked as version - 2005-06-09. -- Fixes for some cases where the use of MPI_BOTTOM could cause - problems. -- Properly handle the case where an mVAPI does not have shared receive - queue support (such as the one shipped by SilverStorm / Infinicon - for OS X). 
- - -1.0.1: 12 Dec 2005 ------------------- - -- Fixed assembly on Solaris AMD platforms. Thanks to Pierre Valiron - for bringing this to our attention. -- Fixed long messages in the send-to-self case. -- Ensure that when the "leave_pinned" option is used, the memory hooks - are also enabled. Thanks to Gleb Natapov for pointing this out. -- Fixed compile errors for IRIX. -- Allow hostfiles to have integer host names (for BProc clusters). -- Fixed a problem with message matching of out-of-order fragments in - multiple network device scenarios. -- Converted all the C++ MPI bindings to use proper const_cast<>'s - instead of old C-style casts to get rid of const-ness. Thanks to - Martin Audet for raising the issue with us. -- Converted MPI_Offset to be a typedef instead of a #define because it - causes problems for some C++ parsers. Thanks to Martin Audet for - bringing this to our attention. -- Improved latency of TCP BTL. -- Fixed index value in MPI_TESTANY to be MPI_UNDEFINED if some - requests were not MPI_REQUEST_NULL, but no requests finished. -- Fixed several Fortran MPI API implementations that incorrectly used - integers instead of logicals or address-sized integers. -- Fix so that Open MPI correctly handles the Fortran value for .TRUE., - regardless of what the Fortran compiler's value for .TRUE. is. -- Improved scalability of MX startup. -- Fix datatype offset handling in the coll basic component's - MPI_SCATTERV implementation. -- Fix EOF handling on stdin. -- Fix missing MPI_F_STATUS_IGNORE and MPI_F_STATUSES_IGNORE - instanatiations. Thanks to Anthony Chan for pointing this out. -- Add a missing value for MPI_WIN_NULL in mpif.h. -- Bring over some fixes for the sm btl that somehow didn't make it - over from the trunk before v1.0. Thanks to Beth Tibbitts and Bill - Chung for helping identify this issue. -- Bring over some fixes for the iof that somehow didn't make it over - from the trunk before v1.0. -- Fix for --with-wrapper-ldflags handling. 
Thanks to Dries Kimpe for - pointing this out to us. - - -1.0: 17 Nov 2005 ----------------- - -Initial public release. diff --git a/README.FT.ULFM.md b/README.FT.ULFM.md deleted file mode 100644 index ae727a7f6a9..00000000000 --- a/README.FT.ULFM.md +++ /dev/null @@ -1,483 +0,0 @@ -ULFM Open MPI - -This README.md documents the features and options specific to the -**User Level Failure Mitigation (ULFM)** Open MPI implementation. -The upstream (i.e. non-resilient) Open MPI directions also apply to -this release, except when specified here, and can be found in its -README file. - -[TOC] - -Features -======== -This implementation conforms to the User Level Failure Mitigation (ULFM) -MPI Standard draft proposal. The ULFM proposal is developed by the MPI -Forum's Fault Tolerance Working Group to support the continued operation of -MPI programs after crash (node failures) have impacted the execution. The key -principle is that no MPI call (point-to-point, collective, RMA, IO, ...) can -block indefinitely after a failure, but must either succeed or raise an MPI -error. - -This implementation produces the three supplementary error codes and five -supplementary interfaces defined in the communicator section of the -[http://fault-tolerance.org/wp-content/uploads/2012/10/20170221-ft.pdf] -(ULFM chapter) standard draft document. - -+ `MPIX_ERR_PROC_FAILED` when a process failure prevents the completion of - an MPI operation. -+ `MPIX_ERR_PROC_FAILED_PENDING` when a potential sender matching a non-blocking - wildcard source receive has failed. -+ `MPIX_ERR_REVOKED` when one of the ranks in the application has invoked the - `MPI_Comm_revoke` operation on the communicator. -+ `MPIX_Comm_revoke(MPI_Comm comm)` Interrupts any communication pending on - the communicator at all ranks. -+ `MPIX_Comm_shrink(MPI_Comm comm, MPI_Comm* newcomm)` creates a new - communicator where dead processes in comm were removed. -+ `MPIX_Comm_agree(MPI_Comm comm, int *flag)` performs a consensus (i.e. 
fault - tolerant allreduce operation) on flag (with the operation bitwise AND). -+ `MPIX_Comm_failure_get_acked(MPI_Comm, MPI_Group*)` obtains the group of - currently acknowledged failed processes. -+ `MPIX_Comm_failure_ack(MPI_Comm)` acknowledges that the application intends - to ignore the effect of currently known failures on wildcard receive - completions and agreement return values. - -## Supported Systems -There are several MPI engines available in Open MPI, -notably, PML "ob1", "cm", "ucx", and MTL "ofi", "portals4", "psm2". -At this point, only "ob1" is adapted to support fault tolerance. - -"ob1" uses BTL ("Byte Transfer Layer") components for each supported -network. "ob1" supports a variety of networks that can be used in -combination with each other. Collective operations (blocking and -non-blocking) use an optimized implementation on top of "ob1". - -- Loopback (send-to-self) -- TCP -- UCT (InfiniBand) -- uGNI (Cray Gemini, Aries) -- Shared Memory (FT supported w/CMA and XPmem; KNEM is untested) -- Tuned and non-blocking collective communications - -A full list of supported, untested and disabled components is provided -later in this document. - -## More Information -More information (tutorials, examples, build instructions for leading -top500 systems) is also available in the Fault Tolerance Research -Hub website: - - -## Bibliographic References -If you are looking for, or want to cite a general reference for ULFM, -please use - -_Wesley Bland, Aurelien Bouteiller, Thomas Herault, George Bosilca, Jack -J. Dongarra: Post-failure recovery of MPI communication capability: Design -and rationale. IJHPCA 27(3): 244-254 (2013)._ - -Available from: http://journals.sagepub.com/doi/10.1177/1094342013488238. -___________________________________________________________________________ - -Building ULFM Open MPI -====================== -```bash -./configure --with-ft [...options...] 
-# use --with-ft to enable building with ULFM (default), -# --without-ft to disable it -make [-j N] all install -# use an integer value of N for parallel builds -``` -There are many available configure options (see `./configure --help` -for a full list); a summary of the more commonly used ones is included -in the upstream Open MPI README file. The following paragraph gives a -summary of ULFM Open MPI specific options behavior. - -## Configure options -+ `--with-ft=TYPE` - Specify the type of fault tolerance to enable. Options: mpi (ULFM MPI - draft standard). Fault tolerance build support is **enabled by default**. - -+ `--enable-mca-no-build=LIST` - Comma-separated list of _-_ pairs that will not be built. - For example, `--enable-mca-no-build=btl-portals,oob-ud` will disable - building the _portals BTL_ and the _ud OOB_ component. You can use this - option to disable components that are known to prevent supporting failure - management __when built-in__. By default, this list is empty, as ulfm has - a separate mechanism to warn about, or disable loading a component that - are poorly tested or known to cause breakage when fault-tolerance is - selected at runtime. - -+ `--with-pmi` - `--with-slurm` - Force the building of SLURM scheduler support. - Slurm with fault tolerance is tested. **Do not use `srun`**, otherwise your - application gets killed by the scheduler upon the first failure. Instead, - **Use `mpirun` in an `salloc/sbatch`**. - -+ `-with-lsf` - This is untested with fault tolerance. - -+ `--with-alps` - `--with-tm` - Force the building of PBS/Torque scheduler support. - PBS is tested with fault tolerance. **Use `mpirun` in a `qsub` - allocation.** - -+ `--disable-oshmem` - Disable building the OpenSHMEM implementation (by default, it is - enabled). - ULFM Fault Tolerance does not apply to OpenSHMEM. 
-___________________________________________________________________________ - -## Modified, Untested and Disabled Components -Frameworks and components which are not listed in the following list are -unmodified and support fault tolerance. Listed frameworks may be **modified** -(and work after a failure), **untested** (and work before a failure, but may -malfunction after a failure), or **disabled** (they cause unspecified behavior -all around when FT is enabled). - -All runtime disabled components are listed in the `ft-mpi` aggregate MCA param file -`$installdir/share/openmpi/amca-param-sets/ft-mpi`. You can tune the runtime behavior -with ULFM by either setting or unsetting variables in this file (or by overiding -the variable on the command line (e.g., ``--omca btl ofi,self``). Note that if FT is -runtime disabled, these components will load normally (this may change observed -performance when comparing with and without fault tolerance). - -- **pml** MPI point-to-point management layer - - "ob1" modified to **handle errors** - - "monitoring", "v" unmodified, **untested** - - "cm", "crcpw", "ucx" **disabled** - -- **btl** Point-to-point Byte Transfer Layer - - "ugni", "uct", "tcp", "sm(+cma,+xpmem)" modified to **handle errors** (removed - unconditional abort on error, expect performance similar to upstream) - - "ofi", "portals4", "smcuda", "usnic", "sm(+knem)" unmodified, - **untested** (may work properly, please report) - -- **mtl** Matching transport layer Used for MPI point-to-point messages on - some types of networks - - All "mtl" components are **disabled** - -- **coll** MPI collective algorithms - - "base", "basic", "tuned", "nbc" modified to **handle errors** - - "cuda", "inter", "sync", "sm" unmodified, **untested** (expect correct post-failure behavior) - - "hcoll", "portals4" unmodified, **disabled** (expect - unspecified post-failure behavior) - -- **osc** MPI one-sided communications - - Unmodified, **untested** (expect unspecified post-failure 
behavior) - -- **io** MPI I/O and dependent components - - _fs_ File system functions for MPI I/O - - _fbtl_ File byte transfer layer: abstraction for individual read/write - operations for OMPIO - - _fcoll_ Collective read and write operations for MPI I/O - - _sharedfp_ Shared file pointer operations for MPI I/O - - All components in these frameworks are unmodified, **untested** - (expect clean post-failure abort) - -- **vprotocol** Checkpoint/Restart components - - unmodified **untested** - -- **threads** `wait-sync` Multithreaded wait-synchronization object - - "pthreads" modified to **handle errors** (added a global interrupt to - trigger all wait_sync objects) - - "argotbots", "qthreads" unmodified, **disabled** (expect post-failure - deadlock) - -___________________________________________________________________________ - -Running ULFM Open MPI -===================== - -## Building your application - -As ULFM is still an extension to the MPI standard, you will need to -`#include ` in C, or `use mpi_ext` in Fortran to access the -supplementary error codes and functions. - -Compile your application as usual, using the provided `mpicc`, `mpif90`, or -`mpicxx` wrappers. - -## Running your application - -You can launch your application with fault tolerance by simply using the -provided `mpiexec`. Beware that your distribution may already provide a -version of MPI, make sure to set your `PATH` and `LD_LIBRARY_PATH` properly. -Note that fault tolerance is disabled by default in ULFM Open MPI; you can -enable the fault tolerance components by launching your application with -`mpiexec --enable-recovery`. - -## Running under a batch scheduler - -ULFM can operate under a job/batch scheduler, and is tested routinely with -both ALPS, PBS and Slurm. One difficulty comes from the fact that many job -schedulers will "cleanup" the application as soon as a process fails. In -order to avoid this problem, it is preferred that you use `mpiexec` -within an allocation (e.g. 
`salloc`, `sbatch`, `qsub`) rather than -a direct launch (e.g. `srun`). - -## Run-time tuning knobs - -ULFM comes with a variety of knobs for controlling how it runs. The default -parameters are sane and should result in very good performance in most -cases. You can change the default settings with `--omca mpi_ft_foo ` -for OMPI options, and with `--prtemca errmgr_detector_bar ` for -PRTE options. - -### PRTE level options - -- `prrte_enable_recovery (default: false)` controls automatic - cleanup of apps with failed processes within mpirun. Enabling this option - also enable `mpi_ft_enable`. -- `errmgr_detector_priority (default 1005`) selects the prte-based - failure detector. Only available when `prte_enable_recovery` is `true`. You - can set this to `0` when using the (experimental) OMPI detector instead. -- err`mgr_detector_heartbeat_period (default: 5e0)` controls the - heartbeat period. Recommended value is 1/2 of the timeout. -- `errmgr_detector_heartbeat_timeout (default: 1e1 seconds)` heartbeat - timeout (i.e. failure detection speed). Recommended value is 2 times - the heartbeat period. The default setup is tuned for failure-free - performance at the expense of fault detection reactivity. In environments - where faults are expected to be common, less conservative values can be - used (e.g., 100ms); Values lower than the TCP poll rate (typically 10ms) - can cause false positive. - -### OMPI level options - -- `mpi_ft_enable (default: same as prrte_enable_recovery)` - permits turning on/off fault tolerance at runtime. When false, failure - detection is disabled; Interfaces defined by the fault tolerance extensions - are substituted with dummy non-fault tolerant implementations (e.g., - `MPIX_Comm_agree` is implemented with `MPI_Allreduce`); All other controls - below become irrelevant. -- `mpi_ft_verbose (default: 0)` increases the output of the fault - tolerance activities. A value of 1 will report detected failures. 
-- `mpi_ft_detector (default: false)`, EXPERIMENTAL, controls - the activation of the OMPI level failure detector. When this detector - is turned off, all failure detection is delegated to PRTE (see above). - The OMPI level fault detector is experimental. There is a tradeoff between - failure detection accuracy and performance with this detector. Users that - experience accuracy issues may enable a more precise mode. - See the tuning knobs below to adjust to taste; - The OMPI failure detector operates on MPI_COMM_WORLD exclusively. - Processes connected from MPI_COMM_CONNECT/ACCEPT and MPI_COMM_SPAWN may - occasionally not be detected when they fail. -- `mpi_ft_detector_thread (default: false)` controls the use - of a thread to emit and receive failure detector's heartbeats. _Setting - this value to "true" will also set `MPI_THREAD_MULTIPLE` support, which - has a noticeable effect on latency (typically 1us increase)._ You may - want to **enable this option if you experience false positive** - processes incorrectly reported as failed with the OMPI failure detector. -- `mpi_ft_detector_period (default: 3e0 seconds)` heartbeat - period. Recommended value is 1/3 of the timeout. _Values lower than - 100us may impart a noticeable effect on latency (typically a 3us - increase)._ -- `mpi_ft_detector_timeout (default: 1e1 seconds)` heartbeat - timeout (i.e. failure detection speed). Recommended value is 3 times - the heartbeat period. - -## Known Limitations in ULFM - -- Infiniband support is provided through the OpenIB or UCT BTL, fault - tolerant operation over the UCX PML is not yet supported. -- TOPO, FILE, RMA are not fault tolerant. They are expected to work properly - before the occurence of the first failure. - -___________________________________________________________________________ - - -Changelog -========= - -## ULFM Integrated in Open MPI -ULFM is now integrated in Open MPI. This text will be updated when a new -Open MPI release is made. 
- -## ULFM Standalone Release 4.0.2u1 -This is a stability and upstream parity upgrade. It is based on the most -current Open MPI Release (v4.0.2, October 2019). - -- This release is based on Open MPI release v4.0.2 (ompi #cb5f4e737a). -- This release is based on ULFM master (ulfm #0e249ca1). -- New features - - Support for the UCT BTL enters beta stage. -- Bugfixes - - High sensitivity to noise in the failure detector. - - Deadlocks when revoking while BTL progress threads are updating messages. - - A case where the failure detector would keep observing a dead process forever. - - Disable the use of external pmix/libevent by default (the internals are modified - to handle error cases). - - Clean error paths leaving some rdma registration dangling. - - Do not remove the orte job/proc session dir prematurely upon error. - -## ULFM Standalone Release 4.0.1u1 -This is a stability and upstream parity upgrade. It improves stability, -performance and is based on the most current Open MPI Release (v4.0.1, -May 2019). - -- This release is based on Open MPI release v4.0.1 (ompi #b780667). -- This release is based on ULFM master (ulfm #cf8dc43f). -- New features - - Addition of the `MPI_Comm_is_revoked` function - - Renamed `ftbasic` collective component to `ftagree` - - Restored the `pcollreq` extension -- Bugfixes - - Failures of node-local siblings were not always detected - - Failure propagation and detection was slowed down by trying to - notify known dead processes - - There were deadlocks in multithreaded programs - - There were issues with PMPI when compiling Fortran Interfaces - - There were deadlocks on OS-X - -## ULFM Standalone Release 2.1 -This release is a bugfix and upstream parity upgrade. It improves stability, -performance and is based on the most current Open MPI master (November 2018). - -- ULFM is now based upon Open MPI master branch (#37954b5f). -- ULFM tuning MCA parameters are exposed by `ompi_info`. 
-- Fortran 90 bindings have been updated -- Bugfixes: - - Correct the behavior of process placement during an MPI_COMM_SPAWN when - some slots were occcupied by failed processes. - - MPI_COMM_SPAWN accepts process placement directives in the Info object. - - Fixed deadlocks in some NBC collective operations. - - Crashes and deadlocks in MPI_FINALIZE have been resolved. - - Any-source requests that returned with an error status of - MPIX_PROC_FAILED_PENDING can now correctly complete during - later MPI_WAIT/TEST. - -## ULFM Standalone Release 2.0 -Focus has been toward integration with current Open MPI master (November 2017), -performance, and stability. - -- ULFM is now based upon Open MPI master branch (#689f1be9). It will be - regularly updated until it will eventually be merged. -- Fault Tolerance is enabled by default and is controlled with MCA variables. -- Added support for multithreaded modes (MPI_THREAD_MULTIPLE, etc.) -- Added support for non-blocking collective operations (NBC). -- Added support for CMA shared memory transport (Vader). -- Added support for advanced failure detection at the MPI level. - Implements the algorithm described in "Failure detection and - propagation in HPC systems." . -- Removed the need for special handling of CID allocation. -- Non-usable components are automatically removed from the build during configure -- RMA, FILES, and TOPO components are enabled by default, and usage in a fault - tolerant execution warns that they may cause undefined behavior after a failure. -- Bugfixes: - - Code cleanup and performance cleanup in non-FT builds; --without-ft at - configure time gives an almost stock Open MPI. - - Code cleanup and performance cleanup in FT builds with FT runtime disabled; - --mca ft_enable_mpi false thoroughly disables FT runtime activities. - - Some error cases would return ERR_PENDING instead of ERR_PROC_FAILED in - collective operations. 
- - Some test could set ERR_PENDING or ERR_PROC_FAILED instead of - ERR_PROC_FAILED_PENDING for ANY_SOURCE receptions. -___________________________________________________________________________ - -## ULFM Standalone Release 1.1 -Focus has been toward improving stability, feature coverage for intercomms, -and following the updated specification for MPI_ERR_PROC_FAILED_PENDING. - -- Forked from Open MPI 1.5.5 devel branch -- Addition of the MPI_ERR_PROC_FAILED_PENDING error code, as per newer specification - revision. Properly returned from point-to-point, non-blocking ANY_SOURCE operations. -- Alias MPI_ERR_PROC_FAILED, MPI_ERR_PROC_FAILED_PENDING and MPI_ERR_REVOKED to the - corresponding standard blessed -extension- names MPIX_ERR_xxx. -- Support for Intercommunicators: - - Support for the blocking version of the agreement, MPI_COMM_AGREE on Intercommunicators. - - MPI_COMM_REVOKE tested on intercommunicators. -- Disabled completely (.ompi_ignore) many untested components. -- Changed the default ORTE failure notification propagation aggregation delay from 1s to 25ms. -- Added an OMPI internal failure propagator; failure propagation between SM domains is now - immediate. -- Bugfixes: - - SendRecv would not always report MPI_ERR_PROC_FAILED correctly. - - SendRecv could incorrectly update the status with errors pertaining to the Send portion - of the Sendrecv. - - Revoked send operations are now always completed or remote cancelled and may not - deadlock anymore. - - Cancelled send operations to a dead peer will not trigger an assert when the BTL reports - that same failure. - - Repeat calls to operations returning MPI_ERR_PROC_FAILED will eventually return - MPI_ERR_REVOKED when another process revokes the communicator. -___________________________________________________________________________ - -## ULFM Standalone Release 1.0 -Focus has been toward improving performance, both before and after the occurence of failures. 
-The list of new features includes: - -- Support for the non-blocking version of the agreement, MPI_COMM_IAGREE. -- Compliance with the latest ULFM specification draft. In particular, the - MPI_COMM_(I)AGREE semantic has changed. -- New algorithm to perform agreements, with a truly logarithmic complexity in number of - ranks, which translates into huge performance boosts in MPI_COMM_(I)AGREE and - MPI_COMM_SHRINK. -- New algorithm to perform communicator revocation. MPI_COMM_REVOKE performs a reliable - broadcast with a fixed maximum output degree, which scales logarithmically with the - number of ranks. -- Improved support for our traditional network layer: - - TCP: fully tested - - SM: fully tested (with the exception of XPMEM, which remains unsupported) -- Added support for High Performance networks - - Open IB: reasonably tested - - uGNI: reasonably tested -- The tuned collective module is now enabled by default (reasonably tested), expect a - huge performance boost compared to the former basic default setting - - Back-ported PBS/ALPS fixes from Open MPI - - Back-ported OpenIB bug/performance fixes from Open MPI - - Improve Context ID allocation algorithm to reduce overheads of Shrink - - Miscellaneous bug fixes -___________________________________________________________________________ - -## Binary Compatibility -ULFM Open MPI is binary compatible with any version of Open MPI compatible -with the underlying Open MPI master branch or release (see -the binary compatibility and version number section in the upstream Open MPI -README). That is, applications compiled with a compatible Open MPI can run -with the ULFM Open MPI `mpirun` and MPI libraries. Conversely, _as long as -the application does not employ one of the MPIX functions_, which are -exclusively defined in ULFM Open MPI, an application compiled with -ULFM Open MPI can be launched with a compatible Open MPI `mpirun` and run -with the non-fault tolerant MPI library. 
-___________________________________________________________________________ - -Contacting the Authors -====================== -Found a bug? Got a question? Want to make a suggestion? Want to -contribute to ULFM Open MPI? Working on a cool use-case? -Please let us know! - -The best way to report bugs, send comments, or ask questions is to -sign up on the user's mailing list: - - -Because of spam, only subscribers are allowed to post to these lists -(ensure that you subscribe with and post from exactly the same e-mail -address -- joe@example.com is considered different than -joe@mycomputer.example.com!). Visit these pages to subscribe to the -lists: - - -When submitting questions and problems, be sure to include as much -extra information as possible. This web page details all the -information that we request in order to provide assistance: - - -Thanks for your time. -___________________________________________________________________________ - -Copyright -========= - -``` -Copyright (c) 2012-2020 The University of Tennessee and The University - of Tennessee Research Foundation. All rights - reserved. - -$COPYRIGHT$ - -Additional copyrights may follow - -$HEADER$ -``` diff --git a/README.JAVA.md b/README.JAVA.md deleted file mode 100644 index 234c7a6a1c6..00000000000 --- a/README.JAVA.md +++ /dev/null @@ -1,281 +0,0 @@ -# Open MPI Java Bindings - -## Important node - -JAVA BINDINGS ARE PROVIDED ON A "PROVISIONAL" BASIS - I.E., THEY ARE -NOT PART OF THE CURRENT OR PROPOSED MPI STANDARDS. THUS, INCLUSION OF -JAVA SUPPORT IS NOT REQUIRED BY THE STANDARD. CONTINUED INCLUSION OF -THE JAVA BINDINGS IS CONTINGENT UPON ACTIVE USER INTEREST AND -CONTINUED DEVELOPER SUPPORT. - -## Overview - -This version of Open MPI provides support for Java-based -MPI applications. - -The rest of this document provides step-by-step instructions on -building OMPI with Java bindings, and compiling and running Java-based -MPI applications. 
Also, part of the functionality is explained with -examples. Further details about the design, implementation and usage -of Java bindings in Open MPI can be found in [1]. The bindings follow -a JNI approach, that is, we do not provide a pure Java implementation -of MPI primitives, but a thin layer on top of the C -implementation. This is the same approach as in mpiJava [2]; in fact, -mpiJava was taken as a starting point for Open MPI Java bindings, but -they were later totally rewritten. - -1. O. Vega-Gisbert, J. E. Roman, and J. M. Squyres. "Design and - implementation of Java bindings in Open MPI". Parallel Comput. - 59: 1-20 (2016). -2. M. Baker et al. "mpiJava: An object-oriented Java interface to - MPI". In Parallel and Distributed Processing, LNCS vol. 1586, - pp. 748-762, Springer (1999). - -## Building Java Bindings - -If this software was obtained as a developer-level checkout as opposed -to a tarball, you will need to start your build by running -`./autogen.pl`. This will also require that you have a fairly recent -version of GNU Autotools on your system - see the HACKING.md file for -details. - -Java support requires that Open MPI be built at least with shared libraries -(i.e., `--enable-shared`) - any additional options are fine and will not -conflict. Note that this is the default for Open MPI, so you don't -have to explicitly add the option. The Java bindings will build only -if `--enable-mpi-java` is specified, and a JDK is found in a typical -system default location. - -If the JDK is not in a place where we automatically find it, you can -specify the location. For example, this is required on the Mac -platform as the JDK headers are located in a non-typical location. Two -options are available for this purpose: - -1. `--with-jdk-bindir=`: the location of `javac` and `javah` -1. `--with-jdk-headers=`: the directory containing `jni.h` - -For simplicity, typical configurations are provided in platform files -under `contrib/platform/hadoop`. 
These will meet the needs of most -users, or at least provide a starting point for your own custom -configuration. - -In summary, therefore, you can configure the system using the -following Java-related options: - -``` -$ ./configure --with-platform=contrib/platform/hadoop/ ... - -```` - -or - -``` -$ ./configure --enable-mpi-java --with-jdk-bindir= --with-jdk-headers= ... -``` - -or simply - -``` -$ ./configure --enable-mpi-java ... -``` - -if JDK is in a "standard" place that we automatically find. - -## Running Java Applications - -For convenience, the `mpijavac` wrapper compiler has been provided for -compiling Java-based MPI applications. It ensures that all required MPI -libraries and class paths are defined. You can see the actual command -line using the `--showme` option, if you are interested. - -Once your application has been compiled, you can run it with the -standard `mpirun` command line: - -``` -$ mpirun java -``` - -For convenience, `mpirun` has been updated to detect the `java` command -and ensure that the required MPI libraries and class paths are defined -to support execution. You therefore do _NOT_ need to specify the Java -library path to the MPI installation, nor the MPI classpath. Any class -path definitions required for your application should be specified -either on the command line or via the `CLASSPATH` environment -variable. Note that the local directory will be added to the class -path if nothing is specified. - -As always, the `java` executable, all required libraries, and your -application classes must be available on all nodes. - -## Basic usage of Java bindings - -There is an MPI package that contains all classes of the MPI Java -bindings: `Comm`, `Datatype`, `Request`, etc. These classes have a -direct correspondence with classes defined by the MPI standard. MPI -primitives are just methods included in these classes. 
The convention -used for naming Java methods and classes is the usual camel-case -convention, e.g., the equivalent of `MPI_File_set_info(fh,info)` is -`fh.setInfo(info)`, where `fh` is an object of the class `File`. - -Apart from classes, the MPI package contains predefined public -attributes under a convenience class `MPI`. Examples are the -predefined communicator `MPI.COMM_WORLD` or predefined datatypes such -as `MPI.DOUBLE`. Also, MPI initialization and finalization are methods -of the `MPI` class and must be invoked by all MPI Java -applications. The following example illustrates these concepts: - -```java -import mpi.*; - -class ComputePi { - - public static void main(String args[]) throws MPIException { - - MPI.Init(args); - - int rank = MPI.COMM_WORLD.getRank(), - size = MPI.COMM_WORLD.getSize(), - nint = 100; // Intervals. - double h = 1.0/(double)nint, sum = 0.0; - - for(int i=rank+1; i<=nint; i+=size) { - double x = h * ((double)i - 0.5); - sum += (4.0 / (1.0 + x * x)); - } - - double sBuf[] = { h * sum }, - rBuf[] = new double[1]; - - MPI.COMM_WORLD.reduce(sBuf, rBuf, 1, MPI.DOUBLE, MPI.SUM, 0); - - if(rank == 0) System.out.println("PI: " + rBuf[0]); - MPI.Finalize(); - } -} -``` - -## Exception handling - -Java bindings in Open MPI support exception handling. By default, errors -are fatal, but this behavior can be changed. The Java API will throw -exceptions if the MPI.ERRORS_RETURN error handler is set: - -```java -MPI.COMM_WORLD.setErrhandler(MPI.ERRORS_RETURN); -``` - -If you add this statement to your program, it will show the line -where it breaks, instead of just crashing in case of an error. 
-Error-handling code can be separated from main application code by -means of try-catch blocks, for instance: - -```java -try -{ - File file = new File(MPI.COMM_SELF, "filename", MPI.MODE_RDONLY); -} -catch(MPIException ex) -{ - System.err.println("Error Message: "+ ex.getMessage()); - System.err.println(" Error Class: "+ ex.getErrorClass()); - ex.printStackTrace(); - System.exit(-1); -} -``` - -## How to specify buffers - -In MPI primitives that require a buffer (either send or receive) the -Java API admits a Java array. Since Java arrays can be relocated by -the Java runtime environment, the MPI Java bindings need to make a -copy of the contents of the array to a temporary buffer, then pass the -pointer to this buffer to the underlying C implementation. From the -practical point of view, this implies an overhead associated to all -buffers that are represented by Java arrays. The overhead is small -for small buffers but increases for large arrays. - -There is a pool of temporary buffers with a default capacity of 64K. -If a temporary buffer of 64K or less is needed, then the buffer will -be obtained from the pool. But if the buffer is larger, then it will -be necessary to allocate the buffer and free it later. - -The default capacity of pool buffers can be modified with an Open MPI -MCA parameter: - -``` -shell$ mpirun --mca mpi_java_eager size ... -``` - -Where `size` is the number of bytes, or kilobytes if it ends with 'k', -or megabytes if it ends with 'm'. - -An alternative is to use "direct buffers" provided by standard classes -available in the Java SDK such as `ByteBuffer`. For convenience we -provide a few static methods `new[Type]Buffer` in the `MPI` class to -create direct buffers for a number of basic datatypes. Elements of the -direct buffer can be accessed with methods `put()` and `get()`, and -the number of elements in the buffer can be obtained with the method -`capacity()`. 
This example illustrates its use: - -```java -int myself = MPI.COMM_WORLD.getRank(); -int tasks = MPI.COMM_WORLD.getSize(); - -IntBuffer in = MPI.newIntBuffer(MAXLEN * tasks), - out = MPI.newIntBuffer(MAXLEN); - -for(int i = 0; i < MAXLEN; i++) - out.put(i, myself); // fill the buffer with the rank - -Request request = MPI.COMM_WORLD.iAllGather( - out, MAXLEN, MPI.INT, in, MAXLEN, MPI.INT); -request.waitFor(); -request.free(); - -for(int i = 0; i < tasks; i++) -{ - for(int k = 0; k < MAXLEN; k++) - { - if(in.get(k + i * MAXLEN) != i) - throw new AssertionError("Unexpected value"); - } -} -``` - -Direct buffers are available for: `BYTE`, `CHAR`, `SHORT`, `INT`, -`LONG`, `FLOAT`, and `DOUBLE`. There is no direct buffer for booleans. - -Direct buffers are not a replacement for arrays, because they have -higher allocation and deallocation costs than arrays. In some -cases arrays will be a better choice. You can easily convert a -buffer into an array and vice versa. - -All non-blocking methods must use direct buffers and only -blocking methods can choose between arrays and direct buffers. - -The above example also illustrates that it is necessary to call -the `free()` method on objects whose class implements the `Freeable` -interface. Otherwise a memory leak is produced. - -## Specifying offsets in buffers - -In a C program, it is common to specify an offset in a array with -`&array[i]` or `array+i`, for instance to send data starting from -a given position in the array. The equivalent form in the Java bindings -is to `slice()` the buffer to start at an offset. Making a `slice()` -on a buffer is only necessary, when the offset is not zero. Slices -work for both arrays and direct buffers. - -```java -import static mpi.MPI.slice; -// ... -int numbers[] = new int[SIZE]; -// ... -MPI.COMM_WORLD.send(slice(numbers, offset), count, MPI.INT, 1, 0); -``` - -## Questions? Problems? 
- -If you have any problems, or find any bugs, please feel free to report -them to [Open MPI user's mailing -list](https://www.open-mpi.org/community/lists/ompi.php). diff --git a/README.md b/README.md index 8280b68efa4..3136ebc1dff 100644 --- a/README.md +++ b/README.md @@ -1,2296 +1,31 @@ # Open MPI -The Open MPI Project is an open source Message Passing Interface (MPI) -implementation that is developed and maintained by a consortium of -academic, research, and industry partners. Open MPI is therefore able -to combine the expertise, technologies, and resources from all across -the High Performance Computing community in order to build the best -MPI library available. Open MPI offers advantages for system and -software vendors, application developers and computer science -researchers. - -See [the MPI Forum web site](https://mpi-forum.org/) for information -about the MPI API specification. - -## Quick start - -In many cases, Open MPI can be built and installed by simply -indicating the installation directory on the command line: - -``` -$ tar xf openmpi-.tar.bz2 -$ cd openmpi- -$ ./configure --prefix= |& tee config.out -...lots of output... -$ make -j 8 |& tee make.out -...lots of output... -$ make install |& tee install.out -...lots of output... -``` - -Note that there are many, many configuration options to the -`./configure` step. Some of them may be needed for your particular -environmnet; see below for desciptions of the options available. - -If your installation prefix path is not writable by a regular user, -you may need to use sudo or su to run the `make install` step. For -example: - -``` -$ sudo make install |& tee install.out -[sudo] password for jsquyres: -...lots of output... -``` - -Finally, note that VPATH builds are fully supported. For example: - -``` -$ tar xf openmpi-.tar.bz2 -$ cd openmpi- -$ mkdir build -$ cd build -$ ../configure --prefix= |& tee config.out -...etc. 
-``` - -## Table of contents - -The rest of this file contains: - -* [General release notes about Open MPI](#general-notes) - * [Platform-specific notes](#platform-notes) - * [Compiler-specific notes](#compiler-notes) - * [Run-time support notes](#general-run-time-support-notes) - * [MPI functionality and features](#mpi-functionality-and-features) - * [OpenSHMEM functionality and - features](#openshmem-functionality-and-features) - * [MPI collectives](#mpi-collectives) - * [OpenSHMEM collectives](#openshmem-collectives) - * [Network support](#network-support) - * [Open MPI extensions](#open-mpi-extensions) -* [Detailed information on building Open MPI](#building-open-mpi) - * [Installation options](#installation-options) - * [Networking support and options](#networking-support--options) - * [Run-time system support and options](#run-time-system-support) - * [Miscellaneous support - libraries](#miscellaneous-support-libraries) - * [MPI functionality options](#mpi-functionality) - * [OpenSHMEM functionality options](#openshmem-functionality) - * [Miscellaneous functionality - options](#miscellaneous-functionality) -* [Open MPI version and library numbering - policies](#open-mpi-version-numbers-and-binary-compatibility) - * [Backwards compatibility polices](#backwards-compatibility) - * [Software version numbering](#software-version-number) - * [Shared library version numbering](#shared-library-version-number) -* [Information on how to both query and validate your Open MPI - installation](#checking-your-open-mpi-installation) -* [Description of Open MPI extensions](#open-mpi-api-extensions) - * [Compiling the extensions](#compiling-the-extensions) - * [Using the extensions](#using-the-extensions) -* [Examples showing how to compile Open MPI applications](#compiling-open-mpi-applications) -* [Examples showing how to run Open MPI applications](#running-open-mpi-applications) -* [Summary information on the various plugin - 
frameworks](#the-modular-component-architecture-mca) - * [MPI layer frameworks](#mpi-layer-frameworks) - * [OpenSHMEM component frameworks](#openshmem-component-frameworks) - * [Miscellaneous frameworks](#miscellaneous-frameworks) - * [Other notes about frameworks](#framework-notes) -* [How to get more help](#questions--problems) - -Also, note that much, much more information is also available [in the -Open MPI FAQ](https://www.open-mpi.org/faq/). - - -## General notes - -The following abbreviated list of release notes applies to this code -base as of this writing (April 2020): - -* Open MPI now includes two public software layers: MPI and OpenSHMEM. - Throughout this document, references to Open MPI implicitly include - both of these layers. When distinction between these two layers is - necessary, we will reference them as the "MPI" and "OpenSHMEM" - layers respectively. - -* OpenSHMEM is a collaborative effort between academia, industry, and - the U.S. Government to create a specification for a standardized API - for parallel programming in the Partitioned Global Address Space - (PGAS). For more information about the OpenSHMEM project, including - access to the current OpenSHMEM specification, please visit - http://openshmem.org/. - - This OpenSHMEM implementation will only work in Linux environments - with a restricted set of supported networks. - -* Open MPI includes support for a wide variety of supplemental - hardware and software package. When configuring Open MPI, you may - need to supply additional flags to the `configure` script in order - to tell Open MPI where the header files, libraries, and any other - required files are located. As such, running `configure` by itself - may not include support for all the devices (etc.) that you expect, - especially if their support headers / libraries are installed in - non-standard locations. 
Network interconnects are an easy example - to discuss -- Libfabric and OpenFabrics networks, for example, both - have supplemental headers and libraries that must be found before - Open MPI can build support for them. You must specify where these - files are with the appropriate options to configure. See the - listing of configure command-line switches, below, for more details. - -* The majority of Open MPI's documentation is here in this file, the - included man pages, and on [the web site - FAQ](https://www.open-mpi.org/). - -* Note that Open MPI documentation uses the word "component" - frequently; the word "plugin" is probably more familiar to most - users. As such, end users can probably completely substitute the - word "plugin" wherever you see "component" in our documentation. - For what it's worth, we use the word "component" for historical - reasons, mainly because it is part of our acronyms and internal API - function calls. - -* The run-time systems that are currently supported are: - * rsh / ssh - * PBS Pro, Torque - * Platform LSF (tested with v9.1.1 and later) - * SLURM - * Cray XE, XC, and XK - * Oracle Grid Engine (OGE) 6.1, 6.2 and open source Grid Engine - -* Systems that have been tested are: - * Linux (various flavors/distros), 64 bit (x86, ppc, aarch64), - with gcc (>=4.8.x+), clang (>=3.6.0), Absoft (fortran), Intel, - and Portland (*) - * macOS (10.14-10.15), 64 bit (x86_64) with XCode compilers - - (*) Be sure to read the Compiler Notes, below. - -* Other systems have been lightly (but not fully) tested: - * Linux (various flavors/distros), 32 bit, with gcc - * Cygwin 32 & 64 bit with gcc - * ARMv6, ARMv7 - * Other 64 bit platforms. - * OpenBSD. Requires configure options `--enable-mca-no-build=patcher` - and `--disable-dlopen` with this release. - * Problems have been reported when building Open MPI on FreeBSD 11.1 - using the clang-4.0 system compiler. A workaround is to build - Open MPI using the GNU compiler. 
- -* Open MPI has taken some steps towards [Reproducible - Builds](https://reproducible-builds.org/). Specifically, Open MPI's - `configure` and `make` process, by default, records the build date - and some system-specific information such as the hostname where Open - MPI was built and the username who built it. If you desire a - Reproducible Build, set the `$SOURCE_DATE_EPOCH`, `$USER` and - `$HOSTNAME` environment variables before invoking `configure` and - `make`, and Open MPI will use those values instead of invoking - `whoami` and/or `hostname`, respectively. See - https://reproducible-builds.org/docs/source-date-epoch/ for - information on the expected format and content of the - `$SOURCE_DATE_EPOCH` variable. - - -### Platform Notes - -- N/A - - -### Compiler Notes - -* Open MPI requires a C99-capable compiler to build. - -* On platforms other than x86-64, AArch64 (64-bit ARM), and PPC64, - Open MPI requires a compiler that either supports C11 atomics or - the GCC `__atomic` atomics (e.g., GCC >= v4.8.1). - -* 32-bit platforms are only supported with a recent compiler that - supports C11 atomics. This includes gcc 4.9.x+ (gcc 6.x or newer - recommened), icc 16, clang 3.1+, etc. - -* Mixing compilers from different vendors when building Open MPI - (e.g., using the C/C++ compiler from one vendor and the Fortran - compiler from a different vendor) has been successfully employed by - some Open MPI users (discussed on the Open MPI user's mailing list), - but such configurations are not tested and not documented. For - example, such configurations may require additional compiler / - linker flags to make Open MPI build properly. - - A not-uncommon case for this is when building on MacOS with the - system-default GCC compiler (i.e., `/usr/bin/gcc`), but a 3rd party - gfortran (e.g., provided by Homebrew, in `/usr/local/bin/gfortran`). - Since these compilers are provided by different organizations, they - have different default search paths. 
For example, if Homebrew has - also installed a local copy of Libevent (a 3rd party package that - Open MPI requires), the MacOS-default `gcc` linker will find it - without any additional command line flags, but the Homebrew-provided - gfortran linker will not. In this case, it may be necessary to - provide the following on the configure command line: - - ``` - $ ./configure FCFLAGS=-L/usr/local/lib ... - ``` - - This `-L` flag will then be passed to the Fortran linker when - creating Open MPI's Fortran libraries, and it will therefore be able - to find the installed Libevent. - -* In general, the latest versions of compilers of a given vendor's - series have the least bugs. We have seen cases where Vendor XYZ's - compiler version A.B fails to compile Open MPI, but version A.C - (where C>B) works just fine. If you run into a compile failure, you - might want to double check that you have the latest bug fixes and - patches for your compiler. - -* Users have reported issues with older versions of the Fortran PGI - compiler suite when using Open MPI's (non-default) `--enable-debug` - configure option. Per the above advice of using the most recent - version of a compiler series, the Open MPI team recommends using the - latest version of the PGI suite, and/or not using the `--enable-debug` - configure option. 
If it helps, here's what we have found with some - (not comprehensive) testing of various versions of the PGI compiler - suite: - - * pgi-8 : NO known good version with `--enable-debug` - * pgi-9 : 9.0-4 known GOOD - * pgi-10: 10.0-0 known GOOD - * pgi-11: NO known good version with `--enable-debug` - * pgi-12: 12.10 known BAD with `-m32`, but known GOOD without `-m32` - (and 12.8 and 12.9 both known BAD with `--enable-debug`) - * pgi-13: 13.9 known BAD with `-m32`, 13.10 known GOOD without `-m32` - * pgi-15: 15.10 known BAD with `-m32` - -* Similarly, there is a known Fortran PGI compiler issue with long - source directory path names that was resolved in 9.0-4 (9.0-3 is - known to be broken in this regard). - -* Open MPI does not support the PGI compiler suite on OS X or MacOS. - See issues below for more details: - * https://github.com/open-mpi/ompi/issues/2604 - * https://github.com/open-mpi/ompi/issues/2605 - -* OpenSHMEM Fortran bindings do not support the "no underscore" - Fortran symbol convention. IBM's `xlf` compilers build in that mode - by default. As such, IBM's `xlf` compilers cannot build/link the - OpenSHMEM Fortran bindings by default. A workaround is to pass - `FC="xlf -qextname"` at configure time to force a trailing - underscore. See [this - issue](https://github.com/open-mpi/ompi/issues/3612) for more - details. - -* MPI applications that use the mpi_f08 module on PowerPC platforms - (tested ppc64le) will likely experience runtime failures if: - * they are using a GNU linker (ld) version after v2.25.1 and before v2.28, - *and* - * they compiled with PGI (tested 17.5) or XL (tested v15.1.5) compilers. - This was noticed on Ubuntu 16.04 which uses the 2.26.1 version of - `ld` by default. However, this issue impacts any OS using a version - of `ld` noted above. This GNU linker regression will be fixed in - version 2.28. [Here is a link to the GNU bug on this - issue](https://sourceware.org/bugzilla/show_bug.cgi?id=21306). 
The - XL compiler will include a fix for this issue in a future release. - -* On NetBSD-6 (at least AMD64 and i386), and possibly on OpenBSD, - Libtool misidentifies properties of f95/g95, leading to obscure - compile-time failures if used to build Open MPI. You can work - around this issue by ensuring that libtool will not use f95/g95 - (e.g., by specifying `FC=`, or otherwise ensuring - a different Fortran compiler will be found earlier in the path than - `f95`/`g95`), or by disabling the Fortran MPI bindings with - `--disable-mpi-fortran`. - -* On OpenBSD/i386, if you configure with - `--enable-mca-no-build=patcher`, you will also need to add - `--disable-dlopen`. Otherwise, odd crashes can occur - nondeterministically. - -* Absoft 11.5.2 plus a service pack from September 2012 (which Absoft - says is available upon request), or a version later than 11.5.2 - (e.g., 11.5.3), is required to compile the Fortran `mpi_f08` - module. - -* Open MPI does not support the Sparc v8 CPU target. However, - as of Solaris Studio 12.1, and later compilers, one should not - specify `-xarch=v8plus` or `-xarch=v9`. The use of the options - `-m32` and `-m64` for producing 32 and 64 bit targets, respectively, - are now preferred by the Solaris Studio compilers. GCC may - require either `-m32` or `-mcpu=v9 -m32`, depending on GCC version. - -* If one tries to build OMPI on Ubuntu with Solaris Studio using the C++ - compiler and the `-m32` option, you might see a warning: - - ``` - CC: Warning: failed to detect system linker version, falling back to custom linker usage - ``` - - And the build will fail. One can overcome this error by either - setting `LD_LIBRARY_PATH` to the location of the 32 bit libraries - (most likely /lib32), or giving `LDFLAGS="-L/lib32 -R/lib32"` to the - `configure` command. Officially, Solaris Studio is not supported on - Ubuntu Linux distributions, so additional problems might be - incurred. 
- -* Open MPI does not support the `gccfss` compiler (GCC For SPARC - Systems; a now-defunct compiler project from Sun). - -* At least some versions of the Intel 8.1 compiler seg fault while - compiling certain Open MPI source code files. As such, it is not - supported. - -* It has been reported that the Intel 9.1 and 10.0 compilers fail to - compile Open MPI on IA64 platforms. As of 12 Sep 2012, there is - very little (if any) testing performed on IA64 platforms (with any - compiler). Support is "best effort" for these platforms, but it is - doubtful that any effort will be expended to fix the Intel 9.1 / - 10.0 compiler issuers on this platform. - -* Early versions of the Intel 12.1 Linux compiler suite on x86_64 seem - to have a bug that prevents Open MPI from working. Symptoms - including immediate segv of the wrapper compilers (e.g., `mpicc`) and - MPI applications. As of 1 Feb 2012, if you upgrade to the latest - version of the Intel 12.1 Linux compiler suite, the problem will go - away. - -* [Users have reported](https://github.com/open-mpi/ompi/issues/7615) - that the Intel Fortran compiler will fail to link Fortran-based MPI - applications on macOS with linker errors similar to this: - ``` - Undefined symbols for architecture x86_64: - "_ompi_buffer_detach_f08", referenced from: - import-atom in libmpi_usempif08.dylib - ld: symbol(s) not found for architecture x86_64 - ``` - It appears that setting the environment variable - `lt_cx_ld_force_load=no` before invoking Open MPI's `configure` - script works around the issue. For example: - ``` - shell$ lt_cv_ld_force_load=no ./configure ... - ``` - -* The Portland Group compilers prior to version 7.0 require the - `-Msignextend` compiler flag to extend the sign bit when converting - from a shorter to longer integer. This is is different than other - compilers (such as GNU). 
When compiling Open MPI with the Portland - compiler suite, the following flags should be passed to Open MPI's - `configure` script: - - ``` - shell$ ./configure CFLAGS=-Msignextend CXXFLAGS=-Msignextend \ - --with-wrapper-cflags=-Msignextend \ - --with-wrapper-cxxflags=-Msignextend ... - ``` - - This will both compile Open MPI with the proper compile flags and - also automatically add "-Msignextend" when the C and C++ MPI wrapper - compilers are used to compile user MPI applications. - -* It has been reported that Pathscale 5.0.5 and 6.0.527 compilers - give an internal compiler error when trying to build Open MPI. - -* As of July 2017, the Pathscale compiler suite apparently has no - further commercial support, and it does not look like there will be - further releases. Any issues discovered regarding building / - running Open MPI with the Pathscale compiler suite therefore may not - be able to be resolved. - -* Using the Absoft compiler to build the MPI Fortran bindings on Suse - 9.3 is known to fail due to a Libtool compatibility issue. - -* MPI Fortran API support has been completely overhauled since the - Open MPI v1.5/v1.6 series. - - There is now only a single Fortran MPI wrapper compiler and a - single Fortran OpenSHMEM wrapper compiler: `mpifort` and `oshfort`, - respectively. `mpif77` and `mpif90` still exist, but they are - symbolic links to `mpifort`. - - Similarly, Open MPI's `configure` script only recognizes the `FC` - and `FCFLAGS` environment variables (to specify the Fortran - compiler and compiler flags, respectively). The `F77` and `FFLAGS` - environment variables are ***IGNORED***. - - As a direct result, it is ***STRONGLY*** recommended that you - specify a Fortran compiler that uses file suffixes to determine - Fortran code layout (e.g., free form vs. fixed). 
For example, with - some versions of the IBM XLF compiler, it is preferable to use - `FC=xlf` instead of `FC=xlf90`, because `xlf` will automatically - determine the difference between free form and fixed Fortran source - code. - - However, many Fortran compilers allow specifying additional - command-line arguments to indicate which Fortran dialect to use. - For example, if `FC=xlf90`, you may need to use `mpifort --qfixed ...` - to compile fixed format Fortran source files. - - You can use either `ompi_info` or `oshmem_info` to see with which - Fortran compiler Open MPI was configured and compiled. - - There are up to three sets of Fortran MPI bindings that may be - provided (depending on your Fortran compiler): - - 1. `mpif.h`: This is the first MPI Fortran interface that was - defined in MPI-1. It is a file that is included in Fortran - source code. Open MPI's `mpif.h` does not declare any MPI - subroutines; they are all implicit. - - 1. `mpi` module: The `mpi` module file was added in MPI-2. It - provides strong compile-time parameter type checking for MPI - subroutines. - - 1. `mpi_f08` module: The `mpi_f08` module was added in MPI-3. It - provides many advantages over the `mpif.h` file and `mpi` module. - For example, MPI handles have distinct types (vs. all being - integers). See the MPI-3 document for more details. - - ***NOTE:*** The `mpi_f08` module is ***STRONGLY*** recommended for - all new MPI Fortran subroutines and applications. Note that the - `mpi_f08` module can be used in conjunction with the other two - Fortran MPI bindings in the same application (only one binding can - be used per subroutine/function, however). Full interoperability - between `mpif.h`/`mpi` module and `mpi_f08` module MPI handle types - is provided, allowing `mpi_f08` to be used in new subroutines in - legacy MPI applications. 
- - Per the OpenSHMEM specification, there is only one Fortran OpenSHMEM - binding provided: - - * `shmem.fh`: All Fortran OpenSHMEM programs should include - `shmem.f`, and Fortran OpenSHMEM programs that use constants - defined by OpenSHMEM ***MUST*** include `shmem.fh`. - - The following notes apply to the above-listed Fortran bindings: - - * All Fortran compilers support the `mpif.h`/`shmem.fh`-based - bindings, with one exception: the `MPI_SIZEOF` interfaces will - only be present when Open MPI is built with a Fortran compiler - that supports the `INTERFACE` keyword and `ISO_FORTRAN_ENV`. Most - notably, this excludes the GNU Fortran compiler suite before - version 4.9. - - * The level of support provided by the `mpi` module is based on your - Fortran compiler. - - If Open MPI is built with a non-GNU Fortran compiler, or if Open - MPI is built with the GNU Fortran compiler >= v4.9, all MPI - subroutines will be prototyped in the `mpi` module. All calls to - MPI subroutines will therefore have their parameter types checked - at compile time. - - If Open MPI is built with an old `gfortran` (i.e., < v4.9), a - limited `mpi` module will be built. Due to the limitations of - these compilers, and per guidance from the MPI-3 specification, - all MPI subroutines with "choice" buffers are specifically *not* - included in the `mpi` module, and their parameters will not be - checked at compile time. Specifically, all MPI subroutines with - no "choice" buffers are prototyped and will receive strong - parameter type checking at run-time (e.g., `MPI_INIT`, - `MPI_COMM_RANK`, etc.). - - Similar to the `mpif.h` interface, `MPI_SIZEOF` is only supported - on Fortran compilers that support `INTERFACE` and - `ISO_FORTRAN_ENV`. - - * The `mpi_f08` module has been tested with the Intel Fortran - compiler and gfortran >= 4.9. Other modern Fortran compilers - likely also work. 
- - Many older Fortran compilers do not provide enough modern Fortran - features to support the `mpi_f08` module. For example, `gfortran` - < v4.9 does provide enough support for the `mpi_f08` module. - - You can examine the output of the following command to see all - the Fortran features that are/are not enabled in your Open MPI - installation: - - ``` - shell$ ompi_info | grep -i fort - ``` -* As of October 2021, the NVIDIA 'nvfortran' compiler version 21.1 - fails to link `libmpi_usempif08.la` (see - https://github.com/open-mpi/ompi/issues/8919). One can work-around - this issue by explicitely setting `FCFLAGS=-fPIC` during configure. - -### General Run-Time Support Notes - -* The Open MPI installation must be in your `PATH` on all nodes (and - potentially `LD_LIBRARY_PATH` or `DYLD_LIBRARY_PATH`, if - `libmpi`/`libshmem` is a shared library), unless using the - `--prefix` or `--enable-mpirun-prefix-by-default` functionality (see - below). - -* Open MPI's run-time behavior can be customized via Modular Component - Architecture (MCA) parameters (see below for more information on how - to get/set MCA parameter values). Some MCA parameters can be set in - a way that renders Open MPI inoperable (see notes about MCA - parameters later in this file). In particular, some parameters have - required options that must be included. - - * If specified, the `btl` parameter must include the `self` - component, or Open MPI will not be able to deliver messages to the - same rank as the sender. For example: `mpirun --mca btl tcp,self - ...` - * If specified, the `btl_tcp_if_exclude` parameter must include the - loopback device (`lo` on many Linux platforms), or Open MPI will - not be able to route MPI messages using the TCP BTL. For example: - `mpirun --mca btl_tcp_if_exclude lo,eth1 ...` - -* Running on nodes with different endian and/or different datatype - sizes within a single parallel job is supported in this release. 
- However, Open MPI does not resize data when datatypes differ in size - (for example, sending a 4 byte `MPI_DOUBLE` and receiving an 8 byte - `MPI_DOUBLE` will fail). - - -### MPI Functionality and Features - -* All MPI-3.1 functionality is supported. - -* Note that starting with Open MPI v4.0.0, prototypes for several - legacy MPI-1 symbols that were deleted in the MPI-3.0 specification - (which was published in 2012) are no longer available by default in - `mpi.h`. Specifically, several MPI-1 symbols were deprecated in the - 1996 publishing of the MPI-2.0 specification. These deprecated - symbols were eventually removed from the MPI-3.0 specification in - 2012. - - The symbols that now no longer appear by default in Open MPI's - `mpi.h` are: - - * `MPI_Address` (replaced by `MPI_Get_address`) - * `MPI_Errhandler_create` (replaced by `MPI_Comm_create_errhandler`) - * `MPI_Errhandler_get` (replaced by `MPI_Comm_get_errhandler`) - * `MPI_Errhandler_set` (replaced by `MPI_Comm_set_errhandler`) - * `MPI_Type_extent` (replaced by `MPI_Type_get_extent`) - * `MPI_Type_hindexed` (replaced by `MPI_Type_create_hindexed`) - * `MPI_Type_hvector` (replaced by `MPI_Type_create_hvector`) - * `MPI_Type_lb` (replaced by `MPI_Type_get_extent`) - * `MPI_Type_struct` (replaced by `MPI_Type_create_struct`) - * `MPI_Type_ub` (replaced by `MPI_Type_get_extent`) - * `MPI_LB` (replaced by `MPI_Type_create_resized`) - * `MPI_UB` (replaced by `MPI_Type_create_resized`) - * `MPI_COMBINER_HINDEXED_INTEGER` - * `MPI_COMBINER_HVECTOR_INTEGER` - * `MPI_COMBINER_STRUCT_INTEGER` - * `MPI_Handler_function` (replaced by `MPI_Comm_errhandler_function`) - - Although these symbols are no longer prototyped in `mpi.h`, they - are still present in the MPI library in Open MPI v4.0.x. This - enables legacy MPI applications to link and run successfully with - Open MPI v4.0.x, even though they will fail to compile. 
- - ***WARNING:*** Future releases of Open MPI beyond the v4.0.x series - may remove these symbols altogether. - - ***WARNING:*** The Open MPI team ***STRONGLY*** encourages all MPI - application developers to stop using these constructs that were - first deprecated over 20 years ago, and finally removed from the MPI - specification in MPI-3.0 (in 2012). - - ***WARNING:*** [The Open MPI - FAQ](https://www.open-mpi.org/faq/?category=mpi-removed) contains - examples of how to update legacy MPI applications using these - deleted symbols to use the "new" symbols. - - All that being said, if you are unable to immediately update your - application to stop using these legacy MPI-1 symbols, you can - re-enable them in `mpi.h` by configuring Open MPI with the - `--enable-mpi1-compatibility` flag. - -* Rank reordering support is available using the TreeMatch library. It - is activated for the graph and `dist_graph` communicator topologies. - -* When using MPI deprecated functions, some compilers will emit - warnings. For example: - - ``` - shell$ cat deprecated_example.c - #include - void foo(void) { - MPI_Datatype type; - MPI_Type_struct(1, NULL, NULL, NULL, &type); - } - shell$ mpicc -c deprecated_example.c - deprecated_example.c: In function 'foo': - deprecated_example.c:4: warning: 'MPI_Type_struct' is deprecated (declared at /opt/openmpi/include/mpi.h:1522) - shell$ - ``` - -* `MPI_THREAD_MULTIPLE` is supported with some exceptions. - - The following PMLs support `MPI_THREAD_MULTIPLE`: - 1. `cm` (see list (1) of supported MTLs, below) - 1. `ob1` (see list (2) of supported BTLs, below) - 1. `ucx` - - (1) The `cm` PML and the following MTLs support `MPI_THREAD_MULTIPLE`: - 1. `ofi` (Libfabric) - 1. `portals4` - - (2) The `ob1` PML and the following BTLs support `MPI_THREAD_MULTIPLE`: - 1. `self` - 1. `sm` - 1. `smcuda` - 1. `tcp` - 1. `ugni` - 1. `usnic` - - Currently, MPI File operations are not thread safe even if MPI is - initialized for `MPI_THREAD_MULTIPLE` support. 
- -* `MPI_REAL16` and `MPI_COMPLEX32` are only supported on platforms - where a portable C datatype can be found that matches the Fortran - type `REAL*16`, both in size and bit representation. - -* The "libompitrace" library is bundled in Open MPI and is installed - by default (it can be disabled via the `--disable-libompitrace` - flag). This library provides a simplistic tracing of select MPI - function calls via the MPI profiling interface. Linking it in to - your application via (e.g., via `-lompitrace`) will automatically - output to stderr when some MPI functions are invoked: - - ``` - shell$ cd examples/ - shell$ mpicc hello_c.c -o hello_c -lompitrace - shell$ mpirun -np 1 hello_c - MPI_INIT: argc 1 - Hello, world, I am 0 of 1 - MPI_BARRIER[0]: comm MPI_COMM_WORLD - MPI_FINALIZE[0] - shell$ - ``` - - Keep in mind that the output from the trace library is going to - `stderr`, so it may output in a slightly different order than the - `stdout` from your application. - - This library is being offered as a "proof of concept" / convenience - from Open MPI. If there is interest, it is trivially easy to extend - it to printf for other MPI functions. Pull requests on github.com - would be greatly appreciated. - - -### OpenSHMEM Functionality and Features - -All OpenSHMEM-1.3 functionality is supported. - - -### MPI Collectives - -* The `cuda` coll component provides CUDA-aware support for the - reduction type collectives with GPU buffers. This component is only - compiled into the library when the library has been configured with - CUDA-aware support. It intercepts calls to the reduction - collectives, copies the data to staging buffers if GPU buffers, then - calls underlying collectives to do the work. - - -### OpenSHMEM Collectives - -* The `fca` scoll component: the Mellanox Fabric Collective - Accelerator (FCA) is a solution for offloading collective operations - from the MPI process onto Mellanox QDR InfiniBand switch CPUs and - HCAs. 
- -* The `basic` scoll component: Reference implementation of all - OpenSHMEM collective operations. - - -### Network Support - -* There are several main MPI network models available: `ob1`, `cm`, - and `ucx`. `ob1` uses BTL ("Byte Transfer Layer") - components for each supported network. `cm` uses MTL ("Matching - Transport Layer") components for each supported network. `ucx` uses - the OpenUCX transport. - - * `ob1` supports a variety of networks that can be used in - combination with each other: - * OpenFabrics: InfiniBand, iWARP, and RoCE - * Loopback (send-to-self) - * Shared memory - * TCP - * SMCUDA - * Cisco usNIC - * uGNI (Cray Gemini, Aries) - * shared memory (XPMEM, Linux CMA, Linux KNEM, and - copy-in/copy-out shared memory) - - * `cm` supports a smaller number of networks (and they cannot be - used together), but may provide better overall MPI performance: - * Intel Omni-Path PSM2 (version 11.2.173 or later) - * Intel True Scale PSM (QLogic InfiniPath) - * OpenFabrics Interfaces ("libfabric" tag matching) - * Portals 4 - - * UCX is the [Unified Communication X (UCX) communication - library](https://www.openucx.org/). This is an open-source - project developed in collaboration between industry, laboratories, - and academia to create an open-source production grade - communication framework for data centric and high-performance - applications. The UCX library can be downloaded from repositories - (e.g., Fedora/RedHat yum repositories). The UCX library is also - part of Mellanox OFED and Mellanox HPC-X binary distributions. - - UCX currently supports: - - * OpenFabrics Verbs (including InfiniBand and RoCE) - * Cray's uGNI - * TCP - * Shared memory - * NVIDIA CUDA drivers - - While users can manually select any of the above transports at run - time, Open MPI will select a default transport as follows: - - 1. If InfiniBand devices are available, use the UCX PML. - 1. 
If PSM, PSM2, or other tag-matching-supporting Libfabric - transport devices are available (e.g., Cray uGNI), use the `cm` - PML and a single appropriate corresponding `mtl` module. - 1. Otherwise, use the `ob1` PML and one or more appropriate `btl` - modules. - - Users can override Open MPI's default selection algorithms and force - the use of a specific transport if desired by setting the `pml` MCA - parameter (and potentially the `btl` and/or `mtl` MCA parameters) at - run-time: - - ``` - shell$ mpirun --mca pml ob1 --mca btl [comma-delimted-BTLs] ... - or - shell$ mpirun --mca pml cm --mca mtl [MTL] ... - or - shell$ mpirun --mca pml ucx ... - ``` - - There is a known issue when using UCX with very old Mellanox - Infiniband HCAs, in particular HCAs preceding the introduction of - the ConnectX product line, which can result in Open MPI crashing in - MPI_Finalize. This issue is addressed by UCX release 1.9.0 and - newer. - -* The main OpenSHMEM network model is `ucx`; it interfaces directly - with UCX. - -* In prior versions of Open MPI, InfiniBand and RoCE support was - provided through the `openib` BTL and `ob1` PML plugins. Starting - with Open MPI 4.0.0, InfiniBand support through the `openib` plugin - is both deprecated and superseded by the `ucx` PML component. The - `openib` BTL was removed in Open MPI v5.0.0. - - While the `openib` BTL depended on `libibverbs`, the UCX PML depends - on the UCX library. - - Once installed, Open MPI can be built with UCX support by adding - `--with-ucx` to the Open MPI configure command. Once Open MPI is - configured to use UCX, the runtime will automatically select the - `ucx` PML if one of the supported networks is detected (e.g., - InfiniBand). 
It's possible to force using UCX in the `mpirun` or - `oshrun` command lines by specifying any or all of the following mca - parameters: `--mca pml ucx` for MPI point-to-point operations, - `--mca spml ucx` for OpenSHMEM support, and `--mca osc ucx` for MPI - RMA (one-sided) operations. - -* The `usnic` BTL is support for Cisco's usNIC device ("userspace NIC") - on Cisco UCS servers with the Virtualized Interface Card (VIC). - Although the usNIC is accessed via the OpenFabrics Libfabric API - stack, this BTL is specific to Cisco usNIC devices. - -* uGNI is a Cray library for communicating over the Gemini and Aries - interconnects. - -* Linux `knem` support is used when the `sm` (shared memory) BTL is - compiled with knem support (see the `--with-knem` configure option) - and the `knem` Linux module is loaded in the running kernel. If the - `knem` Linux kernel module is not loaded, the `knem` support is (by - default) silently deactivated during Open MPI jobs. - - See https://knem.gforge.inria.fr/ for details on Knem. - -* Linux Cross-Memory Attach (CMA) or XPMEM is used by the `sm` shared - memory BTL when the CMA/XPMEM libraries are installed, - respectively. Linux CMA and XPMEM are similar (but different) - mechanisms for Open MPI to utilize single-copy semantics for shared - memory. - -* The OFI MTL does not support sending messages larger than the active - Libfabric provider's `max_msg_size`. If you receive an error - message about sending too large of a message when using the OFI MTL, - please reach out to your networking vendor to ask them to support a - larger `max_msg_size` for tagged messages. - -### Open MPI Extensions - -An MPI "extensions" framework is included in Open MPI, but is not -enabled by default. See the "Open MPI API Extensions" section below -for more information on compiling and using MPI extensions. - -The following extensions are included in this version of Open MPI: - -1. 
`pcollreq`: Provides routines for persistent collective - communication operations and persistent neighborhood collective - communication operations, which are planned to be included in - MPI-4.0. The function names are prefixed with `MPIX_` instead of - `MPI_`, like `MPIX_Barrier_init`, because they are not - standardized yet. Future versions of Open MPI will switch to the - `MPI_` prefix once the MPI Standard which includes this feature is - published. See their man page for more details. -1. `shortfloat`: Provides MPI datatypes `MPIX_C_FLOAT16`, - `MPIX_SHORT_FLOAT`, `MPIX_SHORT_FLOAT`, and - `MPIX_CXX_SHORT_FLOAT_COMPLEX` if corresponding language types are - available. See `ompi/mpiext/shortfloat/README.txt` for details. -1. `affinity`: Provides the `OMPI_Affinity_str()` API, which returns - a string indicating the resources which a process is bound. For - more details, see its man page. -1. `cuda`: When the library is compiled with CUDA-aware support, it - provides two things. First, a macro - `MPIX_CUDA_AWARE_SUPPORT`. Secondly, the function - `MPIX_Query_cuda_support()` that can be used to query for support. -1. `example`: A non-functional extension; its only purpose is to - provide an example for how to create other extensions. - - -## Building Open MPI - -If you have checked out a ***developer's copy*** of Open MPI (i.e., -you cloned from Git), you really need to read the `HACKING` file -before attempting to build Open MPI. Really. - -If you have downloaded a tarball, then things are much simpler. -Open MPI uses a traditional `configure` script paired with `make` to -build. Typical installs can be of the pattern: - -``` -shell$ ./configure [...options...] -shell$ make [-j N] all install - (use an integer value of N for parallel builds) -``` - -There are many available `configure` options (see `./configure --help` -for a full list); a summary of the more commonly used ones is included -below. 
-
-***NOTE:*** if you are building Open MPI on a network filesystem, the
-machine on which you are building *must* be time-synchronized with
-the file server. Specifically: Open MPI's build system *requires*
-accurate filesystem timestamps. If your `make` output includes
-warnings about timestamps in the future or runs GNU Automake, Autoconf,
-and/or Libtool, this is *not normal*, and you may have an invalid
-build. Ensure that the time on your build machine is synchronized
-with the time on your file server, or build on a local filesystem.
-Then remove the Open MPI source directory and start over (e.g., by
-re-extracting the Open MPI tarball).
-
-Note that for many of Open MPI's `--with-FOO` options, Open MPI will,
-by default, search for header files and/or libraries for `FOO`. If
-the relevant files are found, Open MPI will build support for `FOO`;
-if they are not found, Open MPI will skip building support for `FOO`.
-However, if you specify `--with-FOO` on the configure command line and
-Open MPI is unable to find relevant support for `FOO`, configure will
-assume that it was unable to provide a feature that was specifically
-requested and will abort so that a human can resolve the issue.
-
-Additionally, if a search directory is specified in the form
-`--with-FOO=DIR`, Open MPI will:
-
-1. Search for `FOO`'s header files in `DIR/include`.
-2. Search for `FOO`'s library files:
-   1. If `--with-FOO-libdir=LIBDIR` was specified, search in
-      `LIBDIR`.
-   1. Otherwise, search in `DIR/lib`, and if they are not found
-      there, search again in `DIR/lib64`.
-3. If both the relevant header files and libraries are found:
-   1. Open MPI will build support for `FOO`.
-   1. If the root path where the FOO libraries are found is neither
-      `/usr` nor `/usr/local`, Open MPI will compile itself with
-      RPATH flags pointing to the directory where FOO's libraries
-      are located.
Open MPI does not RPATH `/usr/lib[64]` and - `/usr/local/lib[64]` because many systems already search these - directories for run-time libraries by default; adding RPATH for - them could have unintended consequences for the search path - ordering. - - -### Installation Options - -* `--prefix=DIR`: - Install Open MPI into the base directory named `DIR`. Hence, Open - MPI will place its executables in `DIR/bin`, its header files in - `DIR/include`, its libraries in `DIR/lib`, etc. - -* `--disable-shared`: - By default, Open MPI and OpenSHMEM build shared libraries, and all - components are built as dynamic shared objects (DSOs). This switch - disables this default; it is really only useful when used with - `--enable-static`. Specifically, this option does *not* imply - `--enable-static`; enabling static libraries and disabling shared - libraries are two independent options. - -* `--enable-static`: - Build MPI and OpenSHMEM as static libraries, and statically link in - all components. Note that this option does *not* imply - `--disable-shared`; enabling static libraries and disabling shared - libraries are two independent options. - - Be sure to read the description of `--without-memory-manager`, - below; it may have some effect on `--enable-static`. - -* `--disable-wrapper-rpath`: - By default, the wrapper compilers (e.g., `mpicc`) will enable - "rpath" support in generated executables on systems that support it. - That is, they will include a file reference to the location of Open - MPI's libraries in the application executable itself. This means - that the user does not have to set `LD_LIBRARY_PATH` to find Open - MPI's libraries (e.g., if they are installed in a location that the - run-time linker does not search by default). - - On systems that utilize the GNU `ld` linker, recent enough versions - will actually utilize "runpath" functionality, not "rpath". There - is an important difference between the two: - - 1. 
- "rpath": the location of the Open MPI libraries is hard-coded into
-     the MPI/OpenSHMEM application and cannot be overridden at
-     run-time.
-  1. "runpath": the location of the Open MPI libraries is hard-coded into
-     the MPI/OpenSHMEM application, but can be overridden at run-time
-     by setting the `LD_LIBRARY_PATH` environment variable.
-
-  For example, consider that you install Open MPI vA.B.0 and
-  compile/link your MPI/OpenSHMEM application against it. Later, you
-  install Open MPI vA.B.1 to a different installation prefix (e.g.,
-  `/opt/openmpi/A.B.1` vs. `/opt/openmpi/A.B.0`), and you leave the old
-  installation intact.
-
-  In the rpath case, your MPI application will always use the
-  libraries from your A.B.0 installation. In the runpath case, you
-  can set the `LD_LIBRARY_PATH` environment variable to point to the
-  A.B.1 installation, and then your MPI application will use those
-  libraries.
-
-  Note that in both cases, however, if you remove the original A.B.0
-  installation and set `LD_LIBRARY_PATH` to point to the A.B.1
-  installation, your application will use the A.B.1 libraries.
-
-  This rpath/runpath behavior can be disabled via
-  `--disable-wrapper-rpath`.
-
-  If you would like to keep the rpath option, but not enable runpath,
-  a different configure option is available:
-  `--disable-wrapper-runpath`.
-
-* `--enable-dlopen`:
-  Enable loading of Open MPI components as standalone Dynamic
-  Shared Objects (DSOs) that are loaded at run-time. This option is
-  enabled by default.
-
-  The opposite of this option, --disable-dlopen, causes the following:
-
-  1. Open MPI will not attempt to open any DSOs at run-time.
-  1. configure behaves as if the --enable-mca-static argument was set.
-  1. configure will ignore the --enable-mca-dso argument.
-
-  See the description of --enable-mca-static / --enable-mca-dso for
-  more information.
-
-  Note that this option does *not* change how Open MPI's libraries
-  (libmpi, for example) will be built.
You can change whether Open - MPI builds static or dynamic libraries via the - --enable|disable-static and --enable|disable-shared arguments. - -* `--enable-mca-dso[=LIST]` and `--enable-mca-static[=LIST]` - These two options, along with --enable-mca-no-build, govern the - behavior of how Open MPI's frameworks and components are built. - - The --enable-mca-dso option specifies which frameworks and/or - components are built as Dynamic Shared Objects (DSOs). - Specifically, DSOs are built as "plugins" outside of the core Open - MPI libraries, and are loaded by Open MPI at run time. - - The --enable-mca-static option specifies which frameworks and/or - components are built as part of the core Open MPI libraries (i.e., - they are not built as DSOs, and therefore do not need to be - separately discovered and opened at run time). - - Both options can be used one of two ways: - - 1. --enable-mca-OPTION (with no value) - 1. --enable-mca-OPTION=LIST - - --enable-mca-OPTION=no or --disable-mca-OPTION are both legal - options, but have no impact on the selection logic described below. - Only affirmative options change the selection process. - - LIST is a comma-delimited list of Open MPI frameworks and/or - framework+component tuples. Examples: - - * "btl" specifies the entire BTL framework - * "btl-tcp" specifies just the TCP component in the BTL framework - * "mtl,btl-tcp" specifies the entire MTL framework and the TCP - component in the BTL framework - - Open MPI's configure script uses the values of these two options - when evaluating each component to determine how it should be built - by evaluating these conditions in order: - - 1. If an individual component's build behavior has been specified - via these two options, configure uses that behavior. - 1. Otherwise, if the component is in a framework whose build - behavior has been specified via these two options, configure uses - that behavior. - 1. Otherwise, configure uses the global default build behavior. 
-
-  At each level of the selection process, if the component is
-  specified to be built as both a static and dso component, the static
-  option will win.
-
-  Note that as of Open MPI v5.0.0, configure's global default is to
-  build all components as static (i.e., part of the Open MPI core
-  libraries, not as DSO's). Prior to Open MPI v5.0.0, the global
-  default behavior was to build most components as DSOs.
-
-  Also note that if the --disable-dlopen option is specified, then
-  Open MPI will not be able to search for DSOs at run time, and the
-  value of the --enable-mca-dso option will be silently ignored.
-
-  Some examples:
-
-  1. Default to building all components as static (i.e., as part of
-     the Open MPI core libraries -- no DSOs):
-
-     $ ./configure
-
-  1. Build all components as static, except the TCP BTL, which will be
-     built as a DSO:
-
-     $ ./configure --enable-mca-dso=btl-tcp
-
-  1. Build all components as static, except all BTL components, which
-     will be built as DSOs:
-
-     $ ./configure --enable-mca-dso=btl
-
-  1. Build all components as static, except all MTL components and the
-     TCP BTL component, which will be built as DSOs:
-
-     $ ./configure --enable-mca-dso=mtl,btl-tcp
-
-  1. Build all BTLs as static, except the TCP BTL, as the
-     `btl-tcp` option is more specific than the
-     `btl` option:
-
-     $ ./configure --enable-mca-dso=btl --enable-mca-static=btl-tcp
-
-  1. Build the TCP BTL as static, because the static option at the
-     same level always wins:
-
-     $ ./configure --enable-mca-dso=btl-tcp --enable-mca-static=btl-tcp
-
-* `--enable-mca-no-build=LIST`:
-  Comma-separated list of `<framework>-<component>` pairs that will not be
-  built. For example, `--enable-mca-no-build=btl-portals,oob-ud` will
-  disable building the portals BTL and the ud OOB component.
- -* `--disable-show-load-errors-by-default`: - Set the default value of the `mca_base_component_show_load_errors` - MCA variable: the `--enable` form of this option sets the MCA - variable to true, the `--disable` form sets the MCA variable to - false. The MCA `mca_base_component_show_load_errors` variable can - still be overridden at run time via the usual MCA-variable-setting - mechanisms; this configure option simply sets the default value. - - The `--disable` form of this option is intended for Open MPI - packagers who tend to enable support for many different types of - networks and systems in their packages. For example, consider a - packager who includes support for both the FOO and BAR networks in - their Open MPI package, both of which require support libraries - (`libFOO.so` and `libBAR.so`). If an end user only has BAR - hardware, they likely only have `libBAR.so` available on their - systems -- not `libFOO.so`. Disabling load errors by default will - prevent the user from seeing potentially confusing warnings about - the FOO components failing to load because `libFOO.so` is not - available on their systems. - - Conversely, system administrators tend to build an Open MPI that is - targeted at their specific environment, and contains few (if any) - components that are not needed. In such cases, they might want - their users to be warned that the FOO network components failed to - load (e.g., if `libFOO.so` was mistakenly unavailable), because Open - MPI may otherwise silently failover to a slower network path for MPI - traffic. - -* `--with-platform=FILE`: - Load configure options for the build from `FILE`. Options on the - command line that are not in `FILE` are also used. Options on the - command line and in `FILE` are replaced by what is in `FILE`. - -* `--with-libmpi-name=STRING`: - Replace `libmpi.*` and `libmpi_FOO.*` (where `FOO` is one of the - fortran supporting libraries installed in lib) with `libSTRING.*` - and `libSTRING_FOO.*`. 
This is provided as a convenience mechanism
-  for third-party packagers of Open MPI that might want to rename
-  these libraries for their own purposes. This option is *not*
-  intended for typical users of Open MPI.
-
-
-### Networking support / options
-
-* `--with-fca=DIR`:
-  Specify the directory where the Mellanox FCA library and
-  header files are located.
-
-  FCA is the support library for Mellanox switches and HCAs.
-
-* `--with-hcoll=DIR`:
-  Specify the directory where the Mellanox hcoll library and header
-  files are located. This option is generally only necessary if the
-  hcoll headers and libraries are not in default compiler/linker
-  search paths.
-
-  hcoll is the support library for MPI collective operation offload on
-  Mellanox ConnectX-3 HCAs (and later).
-
-* `--with-knem=DIR`:
-  Specify the directory where the knem libraries and header files are
-  located. This option is generally only necessary if the knem headers
-  and libraries are not in default compiler/linker search paths.
-
-  knem is a Linux kernel module that allows direct process-to-process
-  memory copies (optionally using hardware offload), potentially
-  increasing bandwidth for large messages sent between processes on the
-  same server. See [the Knem web site](https://knem.gforge.inria.fr/)
-  for details.
-
-* `--with-libfabric=DIR`:
-  Specify the directory where the OpenFabrics Interfaces `libfabric`
-  library and header files are located. This option is generally only
-  necessary if the libfabric headers and libraries are not in default
-  compiler/linker search paths.
-
-  Libfabric is the support library for OpenFabrics Interfaces-based
-  network adapters, such as Cisco usNIC, Intel True Scale PSM, Cray
-  uGNI, etc.
-
-* `--with-libfabric-libdir=DIR`:
-  Look in directory for the libfabric libraries. By default, Open MPI
-  will look in `DIR/lib` and `DIR/lib64`, which covers most cases.
-  This option is only needed for special configurations.
-
-* `--with-portals4=DIR`:
-  Specify the directory where the Portals4 libraries and header files
-  are located. This option is generally only necessary if the Portals4
-  headers and libraries are not in default compiler/linker search
-  paths.
-
-  Portals is a low-level network API for high-performance networking
-  on high-performance computing systems developed by Sandia National
-  Laboratories, Intel Corporation, and the University of New Mexico.
-  The Portals 4 Reference Implementation is a complete implementation
-  of Portals 4, with transport over InfiniBand verbs and UDP.
-
-* `--with-portals4-libdir=DIR`:
-  Location of libraries to link with for Portals4 support.
-
-* `--with-portals4-max-md-size=SIZE` and
-  `--with-portals4-max-va-size=SIZE`:
-  Set configuration values for Portals 4.
-
-* `--with-psm=DIR`:
-  Specify the directory where the QLogic InfiniPath / Intel True Scale
-  PSM library and header files are located. This option is generally
-  only necessary if the PSM headers and libraries are not in default
-  compiler/linker search paths.
-
-  PSM is the support library for QLogic InfiniPath and Intel TrueScale
-  network adapters.
-
-* `--with-psm-libdir=DIR`:
-  Look in directory for the PSM libraries. By default, Open MPI will
-  look in `DIR/lib` and `DIR/lib64`, which covers most cases. This
-  option is only needed for special configurations.
-
-* `--with-psm2=DIR`:
-  Specify the directory where the Intel Omni-Path PSM2 library and
-  header files are located. This option is generally only necessary
-  if the PSM2 headers and libraries are not in default compiler/linker
-  search paths.
-
-  PSM2 is the support library for Intel Omni-Path network adapters.
-
-* `--with-psm2-libdir=DIR`:
-  Look in directory for the PSM2 libraries. By default, Open MPI will
-  look in `DIR/lib` and `DIR/lib64`, which covers most cases. This
-  option is only needed for special configurations.
- -* `--with-ucx=DIR`: - Specify the directory where the UCX libraries and header files are - located. This option is generally only necessary if the UCX headers - and libraries are not in default compiler/linker search paths. - -* `--with-ucx-libdir=DIR`: - Look in directory for the UCX libraries. By default, Open MPI will - look in `DIR/lib` and `DIR/lib64`, which covers most cases. This - option is only needed for special configurations. - -* `--with-usnic`: - Abort configure if Cisco usNIC support cannot be built. - - -### Run-time system support - -* `--enable-mpirun-prefix-by-default`: - This option forces the `mpirun` command to always behave as if - `--prefix $prefix` was present on the command line (where `$prefix` - is the value given to the `--prefix` option to configure). This - prevents most `rsh`/`ssh`-based users from needing to modify their - shell startup files to set the `PATH` and/or `LD_LIBRARY_PATH` for - Open MPI on remote nodes. Note, however, that such users may still - desire to set `PATH` -- perhaps even in their shell startup files -- - so that executables such as `mpicc` and `mpirun` can be found - without needing to type long path names. - -* `--with-alps`: - Force the building of for the Cray Alps run-time environment. If - Alps support cannot be found, configure will abort. - -* `--with-lsf=DIR`: - Specify the directory where the LSF libraries and header files are - located. This option is generally only necessary if the LSF headers - and libraries are not in default compiler/linker search paths. - - LSF is a resource manager system, frequently used as a batch - scheduler in HPC systems. - -* `--with-lsf-libdir=DIR`: - Look in directory for the LSF libraries. By default, Open MPI will - look in `DIR/lib` and `DIR/lib64`, which covers most cases. This - option is only needed for special configurations. - -* `--with-slurm`: - Force the building of SLURM scheduler support. 
- -* `--with-sge`: - Specify to build support for the Oracle Grid Engine (OGE) resource - manager and/or the Open Grid Engine. OGE support is disabled by - default; this option must be specified to build OMPI's OGE support. - - The Oracle Grid Engine (OGE) and open Grid Engine packages are - resource manager systems, frequently used as a batch scheduler in - HPC systems. It used to be called the "Sun Grid Engine", which is - why the option is still named `--with-sge`. - -* `--with-tm=DIR`: - Specify the directory where the TM libraries and header files are - located. This option is generally only necessary if the TM headers - and libraries are not in default compiler/linker search paths. - - TM is the support library for the Torque and PBS Pro resource - manager systems, both of which are frequently used as a batch - scheduler in HPC systems. - - -### Miscellaneous support libraries - -* `--with-libevent(=VALUE)` - This option specifies where to find the libevent support headers and - library. The following `VALUE`s are permitted: - - * `internal`: Use Open MPI's internal copy of libevent. - * `external`: Use an external Libevent installation (rely on default - compiler and linker paths to find it) - * ``: Same as `internal`. - * `DIR`: Specify the location of a specific libevent - installation to use - - By default (or if `--with-libevent` is specified with no `VALUE`), - Open MPI will build and use the copy of libevent that it has in its - source tree. However, if the `VALUE` is `external`, Open MPI will - look for the relevant libevent header file and library in default - compiler / linker locations. Or, `VALUE` can be a directory tree - where the libevent header file and library can be found. This - option allows operating systems to include Open MPI and use their - default libevent installation instead of Open MPI's bundled - libevent. - - libevent is a support library that provides event-based processing, - timers, and signal handlers. 
Open MPI requires libevent to build; - passing --without-libevent will cause configure to abort. - -* `--with-libevent-libdir=DIR`: - Look in directory for the libevent libraries. This option is only - usable when building Open MPI against an external libevent - installation. Just like other `--with-FOO-libdir` configure - options, this option is only needed for special configurations. - -* `--with-hwloc(=VALUE)`: - hwloc is a support library that provides processor and memory - affinity information for NUMA platforms. It is required by Open - MPI. Therefore, specifying `--with-hwloc=no` (or `--without-hwloc`) - is disallowed. - - By default (i.e., if `--with-hwloc` is not specified, or if - `--with-hwloc` is specified without a value), Open MPI will first try - to find/use an hwloc installation on the current system. If Open - MPI cannot find one, it will fall back to build and use the internal - copy of hwloc included in the Open MPI source tree. - - Alternatively, the `--with-hwloc` option can be used to specify - where to find the hwloc support headers and library. The following - `VALUE`s are permitted: - - * `internal`: Only use Open MPI's internal copy of hwloc. - * `external`: Only use an external hwloc installation (rely on - default compiler and linker paths to find it). - * `DIR`: Only use the specific hwloc installation found in - the specified directory. - -* `--with-hwloc-libdir=DIR`: - Look in directory for the hwloc libraries. This option is only - usable when building Open MPI against an external hwloc - installation. Just like other `--with-FOO-libdir` configure options, - this option is only needed for special configurations. - -* `--disable-hwloc-pci`: - Disable building hwloc's PCI device-sensing capabilities. On some - platforms (e.g., SusE 10 SP1, x86-64), the libpci support library is - broken. 
Open MPI's configure script should usually detect when - libpci is not usable due to such brokenness and turn off PCI - support, but there may be cases when configure mistakenly enables - PCI support in the presence of a broken libpci. These cases may - result in `make` failing with warnings about relocation symbols in - libpci. The `--disable-hwloc-pci` switch can be used to force Open - MPI to not build hwloc's PCI device-sensing capabilities in these - cases. - - Similarly, if Open MPI incorrectly decides that libpci is broken, - you can force Open MPI to build hwloc's PCI device-sensing - capabilities by using `--enable-hwloc-pci`. - - hwloc can discover PCI devices and locality, which can be useful for - Open MPI in assigning message passing resources to MPI processes. - -* `--with-libltdl=DIR`: - Specify the directory where the GNU Libtool libltdl libraries and - header files are located. This option is generally only necessary - if the libltdl headers and libraries are not in default - compiler/linker search paths. - - Note that this option is ignored if `--disable-dlopen` is specified. - -* `--disable-libompitrace`: - Disable building the simple `libompitrace` library (see note above - about libompitrace) - -* `--with-valgrind(=DIR)`: - Directory where the valgrind software is installed. If Open MPI - finds Valgrind's header files, it will include additional support - for Valgrind's memory-checking debugger. - - Specifically, it will eliminate a lot of false positives from - running Valgrind on MPI applications. There is a minor performance - penalty for enabling this option. - - -### MPI Functionality - -* `--with-mpi-param-check(=VALUE)`: - Whether or not to check MPI function parameters for errors at - runtime. 
The following `VALUE`s are permitted: - - * `always`: MPI function parameters are always checked for errors - * `never`: MPI function parameters are never checked for errors - * `runtime`: Whether MPI function parameters are checked depends on - the value of the MCA parameter `mpi_param_check` (default: yes). - * `yes`: Synonym for "always" (same as `--with-mpi-param-check`). - * `no`: Synonym for "never" (same as `--without-mpi-param-check`). - - If `--with-mpi-param` is not specified, `runtime` is the default. - -* `--disable-mpi-thread-multiple`: - Disable the MPI thread level `MPI_THREAD_MULTIPLE` (it is enabled by - default). - -* `--enable-mpi-java`: - Enable building of an ***EXPERIMENTAL*** Java MPI interface - (disabled by default). You may also need to specify - `--with-jdk-dir`, `--with-jdk-bindir`, and/or `--with-jdk-headers`. - See [README.JAVA.md](README.JAVA.md) for details. - - Note that this Java interface is ***INCOMPLETE*** (meaning: it does - not support all MPI functionality) and ***LIKELY TO CHANGE***. The - Open MPI developers would very much like to hear your feedback about - this interface. See [README.JAVA.md](README.JAVA.md) for more - details. - -* `--enable-mpi-fortran(=VALUE)`: - By default, Open MPI will attempt to build all 3 Fortran bindings: - `mpif.h`, the `mpi` module, and the `mpi_f08` module. The following - `VALUE`s are permitted: - - * `all`: Synonym for `yes`. - * `yes`: Attempt to build all 3 Fortran bindings; skip - any binding that cannot be built (same as - `--enable-mpi-fortran`). - * `mpifh`: Only build `mpif.h` support. - * `usempi`: Only build `mpif.h` and `mpi` module support. - * `usempif08`: Build `mpif.h`, `mpi` module, and `mpi_f08` - module support. - * `none`: Synonym for `no`. - * `no`: Do not build any MPI Fortran support (same as - `--disable-mpi-fortran`). This is mutually exclusive - with building the OpenSHMEM Fortran interface. 
- -* `--enable-mpi-ext(=LIST)`: - Enable Open MPI's non-portable API extensions. `LIST` is a - comma-delmited list of extensions. If no `LIST` is specified, all - of the extensions are enabled. - - See the "Open MPI API Extensions" section for more details. - -* `--disable-mpi-io`: - Disable built-in support for MPI-2 I/O, likely because an - externally-provided MPI I/O package will be used. Default is to use - the internal framework system that uses the ompio component and a - specially modified version of ROMIO that fits inside the romio - component - -* `--disable-io-romio`: - Disable the ROMIO MPI-IO component - -* `--with-io-romio-flags=FLAGS`: - Pass `FLAGS` to the ROMIO distribution configuration script. This - option is usually only necessary to pass - parallel-filesystem-specific preprocessor/compiler/linker flags back - to the ROMIO system. - -* `--disable-io-ompio`: - Disable the ompio MPI-IO component - -* `--enable-sparse-groups`: - Enable the usage of sparse groups. This would save memory - significantly especially if you are creating large - communicators. (Disabled by default) - - -### OpenSHMEM Functionality - -* `--disable-oshmem`: - Disable building the OpenSHMEM implementation (by default, it is - enabled). - -* `--disable-oshmem-fortran`: - Disable building only the Fortran OpenSHMEM bindings. Please see - the "Compiler Notes" section herein which contains further - details on known issues with various Fortran compilers. - - -### Miscellaneous Functionality - -* `--without-memory-manager`: - Disable building Open MPI's memory manager. Open MPI's memory - manager is usually built on Linux based platforms, and is generally - only used for optimizations with some OpenFabrics-based networks (it - is not *necessary* for OpenFabrics networks, but some performance - loss may be observed without it). - - However, it may be necessary to disable the memory manager in order - to build Open MPI statically. 
- -* `--with-ft=TYPE`: - Specify the type of fault tolerance to enable. Options: LAM - (LAM/MPI-like), cr (Checkpoint/Restart). Fault tolerance support is - disabled unless this option is specified. - -* `--enable-peruse`: - Enable the PERUSE MPI data analysis interface. - -* `--enable-heterogeneous`: - Enable support for running on heterogeneous clusters where data - types are equivalent sizes across nodes, but may have differing - endian representations. Heterogeneous support is disabled by - default because it imposes a minor performance penalty. - - Note that the MPI standard *does not guarantee that all - heterogeneous communication will function properly*, **especially - when the conversion between the different representations leads to - loss of accuracy or range.** For example, if a message with a - 16-bit integer datatype is sent with value 0x10000 to a receiver - where the same integer datatype is only 8 bits, the value will be - truncated at the receiver. Similarly, problems can occur if a - floating point datatype in one MPI process uses X1 bits for its - mantissa and Y1 bits for its exponent, but the same floating point - datatype in another MPI process uses X2 and Y2 bits, respectively - (where X1 != X2 and/or Y1 != Y2). Type size differences like this - can lead to unexpected behavior. - - Open MPI's heterogeneous support correctly handles endian - differences between datatype representations that are otherwise - compatible. - -* `--with-wrapper-cflags=CFLAGS` -* `--with-wrapper-cxxflags=CXXFLAGS` -* `--with-wrapper-fflags=FFLAGS` -* `--with-wrapper-fcflags=FCFLAGS` -* `--with-wrapper-ldflags=LDFLAGS` -* `--with-wrapper-libs=LIBS`: - Add the specified flags to the default flags that are used in Open - MPI's "wrapper" compilers (e.g., `mpicc` -- see below for more - information about Open MPI's wrapper compilers). 
By default, Open - MPI's wrapper compilers use the same compilers used to build Open - MPI and specify a minimum set of additional flags that are necessary - to compile/link MPI applications. These configure options give - system administrators the ability to embed additional flags in - OMPI's wrapper compilers (which is a local policy decision). The - meanings of the different flags are: - - `CFLAGS`: Flags passed by the `mpicc` wrapper to the C compiler - `CXXFLAGS`: Flags passed by the `mpic++` wrapper to the C++ compiler - `FCFLAGS`: Flags passed by the `mpifort` wrapper to the Fortran compiler - `LDFLAGS`: Flags passed by all the wrappers to the linker - `LIBS`: Flags passed by all the wrappers to the linker - - There are other ways to configure Open MPI's wrapper compiler - behavior; see [the Open MPI FAQ](https://www.open-mpi.org/faq/) for - more information. - -There are many other options available -- see `./configure --help`. - -Changing the compilers that Open MPI uses to build itself uses the -standard Autoconf mechanism of setting special environment variables -either before invoking configure or on the configure command line. -The following environment variables are recognized by configure: - -* `CC`: C compiler to use -* `CFLAGS`: Compile flags to pass to the C compiler -* `CPPFLAGS`: Preprocessor flags to pass to the C compiler -* `CXX`: C++ compiler to use -* `CXXFLAGS`: Compile flags to pass to the C++ compiler -* `CXXCPPFLAGS`: Preprocessor flags to pass to the C++ compiler -* `FC`: Fortran compiler to use -* `FCFLAGS`: Compile flags to pass to the Fortran compiler -* `LDFLAGS`: Linker flags to pass to all compilers -* `LIBS`: Libraries to pass to all compilers (it is rarely - necessary for users to need to specify additional `LIBS`) -* `PKG_CONFIG`: Path to the `pkg-config` utility - -For example: - -``` -shell$ ./configure CC=mycc CXX=myc++ FC=myfortran ... 
-``` - -***NOTE:*** We generally suggest using the above command line form for -setting different compilers (vs. setting environment variables and -then invoking `./configure`). The above form will save all variables -and values in the `config.log` file, which makes post-mortem analysis -easier if problems occur. - -Note that if you intend to compile Open MPI with a `make` other than -the default one in your `PATH`, then you must either set the `$MAKE` -environment variable before invoking Open MPI's `configure` script, or -pass `MAKE=your_make_prog` to configure. For example: - -``` -shell$ ./configure MAKE=/path/to/my/make ... -``` - -This could be the case, for instance, if you have a shell alias for -`make`, or you always type `gmake` out of habit. Failure to tell -`configure` which non-default `make` you will use to compile Open MPI -can result in undefined behavior (meaning: don't do that). - -Note that you may also want to ensure that the value of -`LD_LIBRARY_PATH` is set appropriately (or not at all) for your build -(or whatever environment variable is relevant for your operating -system). For example, some users have been tripped up by setting to -use a non-default Fortran compiler via the `FC` environment variable, -but then failing to set `LD_LIBRARY_PATH` to include the directory -containing that non-default Fortran compiler's support libraries. -This causes Open MPI's `configure` script to fail when it tries to -compile / link / run simple Fortran programs. - -It is required that the compilers specified be compile and link -compatible, meaning that object files created by one compiler must be -able to be linked with object files from the other compilers and -produce correctly functioning executables. 
- -Open MPI supports all the `make` targets that are provided by GNU -Automake, such as: - -* `all`: build the entire Open MPI package -* `install`: install Open MPI -* `uninstall`: remove all traces of Open MPI from the $prefix -* `clean`: clean out the build tree - -Once Open MPI has been built and installed, it is safe to run `make -clean` and/or remove the entire build tree. - -VPATH and parallel builds are fully supported. - -Generally speaking, the only thing that users need to do to use Open -MPI is ensure that `PREFIX/bin` is in their `PATH` and `PREFIX/lib` is -in their `LD_LIBRARY_PATH`. Users may need to ensure to set the -`PATH` and `LD_LIBRARY_PATH` in their shell setup files (e.g., -`.bashrc`, `.cshrc`) so that non-interactive `rsh`/`ssh`-based logins -will be able to find the Open MPI executables. - - -## Open MPI Version Numbers and Binary Compatibility - -Open MPI has two sets of version numbers that are likely of interest -to end users / system administrator: - -1. Software version number -1. Shared library version numbers - -Both are predicated on Open MPI's definition of "backwards -compatibility." - -***NOTE:*** The version numbering conventions were changed with the -release of v1.10.0. Most notably, Open MPI no longer uses an -"odd/even" release schedule to indicate feature development vs. stable -releases. See the README in releases prior to v1.10.0 for more -information (e.g., -https://github.com/open-mpi/ompi/blob/v1.8/README#L1392-L1475). - - -### Backwards Compatibility - -Open MPI version Y is backwards compatible with Open MPI version X -(where Y>X) if users can: - -* Compile an MPI/OpenSHMEM application with version X, - `mpirun`/`oshrun` it with version Y, and get the same - user-observable behavior. -* Invoke `ompi_info` with the same CLI options in versions X and Y and - get the same user-observable behavior. 
- -Note that this definition encompasses several things: - -* Application Binary Interface (ABI) -* MPI / OpenSHMEM run time system -* `mpirun` / `oshrun` command line options -* MCA parameter names / values / meanings - -However, this definition only applies when the same version of Open -MPI is used with all instances of the runtime and MPI / OpenSHMEM -processes in a single MPI job. If the versions are not exactly the -same everywhere, Open MPI is not guaranteed to work properly in any -scenario. - -Backwards compatibility tends to work best when user applications are -dynamically linked to one version of the Open MPI / OSHMEM libraries, -and can be updated at run time to link to a new version of the Open -MPI / OSHMEM libraries. - -For example, if an MPI / OSHMEM application links statically against -the libraries from Open MPI vX, then attempting to launch that -application with `mpirun` / `oshrun` from Open MPI vY is not guaranteed to -work (because it is mixing vX and vY of Open MPI in a single job). - -Similarly, if using a container technology that internally bundles all -the libraries from Open MPI vX, attempting to launch that container -with `mpirun` / `oshrun` from Open MPI vY is not guaranteed to work. - -### Software Version Number - -Official Open MPI releases use the common "A.B.C" version identifier -format. Each of the three numbers has a specific meaning: - -* Major: The major number is the first integer in the version string - Changes in the major number typically indicate a significant - change in the code base and/or end-user functionality, and also - indicate a break from backwards compatibility. Specifically: Open - MPI releases with different major version numbers are not - backwards compatibale with each other. - - ***CAVEAT:*** This rule does not extend to versions prior to v1.10.0. - Specifically: v1.10.x is not guaranteed to be backwards - compatible with other v1.x releases. 
- -* Minor: The minor number is the second integer in the version string. - Changes in the minor number indicate a user-observable change in the - code base and/or end-user functionality. Backwards compatibility - will still be preserved with prior releases that have the same major - version number (e.g., v2.5.3 is backwards compatible with v2.3.1). - -* Release: The release number is the third integer in the version - string. Changes in the release number typically indicate a bug fix - in the code base and/or end-user functionality. For example, if - there is a release that only contains bug fixes and no other - user-observable changes or new features, only the third integer will - be increased (e.g., from v4.3.0 to v4.3.1). - -The "A.B.C" version number may optionally be followed by a Quantifier: - -* Quantifier: Open MPI version numbers sometimes have an arbitrary - string affixed to the end of the version number. Common strings - include: - * aX: Indicates an alpha release. X is an integer indicating the - number of the alpha release (e.g., v1.10.3a5 indicates the 5th - alpha release of version 1.10.3). - * bX: Indicates a beta release. X is an integer indicating the - number of the beta release (e.g., v1.10.3b3 indicates the 3rd beta - release of version 1.10.3). - * rcX: Indicates a release candidate. X is an integer indicating the - number of the release candidate (e.g., v1.10.3rc4 indicates the - 4th release candidate of version 1.10.3). - -Nightly development snapshot tarballs use a different version number -scheme; they contain three distinct values: - -* The git branch name from which the tarball was created. -* The date/timestamp, in `YYYYMMDDHHMM` format. -* The hash of the git commit from which the tarball was created. - -For example, a snapshot tarball filename of -`openmpi-v2.x-201703070235-e4798fb.tar.gz` indicates that this tarball -was created from the v2.x branch, on March 7, 2017, at 2:35am GMT, -from git hash e4798fb. 
- -### Shared Library Version Number - -The GNU Libtool official documentation details how the versioning -scheme works. The quick version is that the shared library versions -are a triple of integers: (current,revision,age), or `c:r:a`. This -triple is not related to the Open MPI software version number. There -are six simple rules for updating the values (taken almost verbatim -from the Libtool docs): - -1. Start with version information of `0:0:0` for each shared library. -1. Update the version information only immediately before a public - release of your software. More frequent updates are unnecessary, - and only guarantee that the current interface number gets larger - faster. -1. If the library source code has changed at all since the last - update, then increment revision (`c:r:a` becomes `c:r+1:a`). -1. If any interfaces have been added, removed, or changed since the - last update, increment current, and set revision to 0. -1. If any interfaces have been added since the last public release, - then increment age. -1. If any interfaces have been removed since the last public release, - then set age to 0. - -Here's how we apply those rules specifically to Open MPI: - -1. The above rules do not apply to MCA components (a.k.a. "plugins"); - MCA component `.so` versions stay unspecified. -1. The above rules apply exactly as written to the following libraries - starting with Open MPI version v1.5 (prior to v1.5, `libopen-pal` - and `libopen-rte` were still at `0:0:0` for reasons discussed in bug - ticket #2092 https://svn.open-mpi.org/trac/ompi/ticket/2092): - * `libopen-rte` - * `libopen-pal` - * `libmca_common_*` -1. The following libraries use a slightly modified version of the - above rules: rules 4, 5, and 6 only apply to the official MPI and - OpenSHMEM interfaces (functions, global variables). 
The rationale - for this decision is that the vast majority of our users only care - about the official/public MPI/OpenSHMEM interfaces; we therefore - want the `.so` version number to reflect only changes to the - official MPI/OpenSHMEM APIs. Put simply: non-MPI/OpenSHMEM API / - internal changes to the MPI-application-facing libraries are - irrelevant to pure MPI/OpenSHMEM applications. - * `libmpi` - * `libmpi_mpifh` - * `libmpi_usempi_tkr` - * `libmpi_usempi_ignore_tkr` - * `libmpi_usempif08` - * `libmpi_cxx` - * `libmpi_java` - * `liboshmem` - - -## Checking Your Open MPI Installation - -The `ompi_info` command can be used to check the status of your Open -MPI installation (located in `PREFIX/bin/ompi_info`). Running it with -no arguments provides a summary of information about your Open MPI -installation. - -Note that the `ompi_info` command is extremely helpful in determining -which components are installed as well as listing all the run-time -settable parameters that are available in each component (as well as -their default values). - -The following options may be helpful: - -* `--all`: Show a *lot* of information about your Open MPI - installation. -* `--parsable`: Display all the information in an easily - `grep`/`cut`/`awk`/`sed`-able format. -* `--param FRAMEWORK COMPONENT`: - A `FRAMEWORK` value of `all` and a `COMPONENT` value of `all` will - show all parameters to all components. Otherwise, the parameters of - all the components in a specific framework, or just the parameters - of a specific component can be displayed by using an appropriate - FRAMEWORK and/or COMPONENT name. -* `--level LEVEL`: - By default, `ompi_info` only shows "Level 1" MCA parameters -- - parameters that can affect whether MPI processes can run - successfully or not (e.g., determining which network interfaces to - use). The `--level` option will display all MCA parameters from - level 1 to `LEVEL` (the max `LEVEL` value is 9). 
Use `ompi_info - --param FRAMEWORK COMPONENT --level 9` to see *all* MCA parameters - for a given component. See "The Modular Component Architecture - (MCA)" section, below, for a fuller explanation. - -Changing the values of these parameters is explained in the "The -Modular Component Architecture (MCA)" section, below. - -When verifying a new Open MPI installation, we recommend running six -tests: - -1. Use `mpirun` to launch a non-MPI program (e.g., `hostname` or - `uptime`) across multiple nodes. -1. Use `mpirun` to launch a trivial MPI program that does no MPI - communication (e.g., the `hello_c` program in the `examples/` - directory in the Open MPI distribution). -1. Use `mpirun` to launch a trivial MPI program that sends and - receives a few MPI messages (e.g., the `ring_c` program in the - `examples/` directory in the Open MPI distribution). -1. Use `oshrun` to launch a non-OpenSHMEM program across multiple - nodes. -1. Use `oshrun` to launch a trivial MPI program that does no OpenSHMEM - communication (e.g., `hello_shmem.c` program in the `examples/` - directory in the Open MPI distribution.) -1. Use `oshrun` to launch a trivial OpenSHMEM program that puts and - gets a few messages (e.g., the `ring_shmem.c` in the `examples/` - directory in the Open MPI distribution.) - -If you can run all six of these tests successfully, that is a good -indication that Open MPI built and installed properly. - - -## Open MPI API Extensions - -Open MPI contains a framework for extending the MPI API that is -available to applications. Each extension is usually a standalone set -of functionality that is distinct from other extensions (similar to -how Open MPI's plugins are usually unrelated to each other). These -extensions provide new functions and/or constants that are available -to MPI applications. - -WARNING: These extensions are neither standard nor portable to other -MPI implementations! 
- -### Compiling the extensions - -Open MPI extensions are all enabled by default; they can be disabled -via the `--disable-mpi-ext` command line switch. - -Since extensions are meant to be used by advanced users only, this -file does not document which extensions are available or what they -do. Look in the ompi/mpiext/ directory to see the extensions; each -subdirectory of that directory contains an extension. Each has a -README file that describes what it does. - -### Using the extensions - -To reinforce the fact that these extensions are non-standard, you must -include a separate header file after `` to obtain the function -prototypes, constant declarations, etc. For example: - -```c -#include -#if defined(OPEN_MPI) && OPEN_MPI -#include -#endif - -int main() { - MPI_Init(NULL, NULL); - -#if defined(OPEN_MPI) && OPEN_MPI - { - char ompi_bound[OMPI_AFFINITY_STRING_MAX]; - char current_binding[OMPI_AFFINITY_STRING_MAX]; - char exists[OMPI_AFFINITY_STRING_MAX]; - OMPI_Affinity_str(OMPI_AFFINITY_LAYOUT_FMT, ompi_bound, - current_bindings, exists); - } -#endif - MPI_Finalize(); - return 0; -} -``` - -Notice that the Open MPI-specific code is surrounded by the `#if` -statement to ensure that it is only ever compiled by Open MPI. - -The Open MPI wrapper compilers (`mpicc` and friends) should -automatically insert all relevant compiler and linker flags necessary -to use the extensions. No special flags or steps should be necessary -compared to "normal" MPI applications. 
- - -## Compiling Open MPI Applications - -Open MPI provides "wrapper" compilers that should be used for -compiling MPI and OpenSHMEM applications: - -* C: `mpicc`, `oshcc` -* C++: `mpiCC`, `oshCC` (or `mpic++` if your filesystem is case-insensitive) -* Fortran: `mpifort`, `oshfort` - -For example: - -``` -shell$ mpicc hello_world_mpi.c -o hello_world_mpi -g -shell$ -``` - -For OpenSHMEM applications: - -``` -shell$ oshcc hello_shmem.c -o hello_shmem -g -shell$ -``` - -All the wrapper compilers do is add a variety of compiler and linker -flags to the command line and then invoke a back-end compiler. To be -specific: the wrapper compilers do not parse source code at all; they -are solely command-line manipulators, and have nothing to do with the -actual compilation or linking of programs. The end result is an MPI -executable that is properly linked to all the relevant libraries. - -Customizing the behavior of the wrapper compilers is possible (e.g., -changing the compiler [not recommended] or specifying additional -compiler/linker flags); see the Open MPI FAQ for more information. - -Alternatively, Open MPI also installs `pkg-config(1)` configuration -files under `$libdir/pkgconfig`. If `pkg-config` is configured to find -these files, then compiling / linking Open MPI programs can be -performed like this: - -``` -shell$ gcc hello_world_mpi.c -o hello_world_mpi -g \ - `pkg-config ompi-c --cflags --libs` -shell$ -``` - -Open MPI supplies multiple `pkg-config(1)` configuration files; one -for each different wrapper compiler (language): - -* `ompi`: Synonym for `ompi-c`; Open MPI applications using the C - MPI bindings -* `ompi-c`: Open MPI applications using the C MPI bindings -* `ompi-cxx`: Open MPI applications using the C MPI bindings -* `ompi-fort`: Open MPI applications using the Fortran MPI bindings - -The following `pkg-config(1)` configuration files *may* be installed, -depending on which command line options were specified to Open MPI's -configure script. 
They are not necessary for MPI applications, but -may be used by applications that use Open MPI's lower layer support -libraries. - -* `opal`: Open Portable Access Layer applications - - -## Running Open MPI Applications - -Open MPI supports both `mpirun` and `mpiexec` (they are exactly -equivalent) to launch MPI applications. For example: - -``` -shell$ mpirun -np 2 hello_world_mpi -or -shell$ mpiexec -np 1 hello_world_mpi : -np 1 hello_world_mpi -``` - -are equivalent. - -The `rsh` launcher (which defaults to using `ssh`) accepts a -`--hostfile` parameter (the option `--machinefile` is equivalent); you -can specify a `--hostfile` parameter indicating a standard -`mpirun`-style hostfile (one hostname per line): - -``` -shell$ mpirun --hostfile my_hostfile -np 2 hello_world_mpi -``` - -If you intend to run more than one process on a node, the hostfile can -use the "slots" attribute. If "slots" is not specified, a count of 1 -is assumed. For example, using the following hostfile: - -``` -shell$ cat my_hostfile -node1.example.com -node2.example.com -node3.example.com slots=2 -node4.example.com slots=4 -``` - -``` -shell$ mpirun --hostfile my_hostfile -np 8 hello_world_mpi -``` - -will launch `MPI_COMM_WORLD` rank 0 on node1, rank 1 on node2, ranks 2 -and 3 on node3, and ranks 4 through 7 on node4. - -Other starters, such as the resource manager / batch scheduling -environments, do not require hostfiles (and will ignore the hostfile -if it is supplied). They will also launch as many processes as slots -have been allocated by the scheduler if no "-np" argument has been -provided. For example, running a SLURM job with 8 processors: - -``` -shell$ salloc -n 8 mpirun a.out -``` - -The above command will reserve 8 processors and run 1 copy of mpirun, -which will, in turn, launch 8 copies of a.out in a single -`MPI_COMM_WORLD` on the processors that were allocated by SLURM. 
- -Note that the values of component parameters can be changed on the -`mpirun` / `mpiexec` command line. This is explained in the section -below, "The Modular Component Architecture (MCA)". - -Open MPI supports `oshrun` to launch OpenSHMEM applications. For -example: - -``` -shell$ oshrun -np 2 hello_world_oshmem -``` - -OpenSHMEM applications may also be launched directly by resource -managers such as SLURM. For example, when OMPI is configured -`--with-pmix` and `--with-slurm`, one may launch OpenSHMEM applications -via `srun`: - -``` -shell$ srun -N 2 hello_world_oshmem -``` - -## The Modular Component Architecture (MCA) - -The MCA is the backbone of Open MPI -- most services and functionality -are implemented through MCA components. - -### MPI layer frameworks - -Here is a list of all the component frameworks in the MPI layer of -Open MPI: - -* `bml`: BTL management layer -* `coll`: MPI collective algorithms -* `fbtl`: file byte transfer layer: abstraction for individual blocking and non-blocking read and write operations -* `fcoll`: Collective read and write operations for MPI I/O. -* `fs`: File system functions for MPI I/O. -* `hook`: Make calls at various points of MPI process life-cycle. -* `io`: MPI I/O -* `mtl`: Matching transport layer, used for MPI point-to-point - messages on some types of networks -* `op`: Back end computations for intrinsic MPI_Op operators -* `osc`: MPI one-sided communications -* `pml`: MPI point-to-point management layer -* `part`: MPI Partitioned communication. 
-* `sharedfp`: shared file pointer operations for MPI I/O -* `topo`: MPI topology routines -* `vprotocol`: Protocols for the "v" PML - -### OpenSHMEM component frameworks - -* `atomic`: OpenSHMEM atomic operations -* `memheap`: OpenSHMEM memory allocators that support the - PGAS memory model -* `scoll`: OpenSHMEM collective operations -* `spml`: OpenSHMEM "pml-like" layer: supports one-sided, - point-to-point operations -* `sshmem`: OpenSHMEM shared memory backing facility - -### Miscellaneous frameworks: - -* `allocator`: Memory allocator -* `backtrace`: Debugging call stack backtrace support -* `btl`: Point-to-point Byte Transfer Layer -* `dl`: Dynamic loading library interface -* `hwloc`: Hardware locality (hwloc) versioning support -* `if`: OS IP interface support -* `installdirs`: Installation directory relocation services -* `memchecker`: Run-time memory checking -* `memcpy`: Memory copy support -* `memory`: Memory management hooks -* `mpool`: Memory pooling -* `patcher`: Symbol patcher hooks -* `pmix`: Process management interface (exascale) -* `rcache`: Memory registration cache -* `reachable`: Reachability matrix between endpoints of a given pair of hosts -* `shmem`: Shared memory support (NOT related to OpenSHMEM) -* `smsc`: Shared Memory Single Copy -* `threads`: Thread management and support. -* `timer`: High-resolution timers - -### Back-end run-time environment (PRTE): - -See: https://github.com/openpmix/prrte - -### Framework notes - -Each framework typically has one or more components that are used at -run-time. For example, the `btl` framework is used by the MPI layer -to send bytes across different types underlying networks. The `tcp` -`btl`, for example, sends messages across TCP-based networks; the -`ucx` `pml` sends messages across InfiniBand-based networks. - -Each component typically has some tunable parameters that can be -changed at run-time. Use the `ompi_info` command to check a component -to see what its tunable parameters are. 
For example: - -``` -shell$ ompi_info --param btl tcp -``` - -shows some of the parameters (and default values) for the `tcp` `btl` -component (use `--level` to show *all* the parameters; see below). - -Note that `ompi_info` only shows a small number a component's MCA -parameters by default. Each MCA parameter has a "level" value from 1 -to 9, corresponding to the MPI-3 MPI_T tool interface levels. In Open -MPI, we have interpreted these nine levels as three groups of three: - -1. End user / basic -1. End user / detailed -1. End user / all -1. Application tuner / basic -1. Application tuner / detailed -1. Application tuner / all -1. MPI/OpenSHMEM developer / basic -1. MPI/OpenSHMEM developer / detailed -1. MPI/OpenSHMEM developer / all - -Here's how the three sub-groups are defined: - -1. End user: Generally, these are parameters that are required for - correctness, meaning that someone may need to set these just to - get their MPI/OpenSHMEM application to run correctly. -1. Application tuner: Generally, these are parameters that can be - used to tweak MPI application performance. -1. MPI/OpenSHMEM developer: Parameters that either don't fit in the - other two, or are specifically intended for debugging / - development of Open MPI itself. - -Each sub-group is broken down into three classifications: - -1. Basic: For parameters that everyone in this category will want to - see. -1. Detailed: Parameters that are useful, but you probably won't need - to change them often. -1. All: All other parameters -- probably including some fairly - esoteric parameters. - -To see *all* available parameters for a given component, specify that -ompi_info should use level 9: - -``` -shell$ ompi_info --param btl tcp --level 9 -``` - -These values can be overridden at run-time in several ways. At -run-time, the following locations are examined (in order) for new -values of parameters: - -1. 
`PREFIX/etc/openmpi-mca-params.conf`: - This file is intended to set any system-wide default MCA parameter - values -- it will apply, by default, to all users who use this Open - MPI installation. The default file that is installed contains many - comments explaining its format. - -1. `$HOME/.openmpi/mca-params.conf`: - If this file exists, it should be in the same format as - `PREFIX/etc/openmpi-mca-params.conf`. It is intended to provide - per-user default parameter values. - -1. environment variables of the form `OMPI_MCA_` set equal to a - `VALUE`: - - Where `` is the name of the parameter. For example, set the - variable named `OMPI_MCA_btl_tcp_frag_size` to the value 65536 - (Bourne-style shells): - - ``` - shell$ OMPI_MCA_btl_tcp_frag_size=65536 - shell$ export OMPI_MCA_btl_tcp_frag_size - ``` - -4. the `mpirun`/`oshrun` command line: `--mca NAME VALUE` - - Where is the name of the parameter. For example: - - ``` - shell$ mpirun --mca btl_tcp_frag_size 65536 -np 2 hello_world_mpi - ``` - -These locations are checked in order. For example, a parameter value -passed on the `mpirun` command line will override an environment -variable; an environment variable will override the system-wide -defaults. - -Each component typically activates itself when relevant. For example, -the usNIC component will detect that usNIC devices are present and -will automatically be used for MPI communications. The SLURM -component will automatically detect when running inside a SLURM job -and activate itself. And so on. - -Components can be manually activated or deactivated if necessary, of -course. The most common components that are manually activated, -deactivated, or tuned are the `btl` components -- components that are -used for MPI point-to-point communications on many types common -networks. 
- -For example, to *only* activate the `tcp` and `self` (process loopback) -components are used for MPI communications, specify them in a -comma-delimited list to the `btl` MCA parameter: - -``` -shell$ mpirun --mca btl tcp,self hello_world_mpi -``` - -To add shared memory support, add `sm` into the command-delimited list -(list order does not matter): - -``` -shell$ mpirun --mca btl tcp,sm,self hello_world_mpi -``` - -(there used to be a `vader` BTL for shared memory support; it was -renamed to `sm` in Open MPI v5.0.0, but the alias `vader` still works -as well) - -To specifically deactivate a specific component, the comma-delimited -list can be prepended with a `^` to negate it: - -``` -shell$ mpirun --mca btl ^tcp hello_mpi_world -``` - -The above command will use any other `btl` component other than the -`tcp` component. - - -## Questions? Problems? - -Found a bug? Got a question? Want to make a suggestion? Want to -contribute to Open MPI? Please let us know! - -When submitting questions and problems, be sure to include as much -extra information as possible. [See the community help web -page](https://www.open-mpi.org/community/help/) for details on all the -information that we request in order to provide assistance: - -The best way to report bugs, send comments, or ask questions is to -sign up on the user's and/or developer's mailing list (for user-level -and developer-level questions; when in doubt, send to the user's -list): - -* users@lists.open-mpi.org -* devel@lists.open-mpi.org - -Because of spam, only subscribers are allowed to post to these lists -(ensure that you subscribe with and post from exactly the same e-mail -address -- joe@example.com is considered different than -joe@mycomputer.example.com!). 
Visit these pages to subscribe to the -lists: - -* [Subscribe to the users mailing - list](https://lists.open-mpi.org/mailman/listinfo/users) -* [Subscribe to the developers mailing - list](https://lists.open-mpi.org/mailman/listinfo/devel) - -Make today an Open MPI day! +`The Open MPI Project `_ is an open source +implementation of the `Message Passing Interface (MPI) specification +`_ that is developed and maintained +by a consortium of academic, research, and industry partners. Open +MPI is therefore able to combine the expertise, technologies, and +resources from all across the High Performance Computing community in +order to build the best MPI library available. Open MPI offers +advantages for system and software vendors, application developers and +computer science researchers. + +## Official documentation + +The Open MPI documentation can be viewed in the following ways: + +1. Online at https://ompi.readthedocs.io/en/latest/ +1. In self-contained (i.e., suitable for local viewing, without an + internet connection) in official distribution tarballs under + `docs/_build/html/index.html`. + +## Building the documentation locally + +The source code for Open MPI's docs can be found in the Open MPI Git +repository under the `docs` folder. + +Developers who clone the Open MPI Git repository will not have the +HTML documentation and man pages by default; it must be built. +Instructions for how to build the Open MPI documentation can be found +here: +https://ompi.readthedocs.io/en/latest/developers/prerequisites.html#sphinx. diff --git a/config/Makefile.am b/config/Makefile.am index 3bc38a328b1..92aea16438f 100644 --- a/config/Makefile.am +++ b/config/Makefile.am @@ -9,7 +9,7 @@ # University of Stuttgart. All rights reserved. # Copyright (c) 2004-2005 The Regents of the University of California. # All rights reserved. -# Copyright (c) 2006-2021 Cisco Systems, Inc. All rights reserved. +# Copyright (c) 2006-2022 Cisco Systems, Inc. All rights reserved. 
# Copyright (c) 2010 Oracle and/or its affiliates. All rights # reserved. # Copyright (c) 2014-2015 Intel, Inc. All rights reserved. @@ -30,8 +30,6 @@ EXTRA_DIST = \ opal_mca_priority_sort.pl \ find_common_syms \ getdate.sh \ - make_manpage.pl \ - md2nroff.pl \ from-savannah/upstream-config.guess \ from-savannah/upstream-config.sub \ extract-3rd-party-configure.pl diff --git a/config/make_manpage.pl b/config/make_manpage.pl deleted file mode 100755 index 28c01887e80..00000000000 --- a/config/make_manpage.pl +++ /dev/null @@ -1,73 +0,0 @@ -#!/usr/bin/env perl -# -# Copyright (c) 2015 Research Organization for Information Science -# and Technology (RIST). All rights reserved. -# Copyright (c) 2015 Cisco Systems, Inc. All rights reserved. -# Copyright (c) 2020 Intel, Inc. All rights reserved. -# $COPYRIGHT$ -# -# Subroutine to generate a bunch of Fortran declarations and symbols -# - -use strict; - -use Getopt::Long; - -my $package_name; -my $package_version; -my $ompi_date; -my $opal_date; -my $fortran = '1'; -my $f08 = '1'; -my $input; -my $output; -my $help_arg = 0; - -&Getopt::Long::Configure("bundling"); -my $ok = Getopt::Long::GetOptions("package-name=s" => \$package_name, - "package-version=s" => \$package_version, - "ompi-date=s" => \$ompi_date, - "opal-date=s" => \$opal_date, - "fortran!" => \$fortran, - "f08!" 
=> \$f08, - "input=s" => \$input, - "output=s" => \$output); - -if ($help_arg || !$ok || - !defined($input) || - !defined($output) || - !defined($package_name) || - !defined($package_version) || - !defined($ompi_date) || - !defined($opal_date)) { - print "Usage: $0 --package-name= --package-version= --ompi-date= --opal-date= --input= --output= [--nocxx] [ --nofortran] [--nof08]\n"; - exit(1 - $ok); -} - -open(FILE, $input) || - die "Can't open $input"; -my $file; -$file .= $_ - while(); -close(FILE); - -$file =~ s/#PACKAGE_NAME#/$package_name/g; -$file =~ s/#PACKAGE_VERSION#/$package_version/g; -$file =~ s/#OMPI_DATE#/$ompi_date/g; -$file =~ s/#OPAL_DATE#/$opal_date/g; - -if ($fortran == 0) { - $file =~ s/\n\.SH Fortran Syntax.+?\n\.SH/\n\.SH/s; -} - -if ($f08 == 0) { - $file =~ s/\n\.SH Fortran 2008 Syntax.+?\n\.SH/\n\.SH/s; -} - -open(FILE, ">$output") || - die "Can't open $output"; -print FILE $file; -close(FILE); - -exit(0); - diff --git a/config/md2nroff.pl b/config/md2nroff.pl deleted file mode 100755 index a8ee21d449a..00000000000 --- a/config/md2nroff.pl +++ /dev/null @@ -1,130 +0,0 @@ -#!/usr/bin/env perl -# -# Copyright (c) 2020 Cisco Systems, Inc. All rights reserved. 
-# $COPYRIGHT$ -# -# Additional copyrights may follow -# -# $HEADER$ -# - -use strict; - -use IPC::Open3; -use File::Basename; -use Getopt::Long; - -#-------------------------------------------------------------------------- - -my $source_arg; -my $dest_arg; -my $pandoc_arg = "pandoc"; -my $help_arg; -my $verbose_arg; - -my $ok = Getopt::Long::GetOptions("source=s" => \$source_arg, - "dest=s" => \$dest_arg, - "pandoc=s" => \$pandoc_arg, - "help" => \$help_arg, - "verbose" => \$verbose_arg); - -if (!$source_arg || !$dest_arg) { - print("Must specify --source and --dest\n"); - $ok = 0; -} - -if (!$ok || $help_arg) { - print "Invalid command line argument.\n\n" - if (!$ok); - print "Options: - --source FILE Source Markdown filename - --dest FILE Destination nroff file - --pandoc FILE Location of pandoc executable - --help This help list - --verbose Be verbose when running\n"; - exit($ok ? 0 : 1); -} - -#-------------------------------------------------------------------------- - -# Read in the source -die "Error: $source_arg does not exist" - if (! -f $source_arg); - -my $source_content; -open(FILE, $source_arg) || - die "Can't open $source_arg"; -$source_content .= $_ - while(); -close(FILE); - -#-------------------------------------------------------------------------- - -# Figure out the section of man page -die "Cannot figure out man page section from source filename" - if (!($source_arg =~ m/(\d+).md$/)); -my $man_section = $1; - -my $shortfile = basename($source_arg); -$shortfile =~ s/\.$man_section\.md$//; - -#-------------------------------------------------------------------------- - -my ($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) = localtime(); -my $today = sprintf("%04d-%02d-%02d", ($year+1900), $mon, $mday); - -# Run opal_get_version.sh to get the OMPI version. 
-my $config_dir = dirname($0); -my $get_version = "$config_dir/opal_get_version.sh"; -my $VERSION_file = "$config_dir/../VERSION"; -my $out = `$get_version $VERSION_file --full`; -chomp($out); - -# Pandoc does not handle markdown links in output nroff properly, so -# just remove all links. Specifically: some versions of Pandoc ignore -# the links, but others handle it badly. -$source_content =~ s/\[(.+)\]\((.+)\)/\1/g; - -# Add the pandoc header -$source_content = "--- -section: $man_section -title: $shortfile -header: Open MPI -footer: $today ---- - -$source_content"; - -#-------------------------------------------------------------------------- - -print("*** Processing: $source_arg --> $dest_arg\n") - if ($verbose_arg); - -my $pid = open3(my $child_stdin, my $child_stdout, my $child_stderr, - "$pandoc_arg -s --from=markdown --to=man"); -print $child_stdin $source_content; -close($child_stdin); -my $pandoc_rendered; -$pandoc_rendered .= $_ - while(<$child_stdout>); -close($child_stdout); -close($child_stderr) - if ($child_stderr); -waitpid($pid, 0); - -print("Writing new file $dest_arg\n") - if ($verbose_arg); - -# Make the target directory if it does not exist (needed for VPATH -# builds) -my $dest_dir = dirname($dest_arg); -mkdir($dest_dir) - if (! -d $dest_dir); - -# Write the output file -open(FILE, ">$dest_arg") || - die "Can't open $dest_arg for writing"; -print FILE $pandoc_rendered; -close(FILE); - -exit(0); diff --git a/config/ompi_config_files.m4 b/config/ompi_config_files.m4 index 18f0e7c184c..119d0ddabaf 100644 --- a/config/ompi_config_files.m4 +++ b/config/ompi_config_files.m4 @@ -1,6 +1,6 @@ # -*- shell-script -*- # -# Copyright (c) 2009-2020 Cisco Systems, Inc. All rights reserved +# Copyright (c) 2009-2022 Cisco Systems, Inc. All rights reserved. # Copyright (c) 2017-2020 Research Organization for Information Science # and Technology (RIST). All rights reserved. # Copyright (c) 2018 Los Alamos National Security, LLC. 
All rights @@ -49,9 +49,6 @@ AC_DEFUN([OMPI_CONFIG_FILES],[ ompi/mpi/fortran/mpiext-use-mpi-f08/Makefile ompi/mpi/tool/Makefile - ompi/mpi/man/man3/Makefile - ompi/mpi/man/man5/Makefile - ompi/tools/ompi_info/Makefile ompi/tools/wrappers/Makefile ompi/tools/wrappers/mpicc-wrapper-data.txt diff --git a/config/opal_setup_sphinx.m4 b/config/opal_setup_sphinx.m4 new file mode 100644 index 00000000000..0e9ba632663 --- /dev/null +++ b/config/opal_setup_sphinx.m4 @@ -0,0 +1,98 @@ +dnl -*- shell-script -*- +dnl +dnl Copyright (c) 2020-2022 Cisco Systems, Inc. All rights reserved. +dnl +dnl $COPYRIGHT$ +dnl +dnl Additional copyrights may follow +dnl +dnl $HEADER$ +dnl + +AC_DEFUN([OPAL_SETUP_SPHINX],[ + OPAL_VAR_SCOPE_PUSH([summary_msg sphinx_result opal_install_docs sphinx_target_version sphinx_found_version]) + + # This option is probably only helpful to developers: have + # configure fail if Sphinx is not found (i.e., if you don't have + # the ability to use Sphinx to build the HTML docs and man pages). + AC_ARG_ENABLE([sphinx], + [AS_HELP_STRING([--enable-sphinx], + [Force configure to fail if Sphinx is not found (Sphinx is used to build the Open MPI and OpenSHMEM HTML docs and man pages). This option is likely only useful for Open MPI developers; end users who are building from Open MPI distribution tarballs do ***not*** need to have Sphinx installed])]) + + # Quick check to see if we have already-built docs (e.g., if we're + # in a tarball vs. a fresh git clone). + AC_MSG_CHECKING([if pre-built docs are available]) + AS_IF([test -f "$srcdir/docs/_build/man/MPI_T.5"], + [opal_install_docs=1 + AC_MSG_RESULT([yes])], + [opal_install_docs=0 + AC_MSG_RESULT([no])]) + + # To generate HTML docs + man pages, we need Sphinx. If we have + # Sphinx, then we're able to both build and install the docs + # (potentially overriding opal_install_docs from what it was set + # above). 
+ AC_PATH_PROG([SPHINX_BUILD], [sphinx-build], []) + + # If the user requested to disable sphinx, then pretend we didn't + # find it. + AS_IF([test "$enable_sphinx" = "no"], + [SPHINX_BUILD=]) + + # If we found Sphinx, check to ensure that it's a recent enough + # version. + AS_IF([test -n "$SPHINX_BUILD"], + [[sphinx_target_version=`sed -n -e 's/sphinx[><=]*\([0-9\.]\)/\1/p' $srcdir/docs/requirements.txt`] + sphinx_found_version=`$SPHINX_BUILD --version 2>&1 | cut -d\ -f2` + AC_MSG_CHECKING([if Sphinx version is high enough ($sphinx_found_version >= $sphinx_target_version)]) + AS_VERSION_COMPARE([$sphinx_found_version], + [$sphinx_target_version], + [sphinx_result=lesser], + [sphinx_result=equal], + [sphinx_result=greater]) + AS_IF([test "$sphinx_result" = "lesser"], + [SPHINX_BUILD= + AC_MSG_RESULT([no])], + [ # If we're building, we're also installing, regardless of + # whether we found pre-built docs or not (above). + opal_install_docs=1 + AC_MSG_RESULT([yes])]) + ]) + + AS_IF([test -z "$SPHINX_BUILD"], + [OPAL_MAKEDIST_DISABLE="$OPAL_MAKEDIST_DISABLE Sphinx/Documentation" + AC_MSG_NOTICE([Could not find a suitable sphinx-build on your system.]) + AC_MSG_NOTICE([You will not be able to build a distribution tarball.]) + ]) + + AS_IF([test $opal_install_docs -eq 0], + [AC_MSG_WARN([*** You will not have documentation installed.]) + AC_MSG_WARN([*** See the following URL for more information:]) + dnl Note that we have to double escape the string below + dnl so that the # it contains doesn't confuse the Autotools + AC_MSG_WARN([[*** https://ompi.readthedocs.io/en/latest/developers/prerequisites.html#sphinx]]) + ]) + + # If --enable-sphinx was specified and we did not find Sphinx, + # abort. This is likely only useful to prevent "oops!" moments + # from Open MPI developers. 
+ AS_IF([test -z "$SPHINX_BUILD" && test "$enable_sphinx" = "yes"], + [AC_MSG_WARN([Sphinx was not found, but --enable-sphinx was specified]) + AC_MSG_ERROR([Cannot continue])]) + + # Construct a summary message. Due SUMMARY_ADD's implementation, + # do *not* include a comma. + AS_IF([test -n "$SPHINX_BUILD"], + [ # If we found Sphinx, we always both build and install. + summary_msg="building and installing"], + [AS_IF([test $opal_install_docs -eq 1], + [summary_msg="installing packaged docs"], + [summary_msg="no documentation available"])]) + + OPAL_SUMMARY_ADD([Miscellaneous], [HTML docs and man pages], [], + [$summary_msg]) + + AM_CONDITIONAL([OPAL_BUILD_DOCS], [test -n "$SPHINX_BUILD"]) + AM_CONDITIONAL([OPAL_INSTALL_DOCS], [test $opal_install_docs -eq 1]) + OPAL_VAR_SCOPE_POP +]) diff --git a/configure.ac b/configure.ac index 09fb8f594a6..61a2ec6bffe 100644 --- a/configure.ac +++ b/configure.ac @@ -1069,10 +1069,10 @@ AS_IF([test -z "$LEX" || \ ]) # -# Setup man page processing +# Setup HTML and man page processing # -OPAL_SETUP_MAN_PAGES +OPAL_SETUP_SPHINX # # File system case sensitivity @@ -1503,6 +1503,8 @@ AC_MSG_RESULT([$LIBS]) AC_CONFIG_FILES([ Makefile + docs/Makefile + config/Makefile contrib/Makefile diff --git a/contrib/dist/make-html-man-pages.pl b/contrib/dist/make-html-man-pages.pl deleted file mode 100755 index 58f7679638c..00000000000 --- a/contrib/dist/make-html-man-pages.pl +++ /dev/null @@ -1,278 +0,0 @@ -#!/usr/bin/env perl -# -# Copyright (c) 2010 Cisco Systems, Inc. -# -# Script to generate PHP-ized files of Open MPI tarball-installed man -# pages. Run it from the top-level directory of an Open MPI tarball -# or source checkout. It will: -# -# - run autogen if necessary -# - run configure -# - run make install -# - frob the generated man pages a bit -# - generate PHP versions of the man pages -# -# The PHP can then be copied to the OMPI web site. 
-# - -use strict; -use File::Find; -use File::Basename; -use Cwd; - -sub absoluteize { - my ($dir) = shift; - - mkdir_p($dir) - if (! -d $dir); - - my $start = cwd(); - chdir($dir); - $dir = cwd(); - chdir($start); - - return $dir; -} - -sub mkdir_p { - my ($dir) = @_; - if (!mkdir($dir)) { - mkdir_p(dirname($dir)); - mkdir($dir) || die "Can't make directory $dir"; - } -} - -# Check that we have what we need -if (!(-f "VERSION" && -f "ompi/include/mpi.h.in")) { - print "Run this script from the top-level Open MPI directory\n"; - exit(1); -} - -# Setup -my @files; -my $pwd = Cwd::cwd(); -print "PWD: $pwd\n"; -my $basedir = "$pwd/man-page-generator"; -my $prefix = "$basedir/install"; -my $mandir = absoluteize("$prefix/share/man"); -my $outdir_base = absoluteize("$basedir/php"); - -# Remove old results -system("rm -rf $basedir"); - -# Configure, build, and install so that we get a full set of man pages - -sub doit { - my @cmd = @_; - print "Running: @cmd\n"; - my $ret = system(@cmd); - die "Failed to run (@cmd)" - if (-1 == $ret); - $ret = $ret >> 8; - die "Command failed (@cmd) with status $ret" - if ($ret != 0); -} - -# Autogen if we don't have a configure script -doit("./autogen.pl") - if (! -x "configure"); -doit("./configure --prefix=$prefix --enable-mpi-ext=all --without-cs-fs"); - -# Find this OMPI's version -my $version = `fgrep PACKAGE_VERSION opal/include/opal_config.h | cut -d\\\" -f2`; -chomp($version); -print "Open MPI version: $version\n"; - -# Build so that we get fresh man pages -doit("make clean"); -doit("make -j 4 install"); - -# Find all *.[0-9] files in the installed mandir tree. -&File::Find::find( - sub { - push(@files, $File::Find::name) if (-f $_ && $_ =~ /\.[1-9]$/); - }, $mandir); - -# Must cd into the $mandir directory because some of the man pages -# refer to other man pages by "man/" relative path names. -chdir($mandir); - -my %dirs; -my $outfiles; - -# Generate a PHP file for each man page. 
-foreach my $file (@files) { - my $b = basename($file); - $b =~ m/(.*)\.([0-9])$/; - - my $name = $1; - my $section = $2; - - my $outdir = "$outdir_base/man$section"; - my $outfile = "$outdir/$b.php"; - $dirs{$outdir} = ""; - push(@{$outfiles->{$section}}, { - name => $name, - file => "man$section/$b.php", - }); - - mkdir_p($outdir) - if (! -d $outdir); - - print "*** Generating: $name ($section)\n"; - - # man2html clips many of our man pages -- it just stops halfway - # through the file. Weird. - #print "man $file | man2html -bare -botm 4 -topm 4\n"; - open(CMD, "rman -f HTML -r off -p $file|") || die("Can't open command"); - my $text; - $text .= $_ - while (); - close(CMD); - - # Post-process the text: - # Remove ... - # Remove - # Remove - # Remove - # Remove ... - # Remove and - # Remove and - - $text =~ s/.*<\/head>//is; - $text =~ s///is; - $text =~ s///i; - $text =~ s/<\/html>//i; - $text =~ s///i; - $text =~ s/<\/body>//i; - - # Remove carriage returns, extra white space, and double blank - # lines - $text =~ s/\r//g; - $text =~ s/[ \t]+\n/\n/g; - $text =~ s/\n{3,}/\n\n/g; - - # Cross-link to any other man pages that we might have. Search - # through the string for MPI_ and look for any corresponding - # man pages in @files. Sequentially replace MPI_ with - # $replaced so that we can find all the MPI_'s (we'll - # put the "MPI_" back when we're ). - my $replace = "ZAZZZERZAZ_"; - - # This is a doozy of a regexp (see perlre(1)). Look for MPI_ - # cases that aren't followed by .[0-9].php (i.e., not the href - # clause of an A HTML tag). 
- while ($text =~ m/^(.*\W)MPI_(\w+(?!\.[0-9]\.php))(\W.*)$/s) { - my $comp = lc("mpi_$2"); -# print "Found: $2 -- looking for $comp: "; - - my $prefix = $1; - my $meat = $2; - my $suffix = $3; - - my $replaced = 0; - foreach my $f2 (@files) { - # If we have another file that matches the regexp that we - # just pulled out from the text, *and* that file is not - # the same file that we're already processing (i.e., don't - # link to myself!), then link to it. - if (basename(lc($f2)) =~ /^$comp\.[0-9]/ && $f2 ne $file) { - # Hard-coded to link only to MPI API functions in - # section 3 (i.e., ../man3/). - my $link_file = "../man3/" . basename($f2) . ".php"; -# print "Linked to $link_file!\n"; - $text = "$prefix$replace$meat$suffix"; - $replaced = 1; - last; - } - } - if (!$replaced) { -# print "Not linked\n"; - $text = "$prefix$replace$meat$suffix"; - } - } - # Now replace the $replaced back with MPI_. - $text =~ s/$replace/MPI_/g; - - # Obscure any email addresses in there; don't want to give the - # spammers any new fodder! - $text =~ s/(\W)[\w\.\-]+@[\w.\-]+(\W)/$1email-address-removed$2/g; - - # Setup meta name: make the MPI name be all caps if we're in - # section 3 and it has an MPI_ prefix. - my $meta_name = $name; - if (3 == $section && $name =~ /^MPI_/) { - $meta_name = uc($name); - } - - # Now we're left with what we want. Output the PHP page. - # Write the output PHP file with our own header and footer, - # suitable for www.open-mpi.org. - unlink($outfile); - open(FILE, ">$outfile") || die "Can't open $outfile"; - print FILE ' -

« Return to documentation listing

-' . $text . ' -

« Return to documentation listing

-$dir/index.php") || die "Can't open $dir/index.php"; - print FILE '.inc file -my $file = "$outdir_base/data-$version.inc"; -print "Writing $file...\n"; -open(FILE, ">$file") || die "Can't open $file"; -print FILE '{$section}}) { - push(@f, $file->{name}); - } - # Now output the sorted filenames - foreach my $file (sort(@f)) { - print FILE ", " - if (!$first); - $first = 0; - print FILE '"' . $file . '"'; - } - print FILE ");\n\n"; -} -close(FILE); - -# Print the top-level engine file for this version (it will use the -# data-.inc file). -open(FILE, ">$outdir_base/index.php") || die "Can't open $outdir_base/index.php"; -print FILE '` for more details. + +Improving filesystem performance at scale +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Prior to v5.0.0, Open MPI compiled a large number of plugins as +individual dynamic shared objects (DSOs) |mdash| i.e., individual +files in the filesystem. Many of these DSOs would be opened by each +MPI process at run time. + +This could cause filesystem congestion, particularly when Open MPI is +installed on a network filesystem and a large job is launched: many +nodes will simultaneously communicate with the file server(s), and +potentially need to transfer a large number of small(ish) files. + +Starting with v5.0.0, by default, Open MPI's plugins are no longer +built as DSOs. As such, Open MPI typically only opens a small number +of shared libraries at launch time. Even if Open MPI is installed on +a network filesystem, these libraries are likely to be cached on nodes +over time, and therefore generate a fairly small amount network +filesystem traffic when MPI jobs are launched. + +In short: Open MPI |ompi_ver|'s impact on network filesystems is +greatly diminished compared to prior versions. Compiling fully-static +applications to eliminate the open-every-DSO-file-at-launch-time +behavior is no longer necessary. 
+ +Other reasons fully-static applications are bad +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Here are a few other reasons that fully-static MPI applications are +sub-optimal: + +#. When applications link all of their dependencies statically, the + operating system cannot share code between multiple copies of the + process. + + For example, if you launch N copies of your fully-statically-linked + MPI application on a node, it will consume (N * + size_of_the_application) bytes of RAM. Alternatively, launching N + copies of a dynamically-linked MPI application |mdash| where each + of the copies has the same dependent libraries |mdash| will only + load each shared dependent library into RAM once. + + In other words: *using dynamic linking saves memory.* + +#. If you disable Open MPI's ``dlopen`` functionality (which is + necessary to create *fully*-static MPI applications), you lose the + following: + + * CUDA, because |mdash| among other reasons |mdash| the CUDA + library is dynamically loaded at run-time via ``dlopen(3)``. + + * Memory manager functionality, which is important for OS-bypass + networks such as InfiniBand. + +Are you convinced yet? *Please try to avoid building fully-static MPI +applications if at all possible.* + + +Building fully-static MPI applications +-------------------------------------- + +.. caution:: If, after reading all of the above, you are still of the + mind that you want to build fully-static MPI + applications, be aware that fully static linking is not + for the meek, and it is not recommended. But it is + possible, with some caveats. + +#. You must have static libraries available for *everything* to which + your program links. This includes Open MPI; you must have used the + ``--enable-static`` option to Open MPI's ``configure`` or otherwise + have available the static versions of the Open MPI libraries. + + .. 
note:: Some Linux distributions may not have static versions of + popular Linux libraries by default (e.g., libnuma), or + require additional RPMs to be installed to get the + equivalent static libraries. + +#. Open MPI must have been built without a memory manager. This means + that Open MPI must have been configured with the + ``--without-memory-manager`` flag. This is irrelevant on some + platforms for which Open MPI does not have a memory manager, but on + some platforms it is necessary (Linux when using many OS-bypass + networks). It is harmless to use this flag on platforms where Open + MPI does not have a memory manager. + + .. important:: Not including memory manager support can lead to + lower performance when Open MPI is used with + OS-bypass networks. + +This is how to configure Open MPI to build fully-static libraries on +Linux: + +.. code-block:: sh + + shell$ ./configure --without-memory-manager --disable-dlopen \ + --enable-static --disable-shared ... + +The ``--disable-shared`` flag is optional; it will prevent Open MPI +from *also* building shared libraries. + +Alternatively, you could build Open MPI with as many static libraries +as possible, but still preserve ``dlopen`` functionality by omitting +the ``--disable-dlopen`` flag: + +.. code-block:: sh + + shell$ ./configure --without-memory-manager \ + --enable-static --disable-shared ... + +This gives you a *mostly* static build of Open MPI, but has the +advantage of preserving at least some dynamic libraries. + +Including whole archives +^^^^^^^^^^^^^^^^^^^^^^^^ + +Some systems may have additional constraints about their support +libraries that require additional steps to produce working +fully-static MPI applications. For example, any library that has its +own run-time plugin system (i.e., that opens dynamically shared +objects ("DSOs") at run time) will have additional complications in +producing fully-static builds. + +In such cases, you generally want to run ``mpicc ... 
--showme`` to see +the compiler / linker commands that Open MPI's wrapper commands will +use, and then augment those commands with linker arguments for the +static versions of the DSO plugins that you will need at run time. + +For example, if you have ``libfoo.a`` that dynamically loads +``plugin.so`` at run time, you'll need to have a ``plugin.a`` and +|mdash| assuming the GNU linker |mdash| add arguments similar to the +following: + +* ``-static``: Tell the linker to generate a static executable. +* ``-Wl,--whole-archive -lfoo /path/to/plugin.a -Wl,--no-whole-archive``: + Tell the linker to include the entire ``foo`` library and the entire + ``plugin.a`` archive in the executable. + +You can either add these arguments on the command line manually, or +you can :ref:`modify the default behavior of the wrapper compilers +` to hide this complexity from end +users (but be aware that if you modify the wrapper compilers' default +behavior, *all* users will be creating static applications!). diff --git a/docs/building-apps/customizing-wrappers.rst b/docs/building-apps/customizing-wrappers.rst new file mode 100644 index 00000000000..65a750c064d --- /dev/null +++ b/docs/building-apps/customizing-wrappers.rst @@ -0,0 +1,122 @@ +.. _label-customizing-wrapper-compiler: + +Customizing wrapper compiler behavior +===================================== + +The Open MPI wrapper compilers are driven by text files that contain, +among other things, the flags that are passed to the underlying +compiler. These text files are generated automatically for Open MPI +and are customized for the compiler set that was selected when Open +MPI was configured; it is *not* recommended that users edit these +files. + +However, there are cases where it may be necessary or desirable to +edit these files and add to or subtract from the flags that Open MPI +selected. These files are installed in ``$pkgdatadir``, which +defaults to ``$prefix/share/openmpi/WRAPPER_NAME-wrapper-data.txt``. 
+Several environment variables are also available for run-time +replacement of the wrapper's default values (from the text files): + +.. note:: You may need to scroll right in the following table. + +.. list-table:: + :header-rows: 1 + + * - Wrapper compiler + - Compiler + - Preprocessor flags + - Compiler flags + - Linker flags + - Linker library flags + - Data file + + * - ``mpicc`` + - ``OMPI_CC`` + - ``OMPI_CPPFLAGS`` + - ``OMPI_CFLAGS`` + - ``OMPI_LDFLAGS`` + - ``OMPI_LIBS`` + - ``mpicc-wrapper-data.txt`` + + * - ``mpic++`` and ``mpiCC`` + - ``OMPI_CXX`` + - ``OMPI_CPPFLAGS`` + - ``OMPI_CXXFLAGS`` + - ``OMPI_LDFLAGS`` + - ``OMPI_LIBS`` + - ``mpic++-wrapper-data.txt`` and ``mpiCC-wrapper-data.txt``, + respectively + + * - ``mpifort`` + - ``OMPI_FC`` + - ``OMPI_CPPFLAGS`` + - ``OMPI_FCFLAGS`` + - ``OMPI_LDFLAGS`` + - ``OMPI_LIBS`` + - ``mpifort-wrapper-data.txt`` + +.. caution:: Note that changing the underlying compiler may not work + at all. + + For example, C++ and Fortran compilers are notoriously binary + incompatible with each other (sometimes even within multiple + releases of the same compiler). If you compile/install Open MPI + with C++ compiler vX.Y.Z and then use the ``OMPI_CXX`` environment + variable to change the ``mpicxx`` wrapper compiler to use the + vA.B.C C++ compiler, your application code may not compile and/or + link. The traditional method of using multiple different compilers + with Open MPI is to install Open MPI multiple times; each + installation should be built/installed with a different compiler. + This is annoying, but it is beyond the scope of Open MPI to be able + to fix. + +Note that the values of these fields can be directly influenced by +passing flags to Open MPI's ``configure`` script. :ref:`See this +section in the Installation guide ` for +more details. + +The files cited in the above table use fairly simplistic "key=value" +data formats. 
The following are several fields that are likely to be +of interest to end-users: + +* ``project_short``: Prefix for all environment variables. See + below. + +* ``compiler_env``: Specifies the base name of the environment + variable that can be used to override the wrapper's underlying + compiler at run-time. The full name of the environment variable is + of the form ``<project_short>_<compiler_env>``; see table above. + +* ``compiler_flags_env``: Specifies the base name of the environment + variable that can be used to override the wrapper's compiler flags + at run-time. The full name of the environment variable is of the + form ``<project_short>_<compiler_flags_env>``; see table above. + +* ``compiler``: The executable name of the underlying compiler. + +* ``extra_includes``: Relative to ``$installdir``, a list of directories + to also list in the preprocessor flags to find header files. + +* ``preprocessor_flags``: A list of flags passed to the preprocessor. + +* ``compiler_flags``: A list of flags passed to the compiler. + +* ``linker_flags``: A list of flags passed to the linker. + +* ``libs``: A list of libraries passed to the linker. + +* ``required_file``: If non-empty, check for the presence of this file + before continuing. If the file is not there, the wrapper will abort + saying that the language is not supported. + +* ``includedir``: Directory containing Open MPI's header files. The + proper compiler "include" flag is prepended to this directory and + added into the preprocessor flags. + +* ``libdir``: Directory containing Open MPI's library files. The + proper linker "library path" flag is prepended to this directory and + added into the linker flags. + +* ``module_option``: This field only appears in ``mpifort``. It is + the flag that the Fortran compiler requires to declare where module + files are located. 
diff --git a/docs/building-apps/deprecation-warnings.rst b/docs/building-apps/deprecation-warnings.rst new file mode 100644 index 00000000000..c0801f5b661 --- /dev/null +++ b/docs/building-apps/deprecation-warnings.rst @@ -0,0 +1,40 @@ +Deprecation warnings while compiling MPI applications +===================================================== + +If you see deprecation warnings when compiling MPI applications, it is +because your application is using symbols / functions that are deprecated in +MPI. For example: + +.. code-block:: sh + + shell$ mpicc deprecated-example.c -c + deprecated-example.c: In function 'foo': + deprecated-example.c:6:5: warning: 'MPI_Attr_delete' is deprecated: MPI_Attr_delete was deprecated in MPI-2.0; use MPI_Comm_delete_attr instead [-Wdeprecated-declarations] + MPI_Attr_delete(MPI_COMM_WORLD, 2); + ^~~~~~~~~~~~~~~ + In file included from deprecated-example.c:2: + /usr/local/openmpi/include/mpi.h:2601:20: note: declared here + OMPI_DECLSPEC int MPI_Attr_delete(MPI_Comm comm, int keyval) + ^~~~~~~~~~~~~~~ + +Note that the deprecation compiler warnings tell you how to upgrade +your code to avoid the deprecation warnings. In this example, it +advises you to use ``MPI_Comm_delete_attr()`` instead of +``MPI_Attr_delete()``. + +Also, note that even if Open MPI was configured with +``--enable-mpi1-compatibility`` to re-enable removed MPI-1 symbols, +you will still get compiler warnings when you use the removed symbols. +For example: + +.. code-block:: sh + + shell$ mpicc deleted-example.c -c + deleted-example.c: In function 'foo': + deleted-example.c:8:5: warning: 'MPI_Address' is deprecated: MPI_Address was removed in MPI-3.0; use MPI_Get_address instead. 
[-Wdeprecated-declarations] + MPI_Address(buffer, &address); + ^~~~~~~~~~~ + In file included from deleted-example.c:2: + /usr/local/openmpi/include/mpi.h:2689:20: note: declared here + OMPI_DECLSPEC int MPI_Address(void *location, MPI_Aint *address) + ^~~~~~~~~~~ diff --git a/docs/building-apps/extracting-wrapper-flags.rst b/docs/building-apps/extracting-wrapper-flags.rst new file mode 100644 index 00000000000..2c04fbb52b8 --- /dev/null +++ b/docs/building-apps/extracting-wrapper-flags.rst @@ -0,0 +1,127 @@ +Extracting flags from the wrapper compilers +=========================================== + +If you cannot use the wrapper compilers for some reason, there are +multiple supported ways to extract the compiler/linker flags that you +will need. + +Using the ``--showme`` option +----------------------------- + +The wrapper compilers all support a ``--showme`` command line option +that will show what commands would have been invoked. + +.. code-block:: sh + + # Show the flags necessary to compile MPI C applications + shell$ mpicc --showme:compile + + # Show the flags necessary to link MPI C applications + shell$ mpicc --showme:link + + # Show all the flags necessary to build MPI C applications + shell$ mpicc --showme + +.. note:: If you pass ``--showme`` *and additional command line + parameters* to the wrapper compiler, be sure to *also* pass in a + filename. Otherwise, the ``--showme`` functionality will not + display output as expected. + +.. warning:: It is almost never a good idea to hard-code these results + in a ``Makefile`` (or other build system). It is almost always + best to run (for example) ``mpicc --showme:compile`` in a dynamic + fashion to find out what you need. For example, GNU Make allows + running commands and assigning their results to variables: + + .. 
code-block:: make + + MPI_COMPILE_FLAGS = $(shell mpicc --showme:compile) + MPI_LINK_FLAGS = $(shell mpicc --showme:link) + + my_app: my_app.c + $(CC) $(MPI_COMPILE_FLAGS) my_app.c $(MPI_LINK_FLAGS) -o my_app + +Using ``pkg-config`` +-------------------- + +Alternatively, Open MPI also installs ``pkg-config(1)`` configuration +files under ``$libdir/pkgconfig``. If ``pkg-config`` is configured to +find these files (e.g., if you add ``$libdir/pkgconfig`` |mdash| which +is usually ``$prefix/lib/pkgconfig`` |mdash| to the +``PKG_CONFIG_PATH`` environment variable), then compiling / linking +Open MPI programs can be performed like this: + +.. code-block:: sh + + shell$ export PKG_CONFIG_PATH=/opt/openmpi/lib/pkgconfig + shell$ gcc hello_world_mpi.c -o hello_world_mpi -g \ + `pkg-config ompi-c --cflags --libs` + shell$ + +Open MPI supplies multiple ``pkg-config`` configuration files; one for +each different wrapper compiler (language): + +* ``ompi``: Synonym for ``ompi-c``; Open MPI applications using the C + MPI bindings +* ``ompi-c``: Open MPI applications using the C MPI bindings +* ``ompi-cxx``: Open MPI applications using the C MPI bindings +* ``ompi-fort``: Open MPI applications using the Fortran MPI bindings + +.. note:: Open MPI's ``pkg-config`` files *work properly*, but they + probably aren't *technically correct*. + + Specifically: Open MPI will list all of its dependent + libraries that are necessary to link an MPI application, + even if a given dependency has a ``.pc`` file and should + therefore be listed as a ``Requires`` and/or + ``Requires.private`` in Open MPI's ``.pc`` files. + + For example, Open MPI lists ``-lpmix`` in both ``Libs`` and + ``Libs.private``. But since PMIx provides its own + ``pmix.pc`` file, it would be more correct for Open MPI to + *not* list ``-lpmix`` in ``Libs`` / ``Libs.private``, and + instead include: + + .. 
code-block:: + + Requires: pmix + Requires.private: pmix + + The end result is likely immaterial, but we document this + just in case it ever becomes an issue someday. + + +Using ``ompi_info`` +------------------- + +This method is not directly suitable for getting all the compiler / +linker flags needed to compile MPI applications because it does not +include the relevant flags to find Open MPI's headers and libraries. +But it does show a breakdown of all other flags. + +.. code-block:: + + shell$ ompi_info --all | grep -i wrapper + Wrapper extra CFLAGS: + Wrapper extra CXXFLAGS: + Wrapper extra FFLAGS: + Wrapper extra FCFLAGS: + Wrapper extra LDFLAGS: + Wrapper extra LIBS: -lutil -lnsl -ldl -Wl,--export-dynamic -lm + +This installation is *only* adding options in the ``xLIBS`` areas of the +wrapper compilers; all other values are blank (remember: the ``-I``'s +and ``-L``'s are implicit). + +Note that the ``--parsable`` option can be used to obtain +machine-parsable versions of this output. For example: + +.. code-block:: + + shell$ ompi_info --all --parsable | grep wrapper:extra + option:wrapper:extra_cflags: + option:wrapper:extra_cxxflags: + option:wrapper:extra_fflags: + option:wrapper:extra_fcflags: + option:wrapper:extra_ldflags: + option:wrapper:extra_libs:-lutil -lnsl -ldl -Wl,--export-dynamic -lm diff --git a/docs/building-apps/index.rst b/docs/building-apps/index.rst new file mode 100644 index 00000000000..0ca44d4188f --- /dev/null +++ b/docs/building-apps/index.rst @@ -0,0 +1,15 @@ +Building MPI applications +========================= + +The simplest way to compile and link MPI applications is to use the +Open MPI "wrapper" compilers. + +.. 
toctree:: + :maxdepth: 1 + + quickstart + customizing-wrappers + extracting-wrapper-flags + removed-mpi-constructs + deprecation-warnings + building-static-apps diff --git a/docs/building-apps/quickstart.rst b/docs/building-apps/quickstart.rst new file mode 100644 index 00000000000..c4bd5dfd6c2 --- /dev/null +++ b/docs/building-apps/quickstart.rst @@ -0,0 +1,63 @@ +.. _label-quickstart-building-apps: + +Quick start: Building MPI applications +====================================== + +Although this section skips many details, it offers examples that will +probably work in many environments. + +.. caution:: Note that this section is a "Quick start" |mdash| it does + not attempt to be comprehensive or describe how to build Open MPI + in all supported environments. The examples below may therefore + not work exactly as shown in your environment. + + Please consult the other sections in this chapter for more details, + if necessary. + +Open MPI provides "wrapper" compilers that should be used for +compiling MPI and OpenSHMEM applications: + ++---------+--------------------------+ +| C | ``mpicc``, ``oshcc`` | ++---------+--------------------------+ +| C++ | ``mpiCC``, ``oshCC`` (or | +| | ``mpic++`` if your | +| | filesystem is | +| | case-insensitive) | ++---------+--------------------------+ +| Fortran | ``mpifort``, ``oshfort`` | ++---------+--------------------------+ + +.. caution:: The legacy names ``mpif77`` and ``mpif90`` still exist, + and are simply symbolic links to the ``mpifort`` wrapper + compiler. Users are strongly encouraged to update all + build scripts to use ``mpifort`` instead of ``mpif77`` + and ``mpif90``. + +The intent is that users can simply invoke the Open MPI wrapper +compiler instead of their usual language compiler. For example, +instead of invoking your usual C compiler to build your MPI C +application, use ``mpicc``: + +.. code-block:: sh + + shell$ mpicc hello_world_mpi.c -o hello_world_mpi -g + shell$ + +For OpenSHMEM applications: + +.. 
code-block:: sh + + shell$ oshcc hello_shmem.c -o hello_shmem -g + shell$ + +All the wrapper compilers do is add a variety of compiler and linker +flags to the command line and then invoke a back-end compiler. To be +specific: the wrapper compilers do not parse source code at all; they +are solely command-line manipulators, and have nothing to do with the +actual compilation or linking of programs. The end result is an MPI +executable that is properly linked to all the relevant libraries. + +.. caution:: It is *absolutely not sufficient* to simply add ``-lmpi`` + to your link line and assume that you will obtain a valid + Open MPI executable. diff --git a/docs/building-apps/removed-mpi-constructs.rst b/docs/building-apps/removed-mpi-constructs.rst new file mode 100644 index 00000000000..81229c7cb72 --- /dev/null +++ b/docs/building-apps/removed-mpi-constructs.rst @@ -0,0 +1,527 @@ +.. _label-removed-mpi-constructs: + +Removed MPI constructs +====================== + +.. error:: **TODO This section needs to be renamed/updated for the + 5.0.0 behavior.** + +Starting with v4.0.0, Open MPI |mdash| by default |mdash| removes the +prototypes from ``mpi.h`` for MPI symbols that were deprecated in 1996 +in the MPI-2.0 standard, and finally removed from the MPI-3.0 standard +(2012). + +Specifically, the following symbols (specified in the MPI +language-neutral names) are no longer prototyped in ``mpi.h`` by +default: + +.. note:: You may need to scroll right in the following table. + +.. 
list-table:: + :header-rows: 1 + + * - Removed symbol + + (click for more details, below) + - Replaced with + + (click to go to the corresponding man page) + - Deprecated + - Removed + + * - :ref:`MPI_ADDRESS ` + - :ref:`MPI_GET_ADDRESS ` + - MPI-2.0 (1996) + - MPI-3.0 (2012) + + * - :ref:`MPI_ERRHANDLER_CREATE ` + - :ref:`MPI_COMM_CREATE_ERRHANDLER ` + - MPI-2.0 (1996) + - MPI-3.0 (2012) + + * - :ref:`MPI_ERRHANDLER_GET ` + - :ref:`MPI_COMM_GET_ERRHANDLER ` + - MPI-2.0 (1996) + - MPI-3.0 (2012) + + * - :ref:`MPI_ERRHANDLER_SET ` + - :ref:`MPI_COMM_SET_ERRHANDLER ` + - MPI-2.0 (1996) + - MPI-3.0 (2012) + + * - :ref:`MPI_TYPE_EXTENT ` + - :ref:`MPI_TYPE_GET_EXTENT ` + - MPI-2.0 (1996) + - MPI-3.0 (2012) + + * - :ref:`MPI_TYPE_HINDEXED ` + - :ref:`MPI_TYPE_CREATE_HINDEXED ` + - MPI-2.0 (1996) + - MPI-3.0 (2012) + + * - :ref:`MPI_TYPE_HVECTOR ` + - :ref:`MPI_TYPE_CREATE_HVECTOR ` + - MPI-2.0 (1996) + - MPI-3.0 (2012) + + * - :ref:`MPI_TYPE_LB ` + - :ref:`MPI_TYPE_GET_EXTENT ` + - MPI-2.0 (1996) + - MPI-3.0 (2012) + + * - :ref:`MPI_TYPE_STRUCT ` + - :ref:`MPI_TYPE_CREATE_STRUCT ` + - MPI-2.0 (1996) + - MPI-3.0 (2012) + + * - :ref:`MPI_TYPE_UB ` + - :ref:`MPI_TYPE_GET_EXTENT ` + - MPI-2.0 (1996) + - MPI-3.0 (2012) + + * - :ref:`MPI_LB ` + - :ref:`MPI_TYPE_CREATE_RESIZED ` + - MPI-2.0 (1996) + - MPI-3.0 (2012) + + * - :ref:`MPI_UB ` + - :ref:`MPI_TYPE_CREATE_RESIZED ` + - MPI-2.0 (1996) + - MPI-3.0 (2012) + + * - :ref:`MPI_COMBINER_HINDEXED_INTEGER ` + - :ref:`MPI_COMBINER_HINDEXED ` + - MPI-2.0 (1996) + - MPI-3.0 (2012) + + * - :ref:`MPI_COMBINER_HVECTOR_INTEGER ` + - :ref:`MPI_COMBINER_HVECTOR ` + - MPI-2.0 (1996) + - MPI-3.0 (2012) + + * - :ref:`MPI_COMBINER_STRUCT_INTEGER ` + - :ref:`MPI_COMBINER_STRUCT ` + - MPI-2.0 (1996) + - MPI-3.0 (2012) + + * - :ref:`MPI_HANDLER_FUNCTION ` + - :ref:`MPI_COMM_ERRHANDLER_FUNCTION ` + - MPI-2.0 (1996) + - MPI-3.0 (2012) + +Although these symbols are no longer prototyped in ``mpi.h``, *they are +still present in the MPI library in 
Open MPI v4.0.x*. This enables +legacy MPI applications to *link and run* successfully with Open MPI +v4.0.x, even though they will fail to *compile*. + +.. warning:: The Open MPI team **strongly** encourages all + MPI application developers to stop using these constructs that were + first deprecated over 20 years ago, and finally removed from the MPI + specification in MPI-3.0 (in 2012). + +The FAQ items in this category +show how to update your application to stop using these removed +symbols. + +All that being said, if you are unable to immediately update your +application to stop using these removed MPI-1 symbols, you can +re-enable them in ``mpi.h`` by configuring Open MPI with the +``--enable-mpi1-compatibility`` flag. + +.. note:: Future releases of Open MPI may remove these symbols + altogether. + +Why on earth are you breaking the compilation of MPI applications? +------------------------------------------------------------------ + +.. error:: **TODO This section needs to be renamed/updated (or + deleted?) for the 5.0.0 behavior.** + +The Open MPI developer community decided to take a first step of +removing the prototypes for these symbols from ``mpi.h`` starting with +the Open MPI v4.0.x series for the following reasons: + +#. These symbols have been deprecated since *1996.* It's time to start + raising awareness for developers who are inadvertently still using + these removed symbols. +#. The MPI Forum removed these symbols from the MPI-3.0 specification + in 2012. This is a sign that the Forum itself recognizes that + these removed symbols are no longer needed. +#. Note that Open MPI *did not fully remove* these removed symbols: we + just made it slightly more painful to get to them. This is an + attempt to raise awareness so that MPI application developers can + update their applications (it's easy!). 
 + +In short: the only way to finally be able to remove these removed +symbols from Open MPI someday is to have a "grace period" where the +MPI application developers are a) made aware that they are using +removed symbols, and b) educated how to update their applications. + +We, the Open MPI developers, recognize that your MPI application +failing to compile with Open MPI may be a nasty surprise. We +apologize for that. + +Our intent is simply to use this minor shock to raise awareness and +use it as an educational opportunity to show you how to update your +application (or direct your friendly neighborhood MPI application +developer to this FAQ) to stop using these removed MPI symbols. + +Thank you! + +.. _label-mpi-address: + +Stop using MPI_ADDRESS +---------------------- + +In C, the only thing that changed was the function name: +``MPI_Address()`` |rarrow| ``MPI_Get_address()``. Nothing else needs +to change: + +.. code-block:: c++ + + char buffer[30]; + MPI_Aint address; + + // Old way + MPI_Address(buffer, &address); + + // New way + MPI_Get_address(buffer, &address); + +In Fortran, the type of the parameter changed from ``INTEGER`` +|rarrow| ``INTEGER(KIND=MPI_ADDRESS_KIND)`` so that it can hold +larger values (e.g., 64 bit pointers): + +.. code-block:: Fortran + + USE mpi + REAL buffer + INTEGER ierror + INTEGER old_address + INTEGER(KIND = MPI_ADDRESS_KIND) new_address + + ! Old way + CALL MPI_ADDRESS(buffer, old_address, ierror) + + ! New way + CALL MPI_GET_ADDRESS(buffer, new_address, ierror) + +.. _label-mpi-errhandler-create: + +Stop using MPI_ERRHANDLER_CREATE +-------------------------------- + +In C, *effectively* the only thing that changed was the name of the +function: ``MPI_Errhandler_create()`` |rarrow| +``MPI_Comm_create_errhandler()``. 
 + +*Technically*, the type of the first parameter also changed +(``MPI_Handler_function`` |rarrow| ``MPI_Comm_errhandler_function``), +but most applications do not use this type directly and may not even +notice the change. + +.. code-block:: c++ + + void my_errhandler_function(MPI_Comm *comm, int *code, ...) + { + // Do something useful to handle the error + } + + void some_function(void) + { + MPI_Errhandler my_handler; + + // Old way + MPI_Errhandler_create(my_errhandler_function, &my_handler); + + // New way + MPI_Comm_create_errhandler(my_errhandler_function, &my_handler); + } + +In Fortran, only the subroutine name changed: +``MPI_ERRHANDLER_CREATE`` |rarrow| ``MPI_COMM_CREATE_ERRHANDLER``. + +.. code-block:: Fortran + + USE mpi + EXTERNAL my_errhandler_function + INTEGER ierror + INTEGER my_handler + + ! Old way + CALL MPI_ERRHANDLER_CREATE(my_errhandler_function, my_handler, ierror) + + ! New way + CALL MPI_COMM_CREATE_ERRHANDLER(my_errhandler_function, my_handler, ierror) + +.. _label-mpi-errhandler-get: + +Stop using MPI_ERRHANDLER_GET +----------------------------- + +In both C and Fortran, the only thing that changed with regards to +``MPI_ERRHANDLER_GET`` is the name: ``MPI_ERRHANDLER_GET`` |rarrow| +``MPI_COMM_GET_ERRHANDLER``. + +All parameter types stayed the same. + +.. _label-mpi-errhandler-set: + +Stop using MPI_ERRHANDLER_SET +----------------------------- + +In both C and Fortran, the only thing that changed with regards to +``MPI_ERRHANDLER_SET`` is the name: ``MPI_ERRHANDLER_SET`` |rarrow| +``MPI_COMM_SET_ERRHANDLER``. + +All parameter types stayed the same. + +.. _label-mpi-type-hindexed: + +Stop using MPI_TYPE_HINDEXED +---------------------------- + +In both C and Fortran, *effectively* the only change is the name of +the function: ``MPI_TYPE_HINDEXED`` |rarrow| +``MPI_TYPE_CREATE_HINDEXED``. + +In C, the new function also has a ``const`` attribute on the two array +parameters, but most applications won't notice the difference. 
+ +All other parameter types stayed the same. + +.. code-block:: c++ + + int count = 2; + int block_lengths[] = { 1, 2 }; + MPI_Aint displacements[] = { 0, sizeof(int) }; + MPI_Datatype newtype; + + // Old way + MPI_Type_hindexed(count, block_lengths, displacements, MPI_INT, &newtype); + + // New way + MPI_Type_create_hindexed(count, block_lengths, displacements, MPI_INT, &newtype); + +.. _label-mpi-type-hvector: + +Stop using MPI_TYPE_HVECTOR +--------------------------- + +In both C and Fortran, the only change is the name of the function: +``MPI_TYPE_HVECTOR`` |rarrow| ``MPI_TYPE_CREATE_HVECTOR``. + +All parameter types stayed the same. + +.. _label-mpi-type-struct: + +Stop using MPI_TYPE_STRUCT +-------------------------- + +In both C and Fortran, *effectively* the only change is the name of +the function: ``MPI_TYPE_STRUCT`` |rarrow| ``MPI_TYPE_CREATE_STRUCT``. + +In C, the new function also has a ``const`` attribute on the three +array parameters, but most applications won't notice the difference. + +All other parameter types stayed the same. + +.. code-block:: c++ + + int count = 2; + int block_lengths[] = { 1, 2 }; + MPI_Aint displacements[] = { 0, sizeof(int) }; + MPI_Datatype datatypes[] = { MPI_INT, MPI_DOUBLE }; + MPI_Datatype newtype; + + // Old way + MPI_Type_struct(count, block_lengths, displacements, datatypes, &newtype); + + // New way + MPI_Type_create_struct(count, block_lengths, displacements, datatypes, &newtype); + +.. _label-mpi-type-extent: + +Stop using MPI_TYPE_EXTENT +-------------------------- + +In both C and Fortran, the ``MPI_TYPE_EXTENT`` function is superseded +by the slightly-different ``MPI_TYPE_GET_EXTENT`` function: the new +function also returns the lower bound. + +.. code-block:: c++ + + MPI_Aint lb; + MPI_Aint extent; + + // Old way + MPI_Type_extent(MPI_INT, &extent); + + // New way + MPI_Type_get_extent(MPI_INT, &lb, &extent); + +.. 
_label-mpi-type-lb: + +Stop using MPI_TYPE_LB +---------------------- + +In both C and Fortran, the ``MPI_TYPE_LB`` function is superseded by +the slightly-different ``MPI_TYPE_GET_EXTENT`` function: the new +function also returns the extent. + +.. code-block:: c++ + + MPI_Aint lb; + MPI_Aint extent; + + // Old way + MPI_Type_lb(MPI_INT, &lb); + + // New way + MPI_Type_get_extent(MPI_INT, &lb, &extent); + +.. _label-mpi-type-ub: + +Stop using MPI_TYPE_UB +---------------------- + +In both C and Fortran, the ``MPI_TYPE_UB`` function is superseded by +the slightly-different ``MPI_TYPE_GET_EXTENT`` function: the new +function returns the lower bound and the extent, which can be used to +compute the upper bound. + +.. code-block:: c++ + + MPI_Aint lb, ub; + MPI_Aint extent; + + // Old way + MPI_Type_ub(MPI_INT, &ub); + + // New way + MPI_Type_get_extent(MPI_INT, &lb, &extent); + ub = lb + extent; + +Note the ``ub`` calculation after calling ``MPI_Type_get_extent()``. + +.. _label-mpi-lb-ub: + +Stop using MPI_LB / MPI_UB +-------------------------- + +The ``MPI_LB`` and ``MPI_UB`` positional markers were fully replaced +with ``MPI_TYPE_CREATE_RESIZED`` in MPI-2.0. + +Prior to MPI-2.0, ``MPI_UB`` and ``MPI_LB`` were intended to be used +as input to ``MPI_TYPE_STRUCT`` (which, itself, has been deprecated +and renamed to ``MPI_TYPE_CREATE_STRUCT``). The same end effect can +now be achieved with ``MPI_TYPE_CREATE_RESIZED``. For example, using +the old method: + +.. 
code-block:: c++ + + int count = 3; + int block_lengths[] = { 1, 1, 1 }; + MPI_Aint displacements[] = { -2, 0, 10 }; + MPI_Datatype datatypes[] = { MPI_LB, MPI_INT, MPI_UB }; + MPI_Datatype newtype; + + MPI_Type_struct(count, block_lengths, displacements, datatypes, &newtype); + MPI_Type_commit(&newtype); + + MPI_Aint ub, lb, extent; + MPI_Type_lb(newtype, &lb); + MPI_Type_ub(newtype, &ub); + MPI_Type_extent(newtype, &extent); + printf("OLD: LB=%d, UB=%d, extent=%d\n", + lb, ub, extent); + +If we run the above, we get an output of: + +.. code-block:: + + OLD: LB=-2, UB=10, extent=12 + +The ``MPI_TYPE_CREATE_RESIZED`` function allows us to take any arbitrary +datatype and set the lower bound and extent directly (which indirectly +sets the upper bound), without needing to setup the arrays and +computing the displacements necessary to invoke +``MPI_TYPE_CREATE_STRUCT``. + +Aside from the ``printf`` statement, the following example is exactly +equivalent to the prior example (:ref:`see the MPI_TYPE_UB section +` for a mapping of ``MPI_TYPE_UB`` to +``MPI_TYPE_GET_EXTENT``): + +.. code-block:: c++ + + MPI_Datatype newtype; + + MPI_Type_create_resized(MPI_INT, -2, 12, &newtype); + MPI_Type_commit(&newtype); + + MPI_Aint ub, lb, extent; + MPI_Type_get_extent(newtype, &lb, &extent); + ub = lb + extent; + printf("NEW: LB=%d, UB=%d, extent=%d\n", + lb, ub, extent); + +If we run the above, we get an output of: + +.. code-block:: + + NEW: LB=-2, UB=10, extent=12 + +.. _label-mpi-combiner-fortran-integers: + +Stop using MPI_COMBINER_HINDEXED_INTEGER, MPI_COMBINER_HVECTOR_INTEGER, and MPI_COMBINER_STRUCT_INTEGER +------------------------------------------------------------------------------------------------------- + +The ``MPI_COMBINER_HINDEXED_INTEGER``, +``MPI_COMBINER_HVECTOR_INTEGER``, and ``MPI_COMBINER_STRUCT_INTEGER`` +constants could previously be returned from ``MPI_TYPE_GET_ENVELOPE``. + +Starting with MPI-3.0, these values will never be returned. 
Instead, +they will just return the same names, but without the ``_INTEGER`` +suffix. Specifically: + +* ``MPI_COMBINER_HINDEXED_INTEGER`` |rarrow| ``MPI_COMBINER_HINDEXED`` +* ``MPI_COMBINER_HVECTOR_INTEGER`` |rarrow| ``MPI_COMBINER_HVECTOR`` +* ``MPI_COMBINER_STRUCT_INTEGER`` |rarrow| ``MPI_COMBINER_STRUCT`` + +If your Fortran code is using any of the ``_INTEGER``-suffixed names, +you can just delete the ``_INTEGER`` suffix. + +.. _label-mpi-handler-function: + +Stop using MPI_Handler_function +------------------------------- + +The ``MPI_Handler_function`` C type is only used in the +deprecated/removed function ``MPI_Errhandler_create()``, as described +:ref:`in the MPI_ERRHANDLER_CREATE section +`. + +Most MPI applications likely won't use this type at all. But if they +do, they can simply use the new, exactly-equivalent type name (i.e., +the return type, number, and type of parameters didn't change): +``MPI_Comm_errhandler_function``. + +.. code-block:: c++ + + void my_errhandler_function(MPI_Comm *comm, int *code, ...) + { + // Do something useful to handle the error + } + + void some_function(void) + { + // Old way + MPI_Handler_function *old_ptr = my_errhandler_function; + + // New way + MPI_Comm_errhandler_function *new_ptr = my_errhandler_function; + } + +The ``MPI_Handler_function`` type isn't used at all in the Fortran +bindings. diff --git a/docs/conf.py b/docs/conf.py new file mode 100644 index 00000000000..54abe622f2d --- /dev/null +++ b/docs/conf.py @@ -0,0 +1,129 @@ +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. 
If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +# import os +# import sys +# sys.path.insert(0, os.path.abspath('.')) + +# -- Project information ----------------------------------------------------- + +import datetime +year = datetime.datetime.now().year + +project = 'Open MPI' +copyright = f'2003-{year}, The Open MPI Community' +author = 'The Open MPI Community' + +# The full version, including alpha/beta/rc tags +# Read the Open MPI version from the VERSION file +with open("../VERSION") as fp: + ompi_lines = fp.readlines() + +ompi_data = dict() +for ompi_line in ompi_lines: + if '#' in ompi_line: + ompi_line, _ = ompi_line.split("#") + ompi_line = ompi_line.strip() + + if '=' not in ompi_line: + continue + + ompi_key, ompi_val = ompi_line.split("=") + ompi_data[ompi_key.strip()] = ompi_val.strip() + +# "release" is a sphinx config variable -- assign it to the computed +# Open MPI version number. +series = f"{ompi_data['major']}.{ompi_data['minor']}.x" +release = f"{ompi_data['major']}.{ompi_data['minor']}.{ompi_data['release']}{ompi_data['greek']}" + + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +import sphinx_rtd_theme +extensions = ['recommonmark', "sphinx_rtd_theme"] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'venv', 'py*/**'] + + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. 
See the documentation for +# a list of builtin themes. +# +html_theme = 'sphinx_rtd_theme' + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +#html_static_path = ['_static'] + +# -- Options for MAN output ------------------------------------------------- + +import os +import re + +# Dynamically find all the man pages and build the appropriate list of +# tuples so that we don't have to manually maintain it. + +def find_man_pages_top(): + def _doit(topdir): + for root, dirs, files in os.walk(topdir): + for filename in files: + # Parse filenames of the format a "foo.X.rst" + parts = re.search("^([^/]+?)\.([0-9]+)\.rst$", filename) + + # Skip files that do not match that format (e.g., + # "index.rst") + if parts is None: + continue + + base_name = parts.group(1) + section = int(parts.group(2)) + + full_filename_without_rst = f'{root}/{base_name}.{section}' + + # Append a tuple: (filename, name, description, authors, section) + # We leave description blank. + # We also leave authors blank, because "The Open MPI community" + # already shows up in the copyright section. + tuple = (full_filename_without_rst, base_name, '', '', section) + tuples.append(tuple) + + tuples = list() + _doit("man-openmpi") + _doit("man-openshmem") + + return tuples + +man_pages = find_man_pages_top() + +# -- Open MPI-specific options ----------------------------------------------- + +# This prolog is included in every file. Put common stuff here. + +rst_prolog = f""" +.. |mdash| unicode:: U+02014 .. Em dash +.. |rarrow| unicode:: U+02192 .. Right arrow + +.. |year| replace:: {year} +.. |ompi_ver| replace:: v{release} +.. 
|ompi_series| replace:: v{series} +""" diff --git a/docs/contributing.rst b/docs/contributing.rst new file mode 100644 index 00000000000..a4054a7a572 --- /dev/null +++ b/docs/contributing.rst @@ -0,0 +1,97 @@ +Contributing to Open MPI +======================== + +There are many ways to contribute. Here are a few: + +#. Subscribe to `the mailing lists + `_ and become + active in the discussions. +#. Obtain `a Git clone `_ of Open + MPI's code base and start looking through the code. + + .. note:: Be sure to see the :doc:`Developers guide + ` for technical details about the code + base and how to build it). + +#. Write your own components and contribute them back to the main code + base. +#. Contribute bug fixes and feature enhancements to the main code + base. +#. Provide testing resources: + + #. For Github Pull Request Continuous Integration (CI) + #. For nightly snapshot builds and testing + + +.. _contributing-open-source-label: + +Open source contributions +------------------------- + +All code contributions are submitted as pull requests on the `Open MPI +GitHub repository `_. + +We need to have an established intellectual property pedigree of the +code in Open MPI. This means being able to ensure that all code +included in Open MPI is free, open source, and able to be distributed +under :doc:`the BSD license `. + +Open MPI has therefore adopted requirements based on the signed-off-by +process as described in the `Submitting patches +`_ +section of the Linux kernel documentation. Each proposed contribution +to the Open MPI code base must include the text "Signed-off-by:" +followed by the contributor's name and email address. This is a +developer's certification that he or she has the right to submit the +patch for inclusion into the project, and indicates agreement to the +Developer's Certificate of Origin: + + By making a contribution to this project, I certify that: + + #. 
The contribution was created in whole or in part by me and I + have the right to submit it under the Open MPI open source + license; or + + #. The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part by + me, under the Open MPI open source license (unless I am + permitted to submit under a different license); or + + #. The contribution was provided directly to me by some other + person who certified (1) or (2) and I have not modified it. + + #. I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project and the open source license(s) involved. + +Proposed contributions failing to include the "Signed-off-by:" +certification will not be accepted into any Open MPI code +repository. The community reserves the right to revert any commit +inadvertently made without the required certification. + +This policy prevents a situation where intellectual property gets into +the Open MPI code base and then someone later claims that we owe them +money for it. Open MPI is a free, open source code base. We intend +it to remain that way. + + +Closed source contributions +--------------------------- + +While we are creating free / open-source software, and we would prefer +if everyone's contributions to Open MPI were also free / open-source, +we certainly recognize that other organizations have different goals +from us. Such is the reality of software development in today's +global economy. + +As such, it is perfectly acceptable to make non-free / non-open-source +contributions to Open MPI. 
+ +We obviously cannot accept such contributions into the main code base, +but you are free to distribute plugins, enhancements, etc. as you see +fit. Indeed, :doc:`the BSD license ` is extremely +liberal in its redistribution provisions. diff --git a/docs/developers/autogen.rst b/docs/developers/autogen.rst new file mode 100644 index 00000000000..27ac8431f75 --- /dev/null +++ b/docs/developers/autogen.rst @@ -0,0 +1,32 @@ +Running ``autogen.pl`` +====================== + +You can now run OMPI's top-level ``autogen.pl`` script. This script +will invoke the GNU Autoconf, Automake, and Libtool commands in the +proper order and do a bunch of component discovery and housekeeping to +setup to run OMPI's top-level ``configure`` script. + +Running ``autogen.pl`` may take a few minutes, depending on your +system. It's not very exciting to watch. + +If you have a multi-processor system, enabling the multi-threaded +behavior in Automake 1.11 (or newer) can result in ``autogen.pl`` +running faster. Do this by setting the ``AUTOMAKE_JOBS`` environment +variable to the number of processors (threads) that you want it to use +before invoking ``autogen.pl``. For example (you can put this in your +shell startup files):: + + # For bash/sh/zsh: + export AUTOMAKE_JOBS=4 + + # For csh/tcsh: + setenv AUTOMAKE_JOBS 4 + +.. important:: You generally need to run ``autogen.pl`` whenever the + top-level file ``configure.ac`` changes, or any files in the + ``config/`` or ``/config/`` directories change (these + directories are where a lot of "include" files for Open MPI's + ``configure`` script live). + +.. note:: You do *NOT* need to re-run ``autogen.pl`` if you modify a + ``Makefile.am``.
diff --git a/docs/developers/building-open-mpi.rst b/docs/developers/building-open-mpi.rst new file mode 100644 index 00000000000..ee09d294e20 --- /dev/null +++ b/docs/developers/building-open-mpi.rst @@ -0,0 +1,9 @@ +Building Open MPI +================= + +Once you have run ``autogen.pl`` successfully, you can configure and +build Open MPI just like end users do with official distribution Open +MPI tarballs. + +See the :doc:`general "Install Open MPI" documentation for more +details. ` diff --git a/docs/developers/compiler-pickyness.rst b/docs/developers/compiler-pickyness.rst new file mode 100644 index 00000000000..bab9db0900d --- /dev/null +++ b/docs/developers/compiler-pickyness.rst @@ -0,0 +1,29 @@ +Compiler Pickyness by Default +============================= + +If you are building Open MPI from a Git clone (i.e., there is a +``.git`` directory in your build tree), the default build includes +extra compiler pickyness, which will result in more compiler warnings +than in non-developer builds. Getting these extra compiler warnings +is helpful to Open MPI developers in making the code base as clean as +possible. + +Developers can disable this picky-by-default behavior by using the +``--disable-picky`` configure option. Also note that extra-picky +compiles do *not* happen automatically when you do a VPATH build +(e.g., if ``.git`` is in your source tree, but not in your build +tree). + +Prior versions of Open MPI would automatically activate a lot of +(performance-reducing) debugging code by default if ``.git`` was found +in your build tree. This is no longer true. You can manually enable +these (performance-reducing) debugging features in the Open MPI code +base with these configure options: + +* ``--enable-debug`` +* ``--enable-mem-debug`` +* ``--enable-mem-profile`` + +.. note:: These options are really only relevant to those who are + developing Open MPI itself. They are not generally helpful for + debugging general MPI applications. 
diff --git a/docs/developers/frameworks.rst b/docs/developers/frameworks.rst new file mode 100644 index 00000000000..bc181d0bc82 --- /dev/null +++ b/docs/developers/frameworks.rst @@ -0,0 +1,218 @@ +.. _label-frameworks: + +Internal frameworks +=================== + +The Modular Component Architecture (MCA) is the backbone of Open MPI +-- most services and functionality are implemented through MCA +components. + +MPI layer frameworks +-------------------- + +Here is a list of all the component frameworks in the MPI layer of +Open MPI: + +* ``bml``: BTL management layer +* ``coll``: MPI collective algorithms +* ``fbtl``: point to point file byte transfer layer: abstraction for + individual read: collective read and write operations for MPI I/O +* ``fcoll``: collective file system functions for MPI I/O +* ``fs``: file system functions for MPI I/O +* ``hook``: Generic hooks into Open MPI +* ``io``: MPI I/O +* ``mtl``: Matching transport layer, used for MPI point-to-point + messages on some types of networks +* ``op``: Back end computations for intrinsic MPI_Op operators +* ``osc``: MPI one-sided communications +* ``pml``: MPI point-to-point management layer +* ``sharedfp``: shared file pointer operations for MPI I/O +* ``topo``: MPI topology routines +* ``vprotocol``: Protocols for the "v" PML + +OpenSHMEM component frameworks +------------------------------ + +* ``atomic``: OpenSHMEM atomic operations +* ``memheap``: OpenSHMEM memory allocators that support the + PGAS memory model +* ``scoll``: OpenSHMEM collective operations +* ``spml``: OpenSHMEM "pml-like" layer: supports one-sided, + point-to-point operations +* ``sshmem``: OpenSHMEM shared memory backing facility + +Miscellaneous frameworks +------------------------ + +* ``allocator``: Memory allocator +* ``backtrace``: Debugging call stack backtrace support +* ``btl``: Point-to-point Byte Transfer Layer +* ``dl``: Dynamic loading library interface +* ``hwloc``: Hardware locality (hwloc) versioning support +* 
``if``: OS IP interface support +* ``installdirs``: Installation directory relocation services +* ``memchecker``: Run-time memory checking +* ``memcpy``: Memory copy support +* ``memory``: Memory management hooks +* ``mpool``: Memory pooling +* ``patcher``: Symbol patcher hooks +* ``pmix``: Process management interface (exascale) +* ``rcache``: Memory registration cache +* ``reachable``: Network reachability determination +* ``shmem``: Shared memory support (NOT related to OpenSHMEM) +* ``smsc``: Shared memory single-copy support +* ``threads``: OS and userspace thread support +* ``timer``: High-resolution timers + +Framework notes +--------------- + +Each framework typically has one or more components that are used at +run-time. For example, the ``btl`` framework is used by the MPI layer +to send bytes across different types of underlying networks. The ``tcp`` +``btl``, for example, sends messages across TCP-based networks; the +``ucx`` ``pml`` sends messages across InfiniBand-based networks. + +Each component typically has some tunable parameters that can be +changed at run-time. Use the ``ompi_info`` command to check a component +to see what its tunable parameters are. For example: + +.. code-block:: sh + + shell$ ompi_info --param btl tcp + +shows some of the parameters (and default values) for the ``tcp`` ``btl`` +component (use ``--level`` to show *all* the parameters; see below). + +Note that ``ompi_info`` only shows a small number of a component's MCA +parameters by default. Each MCA parameter has a "level" value from 1 +to 9, corresponding to the MPI-3 MPI_T tool interface levels. In Open +MPI, we have interpreted these nine levels as three groups of three: + +#. End user / basic +#. End user / detailed +#. End user / all +#. Application tuner / basic +#. Application tuner / detailed +#. Application tuner / all +#. MPI/OpenSHMEM developer / basic +#. MPI/OpenSHMEM developer / detailed +#.
MPI/OpenSHMEM developer / all + +Here's how the three sub-groups are defined: + +#. End user: Generally, these are parameters that are required for + correctness, meaning that someone may need to set these just to + get their MPI/OpenSHMEM application to run correctly. +#. Application tuner: Generally, these are parameters that can be + used to tweak MPI application performance. +#. MPI/OpenSHMEM developer: Parameters that either don't fit in the + other two, or are specifically intended for debugging / + development of Open MPI itself. + +Each sub-group is broken down into three classifications: + +#. Basic: For parameters that everyone in this category will want to + see. +#. Detailed: Parameters that are useful, but you probably won't need + to change them often. +#. All: All other parameters -- probably including some fairly + esoteric parameters. + +To see *all* available parameters for a given component, specify that +ompi_info should use level 9: + +.. code-block:: sh + + shell$ ompi_info --param btl tcp --level 9 + +.. error:: TODO The following content seems redundant with the FAQ. + Additionally, information about how to set MCA params should be + prominently documented somewhere that is easy for users to find -- + not buried here in the developer's section. + +These values can be overridden at run-time in several ways. At +run-time, the following locations are examined (in order) for new +values of parameters: + +#. ``PREFIX/etc/openmpi-mca-params.conf``: + This file is intended to set any system-wide default MCA parameter + values -- it will apply, by default, to all users who use this Open + MPI installation. The default file that is installed contains many + comments explaining its format. + +#. ``$HOME/.openmpi/mca-params.conf``: + If this file exists, it should be in the same format as + ``PREFIX/etc/openmpi-mca-params.conf``. It is intended to provide + per-user default parameter values. + +#. 
environment variables of the form ``OMPI_MCA_`` set equal to a + ``VALUE``: + + Where ```` is the name of the parameter. For example, set the + variable named ``OMPI_MCA_btl_tcp_frag_size`` to the value 65536 + (Bourne-style shells): + + .. code-block:: sh + + shell$ OMPI_MCA_btl_tcp_frag_size=65536 + shell$ export OMPI_MCA_btl_tcp_frag_size + + .. error:: TODO Do we need content here about PMIx and PRTE env vars? + +#. the ``mpirun``/``oshrun`` command line: ``--mca NAME VALUE`` + + Where ```` is the name of the parameter. For example: + + .. code-block:: sh + + shell$ mpirun --mca btl_tcp_frag_size 65536 -np 2 hello_world_mpi + + .. error:: TODO Do we need content here about PMIx and PRTE MCA vars + and corresponding command line switches? + +These locations are checked in order. For example, a parameter value +passed on the ``mpirun`` command line will override an environment +variable; an environment variable will override the system-wide +defaults. + +Each component typically activates itself when relevant. For example, +the usNIC component will detect that usNIC devices are present and +will automatically be used for MPI communications. The Slurm +component will automatically detect when running inside a Slurm job +and activate itself. And so on. + +Components can be manually activated or deactivated if necessary, of +course. The most common components that are manually activated, +deactivated, or tuned are the ``btl`` components -- components that are +used for MPI point-to-point communications on many types of common +networks. + +For example, to ensure that *only* the ``tcp`` and ``self`` (process loopback) +components are used for MPI communications, specify them in a +comma-delimited list to the ``btl`` MCA parameter: + +.. code-block:: sh + + shell$ mpirun --mca btl tcp,self hello_world_mpi + +To add shared memory support, add ``sm`` into the comma-delimited list +(list order does not matter): + +..
code-block:: sh + + shell$ mpirun --mca btl tcp,sm,self hello_world_mpi + +.. note:: There used to be a ``vader`` ``btl`` component for shared + memory support; it was renamed to ``sm`` in Open MPI v5.0.0, + but the alias ``vader`` still works as well. + +To specifically deactivate a specific component, the comma-delimited +list can be prepended with a ``^`` to negate it: + +.. code-block:: sh + + shell$ mpirun --mca btl ^tcp hello_mpi_world + +The above command will use any ``btl`` component other than the +``tcp`` component. diff --git a/docs/developers/git-clone.rst b/docs/developers/git-clone.rst new file mode 100644 index 00000000000..ccb79abd9ba --- /dev/null +++ b/docs/developers/git-clone.rst @@ -0,0 +1,20 @@ +Obtaining a Git clone +===================== + +Open MPI's Git repositories are `hosted at GitHub +`_. + +#. First, you will need a Git client. We recommend getting the latest + version available. If you do not have the command ``git`` in your + path, you will likely need to download and install Git. +#. `ompi `_ is the main Open MPI + repository where most active development is done. Git clone this + repository. Note that the use of the ``--recursive`` CLI option is + necessary because Open MPI uses Git submodules:: + + shell$ git clone --recursive https://github.com/open-mpi/ompi.git + +Note that Git is natively capable of using many forms of web +proxies. If your network setup requires the use of a web proxy, +`consult the Git documentation for more details +`_. diff --git a/docs/developers/gnu-autotools.rst b/docs/developers/gnu-autotools.rst new file mode 100644 index 00000000000..7d519c03c02 --- /dev/null +++ b/docs/developers/gnu-autotools.rst @@ -0,0 +1,312 @@ +.. _developers-installing-autotools-label: + +Manually installing the GNU Autotools +====================================== + +There is enough detail in building the GNU Autotools that it warrants +its own section. + +..
note:: As noted above, you only need to read/care about this + section if you are building Open MPI from a Git clone. End + users installing an Open MPI distribution tarball do *not* + need to have the GNU Autotools installed. + +Use a package manager +--------------------- + +It is recommended that you use your Linux distribution's package +manager, or Homebrew or MacPorts on macOS to install recent versions +of GNU Autoconf, Automake, and Libtool. + +If you cannot, keep reading in this section to see how to build and +install these applications manually (i.e., download the source from +the internet and build/install it yourself). + +Autotools versions +------------------ + +The following tools are required for developers to compile Open MPI +from its repository sources (users who download Open MPI tarballs do +not need these tools - they are only required for developers working +on the internals of Open MPI itself): + +.. list-table:: + :header-rows: 1 + + * - Software package + - Notes + - URL + + * - GNU m4 + - See version chart below + - https://ftp.gnu.org/gnu/m4/ + * - GNU Autoconf + - See version chart below + - https://ftp.gnu.org/gnu/autoconf/ + * - GNU Automake + - See version chart below + - https://ftp.gnu.org/gnu/automake/ + * - GNU Libtool + - See version chart below + - https://ftp.gnu.org/gnu/libtool/ + +The table below lists the versions that are used to make nightly +snapshot and official release Open MPI tarballs. Other versions of the +tools *may* work for some (but almost certainly not all) platforms; +the ones listed below are the versions that we know work across an +extremely wide variety of platforms and environments. + +To strengthen the above point: the core Open MPI developers typically +use very, very recent versions of the GNU tools. There are known bugs +in older versions of the GNU tools that Open MPI no longer compensates +for (it seemed senseless to indefinitely support patches for ancient +versions of Autoconf, for example). 
+ +.. warning:: You **will** have problems if you do not use recent + versions of the GNU Autotools. + +That being said, ``autogen.pl`` and ``configure.ac`` scripts tend to +be a bit lenient and enforce slightly older minimum versions than the +ones listed below. This is because such older versions still make +usable Open MPI builds on many platforms - especially Linux on x86_64 +with GNU compilers - and are convenient for developers whose Linux +distribution may not have as recent as the versions listed below (but are +recent enough to produce a working version for their platform). + +To be clear: the versions listed below are required to support a wide +variety of platforms and environments, and are used to make nightly +and official release tarballs. When building Open MPI, YMMV when using +versions older than those listed below |mdash| especially if you are +not building on Linux x86_64 with the GNU compilers. + +Using older versions is unsupported. If you run into problems, upgrade +to at least the versions listed below. + +.. note:: You may need to scroll right in the following table. + +.. 
list-table:: + :header-rows: 1 + + * - Open MPI + - M4 + - Autoconf + - Automake + - Libtool + - Flex + - Sphinx + + * - v1.0.x + - NA + - 2.58 - 2.59 + - 1.7 - 1.9.6 + - 1.5.16 - 1.5.22 + - 2.5.4 + - NA + * - v1.1.x + - NA + - 2.59 + - 1.9.6 + - 1.5.16 - 1.5.22 + - 2.5.4 + - NA + * - v1.2.x + - NA + - 2.59 + - 1.9.6 + - 1.5.22 - 2.1a + - 2.5.4 + - NA + * - v1.3.x + - 1.4.11 + - 2.63 + - 1.10.1 + - 2.2.6b + - 2.5.4 + - NA + * - v1.4.x + - 1.4.11 + - 2.63 + - 1.10.3 + - 2.2.6b + - 2.5.4 + - NA + * - v1.5.x for x=0-4 + - 1.4.13 + - 2.65 + - 1.11.1 + - 2.2.6b + - 2.5.4 + - NA + * - v1.5.x for x>=5 + - 1.4.16 + - 2.68 + - 1.11.3 + - 2.4.2 + - 2.5.35 + - NA + * - v1.6.x + - 1.4.16 + - 2.68 + - 1.11.3 + - 2.4.2 + - 2.5.35 + - NA + * - v1.7.x + - 1.4.16 + - 2.69 + - 1.12.2 + - 2.4.2 + - 2.5.35 + - NA + * - v1.8.x + - 1.4.16 + - 2.69 + - 1.12.2 + - 2.4.2 + - 2.5.35 + - NA + * - v1.10.x + - 1.4.16 + - 2.69 + - 1.12.2 + - 2.4.2 + - 2.5.35 + - NA + * - v2.0.x through v4.y + - 1.4.17 + - 2.69 + - 1.15 + - 2.4.6 + - 2.5.35 + - NA + * - v5.0.x + - 1.4.17 + - 2.69 + - 1.15 + - 2.4.6 + - 2.5.35 + - 4.2.0 + * - Git master + - 1.4.17 + - 2.69 + - 1.15 + - 2.4.6 + - 2.5.35 + - 4.2.0 + +Checking your versions +---------------------- + +You can check what versions of the Autotools you have installed with +the following: + +.. code-block:: sh + + shell$ m4 --version + shell$ autoconf --version + shell$ automake --version + shell$ libtoolize --version + +Installing the GNU Autotools from source +---------------------------------------- + +.. note:: Most operating system packaging systems (to include Homebrew + and MacPorts on MacOS) install recent-enough versions of the + GNU Autotools. You should generally only install the GNU + Autotools manually if you can't use your operating system + packaging system to install them for you. 
+ +The GNU Autotools sources can be downloaded from: + +* https://ftp.gnu.org/gnu/autoconf/ +* https://ftp.gnu.org/gnu/automake/ +* https://ftp.gnu.org/gnu/libtool/ +* And if you need it: https://ftp.gnu.org/gnu/m4/ + +It is certainly easiest to download/build/install all four of these +tools together. But note that Open MPI has no specific m4 +requirements; it is only listed here because Autoconf requires minimum +versions of GNU m4. Hence, you may or may not *need* to actually +install a new version of GNU m4. That being said, if you are confused +or don't know, just install the latest GNU m4 with the rest of the GNU +Autotools and everything will work out fine. + + +Build and install ordering +-------------------------- + +You must build and install the GNU Autotools in the following order: + +#. m4 +#. Autoconf +#. Automake +#. Libtool + +.. important:: You *must* install the last three tools (Autoconf, + Automake, Libtool) into the same prefix directory. + These three tools are somewhat inter-related, and if + they're going to be used together, they *must* share a + common installation prefix. + +You can install m4 anywhere as long as it can be found in the path; +it may be convenient to install it in the same prefix as the other +three. Or you can use any recent-enough m4 that is in your path. + +.. warning:: It is *strongly* encouraged that you do **not** install + your new versions over the OS-installed versions. This could cause + other things on your system to break. Instead, install into + ``$HOME/local``, or ``/usr/local``, or wherever else you tend to + install "local" kinds of software. + + In doing so, be sure to prefix your ``$PATH`` with the directory + where they are installed. For example, if you install into + ``$HOME/local``, you may want to edit your shell startup file + (``.bashrc``, ``.cshrc``, ``.tcshrc``, etc.) to have something + like + + ..
code-block:: sh + + # For bash/sh: + export PATH=$HOME/local/bin:$PATH + # For csh/tcsh: + set path = ($HOME/local/bin $path) + + Ensure to set your ``$PATH`` *before* you configure/build/install + the four packages. + +All four packages require two simple commands to build and +install: + +.. code-block:: sh + + shell$ cd M4_DIRECTORY + shell$ ./configure --prefix=PREFIX + shell$ make all install + +.. important:: If you are using a shell that does not automatically + re-index the ``$PATH`` (e.g., the ``csh`` or ``tcsh`` + shells), be sure to run the ``rehash`` command before + you install the next package so that the executables + that were just installed can be found by the next + package. + +.. code-block:: sh + + # Make $PATH be re-indexed if necessary, e.g., via "rehash" + shell$ cd AUTOCONF_DIRECTORY + shell$ ./configure --prefix=PREFIX + shell$ make all install + +.. code-block:: sh + + # Make $PATH be re-indexed if necessary, e.g., via "rehash" + shell$ cd AUTOMAKE_DIRECTORY + shell$ ./configure --prefix=PREFIX + shell$ make all install + +.. code-block:: sh + + # Make $PATH be re-indexed if necessary, e.g., via "rehash" + shell$ cd LIBTOOL_DIRECTORY + shell$ ./configure --prefix=PREFIX + shell$ make all install diff --git a/docs/developers/index.rst b/docs/developers/index.rst new file mode 100644 index 00000000000..82e68c26b1d --- /dev/null +++ b/docs/developers/index.rst @@ -0,0 +1,24 @@ +Developer's guide +================= + +This section is here for those who are building/exploring the +internals of Open MPI in its source code form, most likely through a +developer's tree (i.e., a Git clone). + +If you are writing MPI applications that simply *use* Open MPI, you +probably don't need to read this section. + +.. 
toctree:: + :maxdepth: 1 + + prerequisites + git-clone + compiler-pickyness + autogen + building-open-mpi + terminology + source-code-tree-layout + frameworks + gnu-autotools + sphinx + rst-for-markdown-expats.rst diff --git a/docs/developers/prerequisites.rst b/docs/developers/prerequisites.rst new file mode 100644 index 00000000000..08b652eb09c --- /dev/null +++ b/docs/developers/prerequisites.rst @@ -0,0 +1,113 @@ +Prerequisites +============= + +Compilers +--------- + +Although it should probably be assumed, you'll need a C compiler that +supports C99. + +You'll also need a Fortran compiler if you want to build the Fortran +MPI bindings (the more recent the Fortran compiler, the better), and a +Java compiler if you want to build the (unofficial) Java MPI bindings. + +GNU Autotools +------------- + +When building Open MPI from its repository sources, the GNU Autotools +must be installed (i.e., `GNU Autoconf +`_, `GNU Automake +`_, and `GNU Libtool +`_). + +.. note:: The GNU Autotools are *not* required when building Open MPI + from distribution tarballs. Open MPI distribution tarballs + are bootstrapped such that end-users do not need to have the + GNU Autotools installed. + +You can generally install GNU Autoconf, Automake, and Libtool via your +Linux distribution native package system, or via Homebrew or MacPorts +on MacOS. This usually "just works." + +If you run into problems with the GNU Autotools, or need to download / +build them manually, see the :ref:`how to build and install GNU +Autotools section ` for much +more detail. + + +Flex +---- + +`Flex `_ is used during the +compilation of a developer's checkout (it is not used to build +official distribution tarballs). Other flavors of lex are *not* +supported: given the choice of making parsing code portable between +all flavors of lex and doing more interesting work on Open MPI, we +greatly prefer the latter. 
+ +Note that no testing has been performed to see what the minimum +version of Flex is required by Open MPI. We suggest that you use +v2.5.35 at the earliest. + +For now, Open MPI will allow developer builds with Flex 2.5.4. This +is primarily motivated by the fact that RedHat/CentOS 5 ships with +Flex 2.5.4. It is likely that someday Open MPI developer builds will +require Flex version >=2.5.35. + +Note that the ``flex``-generated code generates some compiler warnings +on some platforms, but the warnings do not seem to be consistent or +uniform on all platforms, compilers, and flex versions. As such, we +have done little to try to remove those warnings. + +If you do not have Flex installed and cannot easily install it via +your operating system's packaging system (to include Homebrew or +MacPorts on MacOS), see `the Flex Github repository +`_. + + +Sphinx +------ + +`Sphinx `_ is used to generate both the +HTML version of the documentation (that you are reading right now) and +the nroff man pages. + +Official Open MPI distribution tarballs contain pre-built HTML +documentation and man pages. This means that -- similar to the GNU +Autotools -- end users do not need to have Sphinx installed, but will +still have both the HTML documentation and man pages installed as part +of the normal configure / build / install process. + +However, the HTML documentation and man pages are *not* stored in Open +MPI's Git repository; only the ReStructred Text source code of the +documentation is in the Git repository. Hence, if you are building +Open MPI from a Git clone, you will need Sphinx (and some Python +modules) in order to build the HTML documentation and man pages. + +.. important:: Most systems do not have Sphinx and/or the required + Python modules installed by default. :ref:`See the + Installing Sphinx section + ` for details on + how to install Sphinx and the required Python modules. 
+ +If ``configure`` is able to find Sphinx and the required Python +modules, it will automatically generate the HTML documentation and man +pages during the normal build procedure (i.e., during ``make all``). +If ``configure`` is *not* able to find Sphinx and/or the required +Python modules, it will simply skip building the documentation. + +.. note:: If you have built/installed Open MPI from a Git clone and + unexpectedly did not have the man pages installed, it is + likely that you do not have Sphinx and/or the required + Python modules available. + + :ref:`See the Installing Sphinx section + ` for details on how + to install Sphinx and the required Python modules. + +.. important:: ``make dist`` will fail if ``configure`` did not find + Sphinx and/or the required Python modules. + Specifically: if ``make dist`` is not able to generate + the most up-to-date HTML documentation and man pages, + you cannot build a distribution tarball. **This is an + intentional design decision.** diff --git a/docs/developers/rst-for-markdown-expats.rst b/docs/developers/rst-for-markdown-expats.rst new file mode 100644 index 00000000000..c574adfced8 --- /dev/null +++ b/docs/developers/rst-for-markdown-expats.rst @@ -0,0 +1,374 @@ +.. _developers-rst-for-markdown-expats: + +ReStructured Text for those who know Markdown +============================================= + +You can think of RST as "Markdown, but much better". Meaning: + +#. RST is basically just as simple as Markdown +#. But RST is both more precise, and has more available formatting + constructs (without getting crazy complicated) + +The full Sphinx / RST documentation is available here: +https://www.sphinx-doc.org/en/master/index.html + +Translating Markdown to RST +--------------------------- + +If you're familiar with Markdown, here's some tips to get you started +in RST: + +* Whitespace and indenting + + * MD: Whitespace and indenting generally doesn't matter in most + cases. 
It does matter with bullets and sub bullets, but the rules + get pretty weird, and vary between different Markdown renderers. + + * RST: As noted above, **indenting matters**. A lot. Just like + Python. In general, you indent all RST text to keep it within the + same level. For example, all this text would be a single + paragraph + + **Blank lines also matter**. A lot. You use blank lines to + delimit sections within an indenting level. For example, the + blank line before this paragraph denotes a paragraph break. + + .. note:: RST was created by the Python community. Hence, + whitespace is quite important. + + * Indenting matters + * Blank lines between content matter + + Using a blank line and outdenting indicates the end of the previous + item. For example, this paragraph is not part of the bulleted list. + +* Fixed width font + + * MD: Use single backquotes: + + .. code-block:: + + `hello world` + + * RST: Use a pair of backquotes: + + .. code-block:: + + ``hello world`` + +* Italics + + * MD: ``*hello world*`` or ``_hello world_`` + * RST: ``*hello world*`` + +* Boldface + + * MD: ``**hello world**`` + * RST: Same as MD + +* Chapter and section delimiters + + * MD: Either use one or more pound signs (#, ##, ###) to the left of + the line of text, or underline the line of text with equals signs + or dashes + + * RST: Have a single line of text, underlined by non-alphanumeric + characters. + + * The length of the underline *must* be at least as long as the + line of text + * Which non-alphanumeric character is used for the underline does not + matter, but the order in which they are used denotes chapters + / sections / subsections / etc. In these OMPI docs, the + underline characters we use are: + + .. code-block:: + + Chapter 1: hello world + ====================== + + .. code-block:: + + Section 1: hello world + ---------------------- + + .. code-block:: + + Subsection 1: hello world + ^^^^^^^^^^^^^^^^^^^^^^^^^ + + ..
code-block:: + + Subsubsection 1: hello world + ```````````````````````````` + + Meaning: underlines made of = denotes chapters, underlines + made of - denotes sections, and underlines made of ^ denotes + subsections. + +* Multi-line code/fixed-width font + + * MD: Use three single quotes to delimit blocks of text. Optionally + include a token keyword to denote the syntax highlighting to use + inside that block. + + .. code-block:: + + ```c + int main() { printf("Hello world\n"); return 0 } + ``` + + * RST: Use ``.. code-block:: KEYWORD`` to start a section of code. + + .. code-block:: + + .. code-block:: c + + int main() { printf("Hello world\n"); return 0 } + + * KEYWORD indicates which syntax highlighting to use (e.g., ``c``, + ``c++`` ``make``, ``sh``, ``ini``, ``Fortran``, ``diff``, ``python``, ``java``, + ... etc.). + * KEYWORD can be omitted if no specific highlighting is to be + used. + * There *MUST* be a blank line after the ``code-block`` line. + * The lines in the block must be indented to the same column as + the ``c`` in ``code-block``. For example: + + .. code-block:: + + .. code-block:: sh + + shell$ tar xf openmpi-.tar.bz2 + shell$ cd openmpi- + shell$ ./configure --prefix= |& tee config.out + + Note that the code block will be rendered at the same level as + where the first ``.`` of ``.. code-block::`` starts. In this + case, the example code block will be rendered in the bulleted + item. + + Whereas this parargraph and code block will be outside of the + above bulleted list: + + .. code-block:: sh + + shell$ tar xf openmpi-.tar.bz2 + shell$ cd openmpi- + shell$ ./configure --prefix= |& tee config.out + + The code-block can contain blank lines. + + The code-block is terminated by a blank line and then outdent back + to the same level as the first ``.`` in ``.. code-block::``. + +* Un-numbered bullets + + * MD: Start lines with ``*`` or ``-`` + * RST: Start lines with ``*``. 
You can wrap lines at the same + indenting level to make paragraphs in the same bullet. + + Having a blank line and then more text at the same indenting level + makes another paragraph in the same bullet. You can even put other + directives in this same indenting level. + + * For example, you can start a sub bullet. + + This text is the next paragraph in the same sub bullet. + + .. code-block:: + + This is a verbatim code block within this same sub bullet. + More about code-blocks below. + + This is the next paragraph (after the code block) in the same + sub bullet. + + * If you start a new bullet, that terminates the previous bullet. + + You ***MUST*** put blank lines between bullets! + +* Numbered bullets: + + * MD: Start lines with ``#`` + * RST: Start lines with ``#.`` + + .. important:: Yes, the trailing ``.`` is important + + For example: + + .. code-block:: + + #. Item number 1 + #. The second item + #. A third item + + All the same rules for indentation apply as described above. + +* Comments + + * MD: Enclose content in ``<!-- ... -->`` (i.e., HTML comments, + but they are included in the output) + * RST: Start a line with two periods and a space. + + For example, the following block is a comment, and will not be + included in the output: + + .. code-block:: + + .. Hello world. This is a comment. This whole block is a + comment. You can leave it here in the final document, and it + will not be included in the rendered output. + + Your comment can even include blank lines. You terminate a + comment -- just like most other things in RST -- by a blank + line and then outdenting back out to the same column as the + first ".". + + This line is no longer part of the comment. + +* Including files + + * MD: You cannot include files in Markdown. + * RST: Use the ``.. include:: FILENAME`` directive. For example: + + .. code-block:: + + .. include:: features-extensions.rst + .. include:: features-java.rst + + Those directives include those 2 files right here in this RST + file. 
Chapter/section/subsection delimiters will be continued in + those files as part of rendering this file. + +* Hyperlinks to URLs + + * MD: + + .. code-block:: + + [this is the link text](https://example.com/) + + * RST: + + .. code-block:: + + `this is the link text <https://example.com/>`_ + + .. important:: Yes, the trailing underscore in RST is important. + It's a little weird, but you'll cope. + +* Hyperlinks to anchors: + + * MD: I forget offhand how to make anchors and links to them in MD. + * RST: Use the ``:ref:`` directive. + + Make an anchor like this: + + .. code-block:: + + .. _ANCHOR_NAME: + + It *must* start with an underscore and end with a colon. + + I've typically used anchor names that end in ``-label`` to make it + blatantly obvious that it's a label. For example: + + .. code-block:: + + .. _building-and-installing-section-label: + + Then you can use the ``:ref:`` directive: + + .. code-block:: + + be sure to see :ref:`the VPATH build section + <building-and-installing-section-label>`. + +* Hyperlinks to other (RST) pages + + * MD: + + .. code-block:: + + [link text](page_name) + + * RST: Use the ``:doc:`` directive. + + General format: + + .. code-block:: + + :doc:`link text <path/to/page>` + + For example: + + .. code-block:: + + You should read :doc:`the Developer's Guide <developers/index>`. + + The page path is relative to the ``docs`` dir in the OMPI git tree. + +* Macros + + * MD: There are no macros in Markdown. + * RST: We have defined a few OMPI-specific macros in RST. You can + insert these macros anywhere in RST content text. + + ``|ompi_ver|`` is the full Open MPI version number, including + alpha/beta/rc/greek denotation. For example ``5.0.0rc1``. + + ``|ompi_series|`` is the major/minor Open MPI version, e.g., + ``5.0.x``. + + .. important:: Never hard-code the Open MPI version number or + series! Always use the above macros. + + ``|mdash|`` is a unicode long dash, an "em" dash. Use it instead + of ``--``. + + ``|rarrow|`` is a unicode right arrow. Use it instead of ``->`` + or ``-->``. + +* Brightly-colored boxes. 
+ + * MD: There are no brightly-colored boxes in MD. + + * RST: You can use various directives to make brightly-colored + "note" boxes (Called admonitions) in RST. For example: + + .. important:: a green box with a "!" icon + + Standard indenting rules apply for the content in the box. You + can have multiple lines and multiple paragraphs, for example. + + Yippee. + + * You can even have bullets. + + .. code-block:: + + You can even have code blocks inside the bullet inside the + caution box. + + * All the standard indenting rules apply. + + .. hint:: a green box with a "!" icon + + .. note:: a blue box with a "!" icon + + .. caution:: an orange box with a "!" icon + + .. attention:: an orange box with a "!" icon + + .. warning:: an orange box with a "!" icon + + .. error:: a red box with a "!" icon + + .. danger:: a red box with a "!" icon + + .. admonition:: Custom title + :class: tip + + Custom text for this custom admonition. Note that the ``:class: `` + will change the coloring to the color for the basic admonition of that + type. diff --git a/docs/developers/source-code-tree-layout.rst b/docs/developers/source-code-tree-layout.rst new file mode 100644 index 00000000000..bf8a10212eb --- /dev/null +++ b/docs/developers/source-code-tree-layout.rst @@ -0,0 +1,88 @@ +Source code tree layout +======================= + +There are a few notable top-level directories in the source +tree: + +* The main sub-projects: + + * ``oshmem``: Top-level OpenSHMEM code base + * ``ompi``: The Open MPI code base + * ``opal``: The OPAL code base + +* ``config``: M4 scripts supporting the top-level ``configure`` script + ``mpi.h`` +* ``etc``: Some miscellaneous text files +* ``docs``: Source code for Open MPI documentation +* ``examples``: Trivial MPI / OpenSHMEM example programs +* ``3rd-party``: Included copies of required core libraries (either + via Git submodules in Git clones or via binary tarballs). + + .. 
note:: While it may be considered unusual, we include binary + tarballs (instead of Git submodules) for 3rd party projects that + are: + + #. Needed by Open MPI for correct operation, and + #. Not universally included in OS distributions, and + #. Rarely updated. + +Each of the three main source directories (``oshmem``, ``ompi``, and +``opal``) generate at least a top-level library named ``liboshmem``, +``libmpi``, and ``libopen-pal``, respectively. They can be built as +either static or shared libraries. Executables are also produced in +subdirectories of some of the trees. + +Each of the sub-project source directories have similar (but not +identical) directory structures under them: + +* ``class``: C++-like "classes" (using the OPAL class system) + specific to this project +* ``include``: Top-level include files specific to this project +* ``mca``: MCA frameworks and components specific to this project +* ``runtime``: Startup and shutdown of this project at runtime +* ``tools``: Executables specific to this project (currently none in + OPAL) +* ``util``: Random utility code + +There are other top-level directories in each of the sub-projects, +each having to do with specific logic and code for that project. For +example, the MPI API implementations can be found under +``ompi/mpi/LANGUAGE``, where ``LANGUAGE`` is ``c`` or ``fortran``. + +The layout of the ``mca`` trees are strictly defined. They are of the +form: + +.. code-block:: text + + PROJECT/mca/FRAMEWORK/COMPONENT + +To be explicit: it is forbidden to have a directory under the ``mca`` +trees that does not meet this template (with the exception of ``base`` +directories, explained below). Hence, only framework and component +code can be in the ``mca`` trees. + +That is, framework and component names must be valid directory names +(and C variables; more on that later). For example, the TCP BTL +component is located in ``opal/mca/btl/tcp/``. 
+ +The name ``base`` is reserved; there cannot be a framework or component +named ``base``. Directories named ``base`` are reserved for the +implementation of the MCA and frameworks. Here are a few examples (as +of the |ompi_series| source tree): + +.. code-block:: sh + + # Main implementation of the MCA + opal/mca/base + + # Implementation of the btl framework + opal/mca/btl/base + + # Implementation of the sysv sshmem component + oshmem/mca/sshmem/sysv + + # Implementation of the pml framework + ompi/mca/pml/base + +Under these mandated directories, frameworks and/or components may have +arbitrary directory structures, however. diff --git a/docs/developers/sphinx.rst b/docs/developers/sphinx.rst new file mode 100644 index 00000000000..de390598cbd --- /dev/null +++ b/docs/developers/sphinx.rst @@ -0,0 +1,99 @@ +.. _developers-installing-sphinx-label: + +Installing Sphinx +================= + +The Sphinx documentation recommends installing Sphinx (and its +required Python dependencies) via ``pip``, which typically requires +connectivity to the general internet. + +.. note:: If you are running on MacOS, you may be tempted to use + Homebrew or MacPorts to install Sphinx. The Sphinx documentation + recommends **against** this. Instead, you should use ``pip`` to + install Sphinx. + +There are three general ways to install Sphinx; you only need one of +them. + +Install Sphinx in a Python virtual environment +---------------------------------------------- + +The preferred method of installing Sphinx for Open MPI documentation +development is to install Sphinx in a Python virtual environment. +This places Sphinx in a sandbox that will not conflict with other +``pip``-installed Python modules. This example installs Sphinx and +other Python modules in the ``ompi-docs-venv`` tree under your Open +MPI Git clone directory: + +.. 
code-block:: sh + + # Create the Python virtual environment + shell$ cd TOP_OF_OPEN_MPI_GIT_CLONE + shell$ python3 -m venv ompi-docs-venv + # Or: python3 -m virtualenv ompi-docs-venv + # Or: virtualenv --python=python3 ompi-docs-venv + + # Activate the virtual environment + shell$ . ./ompi-docs-venv/bin/activate + + # Notice that the shell prompt changes + # Now install the required Python modules + (ompi-docs-venv) shell$ pip3 install -r docs/requirements.txt + # Or: python3 -m pip install -r docs/requirements.txt + +Note that sourcing the ``activate`` script will change your prompt to +put the name of your virtual environment directory at the front, just +as a visual reminder that you are operating in a Python virtual +environment. You can run ``deactivate`` to leave the virtual +environment. + +.. important:: You will need to source the ``activate`` script to put + Sphinx in your ``PATH`` (e.g., *before* running Open + MPI's ``configure`` and build steps). + +Install Sphinx globally +----------------------- + +If Python virtual environments are not desirable on your system, you +can install Sphinx globally on your system (you may need to run with +root privileges): + +.. code-block:: sh + + shell$ cd TOP_OF_OPEN_MPI_GIT_CLONE + shell$ pip3 install -r docs/requirements.txt + # Or: python3 -m pip install -r docs/requirements.txt + +This will install Sphinx and some Python modules required for building +the Open MPI documentation in a system-wide location. + +This will likely install the ``sphinx-build`` executable in a location +that is already in your ``PATH``. If the location is not already in +your ``PATH``, then you need to add it to your ``PATH`` *before* +running Open MPI's ``configure`` and build steps. + + +Install Sphinx locally +---------------------- + +If you cannot or do not want to install Sphinx globally on your +system, the following will install Sphinx somewhere under your +``$HOME``. 
It is the same ``pip`` command as shown above, but with +the addition of the ``--user`` flag (you should not need ``root`` +permissions to run this command): + +.. code-block:: sh + + shell$ cd TOP_OF_OPEN_MPI_GIT_CLONE + shell$ pip3 install --user -r docs/requirements.txt + # Or: python3 -m pip install --user -r docs/requirements.txt + +This will install Sphinx and some Python modules required for building +the Open MPI documentation in a user-specific location under your +``$HOME``. + +You will likely need to find the location where ``sphinx-build`` was +installed and add it to your ``PATH``. + +.. note:: On MacOS, look for ``sphinx-build`` under + ``$HOME/Library/Python/VERSION/bin`` (where ``VERSION`` is + the version number of Python). diff --git a/docs/developers/terminology.rst b/docs/developers/terminology.rst new file mode 100644 index 00000000000..7dae0053df0 --- /dev/null +++ b/docs/developers/terminology.rst @@ -0,0 +1,98 @@ +Open MPI terminology +==================== + +Open MPI is a large project containing many different +sub-systems and a relatively large code base. Let's first cover some +fundamental terminology in order to make the rest of the discussion +easier. + +Open MPI has multiple main sections of code: + +* *OSHMEM:* The OpenSHMEM API and supporting logic +* *OMPI:* The MPI API and supporting logic +* *OPAL:* The Open Portable Access Layer (utility and "glue" code) + +There are strict abstraction barriers in the code between these +sections. That is, they are compiled into separate libraries: +``liboshmem``, ``libmpi``, ``libopen-pal`` with a strict dependency order: +OSHMEM depends on OMPI, OMPI depends on OPAL. For example, MPI +executables are linked with: + +.. code-block:: sh + + shell$ mpicc myapp.c -o myapp + # This actually turns into: + shell$ cc myapp.c -o myapp -lmpi ... + +More system-level libraries may be listed after ``-lmpi``, but you get +the idea. ``libmpi`` will implicitly pull ``libopen-pal`` into the +overall link step. 
+ +Strictly speaking, these are not "layers" in the classic software +engineering sense (even though it is convenient to refer to them as +such). They are listed above in dependency order, but that does not +mean that, for example, the OMPI code must go through the +OPAL code in order to reach the operating system or a network +interface. + +As such, this code organization more reflects abstractions and +software engineering, not a strict hierarchy of functions that must be +traversed in order to reach a lower layer. For example, OMPI can +directly call the operating system as necessary (and not go through +OPAL). Indeed, many top-level MPI API functions are quite performance +sensitive; it would not make sense to force them to traverse an +arbitrarily deep call stack just to move some bytes across a network. + +Note that Open MPI also uses some third-party libraries for core +functionality: + +* PMIx +* PRRTE +* Libevent +* Hardware Locality ("hwloc") + +These are discussed in detail in the :ref:`required support libraries +section `. + +Here's a list of terms that are frequently used in discussions about +the Open MPI code base: + +* *MCA:* The Modular Component Architecture (MCA) is the foundation + upon which the entire Open MPI project is built. It provides all the + component architecture services that the rest of the system uses. + Although it is the fundamental heart of the system, its + implementation is actually quite small and lightweight |mdash| it is + nothing like CORBA, COM, JINI, or many other well-known component + architectures. It was designed for HPC |mdash| meaning that it is small, + fast, and reasonably efficient |mdash| and therefore offers few services + other than finding, loading, and unloading components. + +* *Framework:* An MCA *framework* is a construct that is created for a + single, targeted purpose. It provides a public interface that is + used by external code, but it also has its own internal services. 
+ :ref:`See the list of Open MPI frameworks in this version of Open + MPI `. An MCA framework uses the MCA's services + to find and load *components* at run-time |mdash| implementations of + the framework's interface. An easy example framework to discuss is + the MPI framework named ``btl``, or the Byte Transfer Layer. It is + used to send and receive data on different kinds of networks. + Hence, Open MPI has ``btl`` components for shared memory, + OpenFabrics interfaces, various protocols over Ethernet, etc. + +* *Component:* An MCA *component* is an implementation of a + framework's interface. Another common word for component is + "plugin". It is a standalone collection of code that can be bundled + into a unit that can be inserted into the Open MPI code base, either + at run-time and/or compile-time. + +* *Module:* An MCA *module* is an instance of a component (in the C++ + sense of the word "instance"; an MCA component is analogous to a C++ + class, and an MCA module is analogous to a C++ object). For example, + if a node running an Open MPI application has two Ethernet NICs, the + Open MPI application will contain one TCP ``btl`` component, but two + TCP ``btl`` modules. This difference between components and modules + is important because modules have private state; components do not. + +Frameworks, components, and modules can be dynamic or static. That is, +they can be available as plugins or they may be compiled statically +into libraries (e.g., ``libmpi``). diff --git a/docs/faq/building-open-mpi.rst b/docs/faq/building-open-mpi.rst new file mode 100644 index 00000000000..d7051c642ce --- /dev/null +++ b/docs/faq/building-open-mpi.rst @@ -0,0 +1,151 @@ +Building Open MPI +================= + +.. TODO How can I create a TOC just for this page here at the top? + +///////////////////////////////////////////////////////////////////////// + +How do I statically link to the libraries of Intel compiler suite? 
+------------------------------------------------------------------ + +The Intel compiler suite, by default, dynamically links its runtime libraries +against the Open MPI binaries and libraries. This can cause problems if the Intel +compiler libraries are installed in non-standard locations. For example, you might +get errors like: + +.. code-block:: + + error while loading shared libraries: libimf.so: cannot open shared object file: + No such file or directory + +To avoid such problems, you can pass flags to Open MPI's configure +script that instruct the Intel compiler suite to statically link its +runtime libraries with Open MPI: + +.. code-block:: + + shell$ ./configure CC=icc CXX=icpc FC=ifort LDFLAGS=-Wc,-static-intel ... + +///////////////////////////////////////////////////////////////////////// + +Why do I get errors about hwloc or libevent not found? +------------------------------------------------------ + +Sometimes you may see errors similar to the following when attempting +to build Open MPI: + +.. code-block:: + + ... + PPFC profile/pwin_unlock_f08.lo + PPFC profile/pwin_unlock_all_f08.lo + PPFC profile/pwin_wait_f08.lo + FCLD libmpi_usempif08.la + ld: library not found for -lhwloc + collect2: error: ld returned 1 exit status + make[2]: *** [libmpi_usempif08.la] Error 1 + +This error can happen when a number of factors occur together: + +#. If Open MPI's ``configure`` script chooses to use an "external" + installation of `hwloc <https://www.open-mpi.org/projects/hwloc/>`_ + and/or `Libevent <https://libevent.org/>`_ (i.e., outside of Open + MPI's source tree). +#. If Open MPI's ``configure`` script chooses C and Fortran compilers + from different suites/installations. + +Put simply: if the default library search paths differ between +the C and Fortran compiler suites, the C linker may find a +system-installed ``libhwloc`` and/or ``libevent``, but the Fortran linker +may not. + +This may tend to happen more frequently starting with Open MPI v4.0.0 +on Mac OS because: + +#. 
In v4.0.0, Open MPI's ``configure`` script was changed to "prefer" + system-installed versions of hwloc and Libevent (vs. preferring the + hwloc and Libevent that are bundled in the Open MPI distribution + tarballs). +#. In MacOS, it is common for `Homebrew `_ or + `MacPorts `_ to install: + * hwloc and/or Libevent + * gcc and gfortran + +For example, as of July 2019, Homebrew: + +* Installs hwloc v2.0.4 under ``/usr/local`` +* Installs the Gnu C and Fortran compiler suites v9.1.0 under + ``/usr/local``. *However*, the C compiler executable is named ``gcc-9`` + (not ``gcc``!), whereas the Fortran compiler executable is + named ``gfortran``. + +These factors, taken together, result in Open MPI's ``configure`` +script deciding the following: + +* The C compiler is ``gcc`` (which is the MacOS-installed C + compiler). +* The Fortran compiler is ``gfortran`` (which is the + Homebrew-installed Fortran compiler). +* There is a suitable system-installed hwloc in ``/usr/local``, which + can be found -- by the C compiler/linker -- without specifying any + additional linker search paths. + +The careful reader will realize that the C and Fortran compilers are +from two entirely different installations. Indeed, their default +library search paths are different: + +* The MacOS-installed ``gcc`` will search ``/usr/local/lib`` by + default. +* The Homebrew-installed ``gfortran`` will *not* search + ``/usr/local/lib`` by default. + +Hence, since the majority of Open MPI's source code base is in C, it +compiles/links against hwloc successfully. But when Open MPI's +Fortran code for the ``mpi_f08`` module is compiled and linked, the +Homebrew-installed ``gfortran`` -- which does not search +``/usr/local/lib`` by default -- cannot find ``libhwloc``, and the link +fails. + +There are a few different possible solutions to this issue: + +#. The best solution is to always ensure that Open MPI uses a C and + Fortran compiler from the same suite/installation. 
This will + ensure that both compilers/linkers will use the same default + library search paths, and all behavior should be consistent. For + example, the following instructs Open MPI's ``configure`` script to + use ``gcc-9`` for the C compiler, which (as of July 2019) is the + Homebrew executable name for its installed C compiler: + + .. code-block:: sh + + shell$ ./configure CC=gcc-9 ... + + # You can be precise and specify an absolute path for the C + # compiler, and/or also specify the Fortran compiler: + shell$ ./configure CC=/usr/local/bin/gcc-9 FC=/usr/local/bin/gfortran ... + + Note that this will likely cause ``configure`` to *not* find the + Homebrew-installed hwloc, and instead fall back to using the + bundled hwloc in the Open MPI source tree. + +#. Alternatively, you can simply force ``configure`` to select the + bundled versions of hwloc and libevent, which avoids the issue + altogether: + + .. code-block:: sh + + shell$ ./configure --with-hwloc=internal --with-libevent=internal ... + +#. Finally, you can tell ``configure`` exactly where to find the + external hwloc library. This can have some unintended + consequences, however, because it will prefix both the C and + Fortran linker's default search paths with ``/usr/local/lib``: + + .. code-block:: sh + + shell$ ./configure --with-hwloc-libdir=/usr/local/lib ... + +Be sure to :ref:`see this section of the Installation guide +` for more +information about the bundled hwloc and/or Libevent +vs. system-installed versions. diff --git a/docs/faq/debugging.rst b/docs/faq/debugging.rst new file mode 100644 index 00000000000..1f74c0b66e0 --- /dev/null +++ b/docs/faq/debugging.rst @@ -0,0 +1,579 @@ +Parallel debugging +================== + +.. TODO How can I create a TOC just for this page here at the top? + +///////////////////////////////////////////////////////////////////////// + +How do I debug Open MPI processes in parallel? 
+---------------------------------------------- + +This is a difficult question. Debugging in serial can be +tricky: errors, uninitialized variables, stack smashing, etc. +Debugging in parallel adds multiple different dimensions to this +problem: a greater propensity for race conditions, asynchronous +events, and the general difficulty of trying to understand N processes +simultaneously executing |mdash| the problem becomes quite formidable. + +This FAQ section does not provide any definite solutions to +debugging in parallel. At best, it shows some general techniques and +a few specific examples that may be helpful to your situation. + +But there are various controls within Open MPI that can help with +debugging. These are probably the most valuable entries in this FAQ +section. + +///////////////////////////////////////////////////////////////////////// + +What tools are available for debugging in parallel? +--------------------------------------------------- + +There are two main categories of tools that can aid in +parallel debugging: + +* *Debuggers:* Both serial and parallel debuggers are useful. Serial + debuggers are what most programmers are used to (e.g., gdb), while + parallel debuggers can attach to all the individual processes in an + MPI job simultaneously, treating the MPI application as a single + entity. This can be an extremely powerful abstraction, allowing the + user to control every aspect of the MPI job, manually replicate race + conditions, etc. + +* *Profilers:* Tools that analyze your usage of MPI and display + statistics and meta information about your application's run. Some + tools present the information \"live\" (as it occurs), while others + collect the information and display it in a post mortem analysis. + +Both freeware and commercial solutions are available for each kind of +tool. + +///////////////////////////////////////////////////////////////////////// + +What controls does Open MPI have that aid in debugging? 
+------------------------------------------------------- + +Open MPI has a series of MCA parameters for the MPI layer +itself that are designed to help with debugging. These parameters can +be can be set in the +usual ways. MPI-level MCA parameters can be displayed by invoking +the following command: + +.. code-block:: sh + + # Use "--level 9" to see all the MCA parameters + # (the default is "--level 1"): + shell$ ompi_info --param mpi all --level 9 + +Here is a summary of the debugging parameters for the MPI layer: + +* ``mpi_param_check``: If set to true (any positive value), and when + Open MPI is compiled with parameter checking enabled (the default), + the parameters to each MPI function can be passed through a series + of correctness checks. Problems such as passing illegal values + (e.g., NULL or ``MPI_DATATYPE_NULL`` or other "bad" values) will be + discovered at run time and an MPI exception will be invoked (the + default of which is to print a short message and abort the entire + MPI job). If set to false, these checks are disabled, slightly + increasing performance. + +* ``mpi_show_handle_leaks``: If set to true (any positive value), + Open MPI will display lists of any MPI handles that were not freed before + ``MPI_FINALIZE`` (e.g., communicators, datatypes, requests, etc.) + +* ``mpi_no_free_handles``: If set to true (any positive value), do not + actually free MPI objects when their corresponding MPI "free" + function is invoked (e.g., do not free communicators when + ``MPI_COMM_FREE`` is invoked). This can be helpful in tracking down + applications that accidentally continue to use MPI handles after + they have been freed. + +* ``mpi_show_mca_params``: If set to true (any positive value), show a + list of all MCA parameters and their values during ``MPI_INIT``. + This can be quite helpful for reproducibility of MPI applications. 
+ +* ``mpi_show_mca_params_file``: If set to a non-empty value, and if + the value of ``mpi_show_mca_params`` is true, then output the list + of MCA parameters to the filename value. If this parameter is an + empty value, the list is sent to ``stderr``. + +* ``mpi_keep_peer_hostnames``: If set to a true value (any positive + value), send the list of all hostnames involved in the MPI job to + every process in the job. This can help the specificity of error + messages that Open MPI emits if a problem occurs (i.e., Open MPI can + display the name of the peer host that it was trying to communicate + with), but it can somewhat slow down the startup of large-scale MPI + jobs. + +* ``mpi_abort_delay``: If nonzero, print out an identifying message + when ``MPI_ABORT`` is invoked showing the hostname and PID of the + process that invoked ``MPI_ABORT``, and then delay that many seconds + before exiting. A negative value means to delay indefinitely. This + allows a user to manually come in and attach a debugger when an + error occurs. Remember that the default MPI error handler |mdash| + ``MPI_ERRORS_ABORT`` |mdash| invokes ``MPI_ABORT``, so this + parameter can be useful to discover problems identified by + ``mpi_param_check``. + +* ``mpi_abort_print_stack``: If nonzero, print out a stack trace (on + supported systems) when ``MPI_ABORT`` is invoked. + +* ``mpi_ddt__debug``, where ```` can be one of ``pack``, + ``unpack``, ``position``, or ``copy``: These are internal debugging + features that are not intended for end users (but ``ompi_info`` will + report that they exist). + +///////////////////////////////////////////////////////////////////////// + +Do I need to build Open MPI with compiler/linker debugging flags (such as ``-g``) to be able to debug MPI applications? +----------------------------------------------------------------------------------------------------------------------- + +No. 
+ +If you build Open MPI without compiler/linker debugging flags (such as +``-g``), you will not be able to step inside MPI functions +when you debug your MPI applications. However, this is likely what +you want |mdash| the internals of Open MPI are quite complex and you +probably don't want to start poking around in there. + +You'll need to compile your own applications with ``-g`` (or whatever +your compiler's equivalent is), but unless you have a need/desire to +be able to step into MPI functions to see the internals of Open MPI, +you do not need to build Open MPI with ``-g``. + +///////////////////////////////////////////////////////////////////////// + +Can I use serial debuggers (such as ``gdb``) to debug MPI applications? +----------------------------------------------------------------------- + +Yes; the Open MPI developers do this all the time. + +There are two common ways to use serial debuggers. + +Attach to individual MPI processes after they are running +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +For example, launch your MPI application as normal with ``mpirun``. +Then login to the node(s) where your application is running and use +the ``--pid`` option to ``gdb`` to attach to your application. + +An inelegant-but-functional technique commonly used with this method +is to insert the following code in your application where you want to +attach: + +.. code-block:: c + + { + volatile int i = 0; + char hostname[256]; + gethostname(hostname, sizeof(hostname)); + printf("PID %d on %s ready for attach\n", getpid(), hostname); + fflush(stdout); + while (0 == i) + sleep(5); + } + +This code will output a line to stdout outputting the name of the host +where the process is running and the PID to attach to. It will then +spin on the ``sleep()`` function forever waiting for you to attach +with a debugger. Using ``sleep()`` as the inside of the loop means +that the processor won't be pegged at 100% while waiting for you to +attach. 
+ +Once you attach with a debugger, go up the function stack until you +are in this block of code (you'll likely attach during the +``sleep()``) then set the variable ``i`` to a nonzero value. With +GDB, the syntax is: + +.. code-block:: sh + + (gdb) set var i = 7 + +Then set a breakpoint after your block of code and continue execution +until the breakpoint is hit. Now you have control of your live MPI +application and use of the full functionality of the debugger. + +You can even add conditionals to only allow this "pause" in the +application for specific MPI processes (e.g., ``MPI_COMM_WORLD`` rank +0, or whatever process is misbehaving). + +Use ``mpirun`` to launch separate instances of serial debuggers +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +This technique launches a separate window for each MPI process in +``MPI_COMM_WORLD``, each one running a serial debugger (such as +``gdb``) that will launch and run your MPI application. Having a +separate window for each MPI process can be quite handy for low +process-count MPI jobs, but requires a bit of setup and configuration +that is outside of Open MPI to work properly. A naive approach would +be to assume that the following would immediately work: + +.. code-block:: sh + + shell$ mpirun -np 4 xterm -e gdb my_mpi_application + +If running on a personal computer, this will probably work. You can +also use `tmpi `_ to launch the +debuggers in separate ``tmux`` panes instead of separate ``xterm`` +windows, which has the advantage of synchronizing keyboard input +between all debugger instances. + +Unfortunately, the ``tmpi`` or ``xterm`` approaches likely *won't* +work on an computing cluster. Several factors must be considered: + +#. What launcher is Open MPI using? In an ``ssh``-based environment, + Open MPI will default to using ``ssh`` when it is available, + falling back to ``rsh`` when ``ssh`` cannot be found in the + ``$PATH``. 
But note that Open MPI closes the ``ssh`` (or ``rsh``) + sessions when the MPI job starts for scalability reasons. This + means that the built-in SSH X forwarding tunnels will be shut down + before the ``xterms`` can be launched. Although it is possible to + force Open MPI to keep its SSH connections active (to keep the X + tunneling available), we recommend using non-SSH-tunneled X + connections, if possible (see below). + +#. In non-``ssh`` environments (such as when using resource managers), + the environment of the process invoking ``mpirun`` may be copied to + all nodes. In this case, the ``DISPLAY`` environment variable may + not be suitable. + +#. Some operating systems default to disabling the X11 server from + listening for remote/network traffic. For example, see `this post + on the Open MPI user's mailing list + `_ + describing how to enable network access to the X11 server on Fedora + Linux. + +#. There may be intermediate firewalls or other network blocks that + prevent X traffic from flowing between the hosts where the MPI + processes (and ``xterm``) are running and the host connected to + the output display. + +The easiest way to get remote X applications (such as ``xterm``) to +display on your local screen is to forego the security of SSH-tunneled +X forwarding. In a closed environment such as an HPC cluster, this +may be an acceptable practice (indeed, you may not even have the +option of using SSH X forwarding if SSH logins to cluster nodes are +disabled), but check with your security administrator to be sure. + +If using non-encrypted X11 forwarding is permissible, we recommend the +following: + +#. For each non-local host where you will be running an MPI process, + add it to your X server's permission list with the ``xhost`` + command. For example: + + .. code-block:: sh + + shell$ cat my_hostfile + inky + blinky + stinky + clyde + shell$ for host in `cat my_hostfile` ; do xhost +host ; done + +#. 
Use the ``-x`` option to ``mpirun`` to export an appropriate
+   DISPLAY variable so that the launched X applications know where to
+   send their output. An appropriate value is *usually* (but not
+   always) the hostname containing the display where you want the
+   output and the ``:0`` (or ``:0.0``) suffix. For example:
+
+   .. code-block:: sh
+
+      shell$ hostname
+      arcade.example.com
+      shell$ mpirun -np 4 --hostfile my_hostfile \
+          -x DISPLAY=arcade.example.com:0 xterm -e gdb my_mpi_application
+
+   .. warning:: X traffic is fairly "heavy" |mdash| if you are
+      operating over a slow network connection, it may take
+      some time before the ``xterm`` windows appear on your
+      screen.
+
+#. If your ``xterm`` supports it, the ``-hold`` option may be useful.
+   ``-hold`` tells ``xterm`` to stay open even when the application
+   has completed. This means that if something goes wrong (e.g.,
+   ``gdb`` fails to execute, or unexpectedly dies, or ...), the
+   ``xterm`` window will stay open, allowing you to see what happened,
+   instead of closing immediately and losing whatever error message
+   may have been output.
+
+#. When you have finished, you may wish to disable X11 network
+   permissions from the hosts that you were using. Use ``xhost``
+   again to disable these permissions:
+
+   .. code-block:: sh
+
+      shell$ for host in `cat my_hostfile` ; do xhost -host ; done
+
+.. note:: ``mpirun`` will not complete until all the ``xterm``
+   instances are complete.
+
+//////////////////////////////////////////////////////////
+
+My process dies without any output. Why?
+-----------------------------------------
+
+There may be many reasons for this; the Open MPI Team strongly
+encourages the use of tools (such as debuggers) whenever possible.
+
+One of the reasons, however, may come from inside Open MPI itself. If
+your application fails due to memory corruption, Open MPI may
+subsequently fail to output an error message before dying.
+Specifically, starting with v1.3, Open MPI attempts to aggregate error +messages from multiple processes in an attempt to show unique error +messages only once (vs. one for each MPI process |mdash| which can be +unwieldy, especially when running large MPI jobs). + +However, this aggregation process requires allocating memory in the +MPI process when it displays the error message. If the process's +memory is already corrupted, Open MPI's attempt to allocate memory may +fail and the process will simply die, possibly silently. When Open +MPI does not attempt to aggregate error messages, most of its setup +work is done during MPI_INIT and no memory is allocated during the +"print the error" routine. It therefore almost always successfully +outputs error messages in real time |mdash| but at the expense that you'll +potentially see the same error message for *each* MPI process that +encountered the error. + +Hence, the error message aggregation is _usually_ a good thing, but +sometimes it can mask a real error. You can disable Open MPI's error +message aggregation with the ``orte_base_help_aggregate`` MCA +parameter. For example: + +.. code-block:: sh + + shell$ mpirun --mca orte_base_help_aggregate 0 ... + +////////////////////////////////////////////////////////// + +What is Memchecker? +------------------- + +The Memchecker allows MPI semantic +checking for your application (as well as internals of Open MPI), with +the help of memory checking tools such as the Memcheck of `the +Valgrind suite `_. + +///////////////////////////////////////////////////////////////////////// + +What kind of errors can Memchecker find? +---------------------------------------- + +Memchecker is implemented on the basis of the Memcheck tool from +Valgrind, so it takes all the advantages from it. Firstly, it checks +all reads and writes of memory, and intercepts calls to +malloc/new/free/delete. 
Most importantly, Memchecker is able to detect
+the user buffer errors in both Non-blocking and One-sided
+communications, e.g. reading or writing to buffers of active
+non-blocking Recv-operations and writing to buffers of active
+non-blocking Send-operations.
+
+Here are some example codes that Memchecker can detect:
+
+Accessing buffer under control of non-blocking communication:
+
+.. code-block:: c
+
+   int buf;
+   MPI_Irecv(&buf, 1, MPI_INT, 1, 0, MPI_COMM_WORLD, &req);
+   // The following line will produce a memchecker warning
+   buf = 4711;
+   MPI_Wait (&req, &status);
+
+Wrong input parameters, e.g., wrong-sized send buffers:
+
+.. code-block:: c
+
+   char *send_buffer;
+   send_buffer = malloc(5);
+   memset(send_buffer, 0, 5);
+   // The following line will produce a memchecker warning
+   MPI_Send(send_buffer, 10, MPI_CHAR, 1, 0, MPI_COMM_WORLD);
+
+Accessing a window in a one-sided communication:
+
+.. code-block:: c
+
+   MPI_Get(A, 10, MPI_INT, 1, 0, 1, MPI_INT, win);
+   A[0] = 4711;
+   MPI_Win_fence(0, win);
+
+Uninitialized input buffers:
+
+.. code-block:: c
+
+   char *buffer;
+   buffer = malloc(10);
+   // The following line will produce a memchecker warning
+   MPI_Send(buffer, 10, MPI_INT, 1, 0, MPI_COMM_WORLD);
+
+Usage of the uninitialized ``MPI_ERROR`` field in the ``MPI_Status``
+structure: (the MPI-1 standard defines the ``MPI_ERROR`` field to be
+undefined for single-completion calls such as ``MPI_WAIT`` or
+``MPI_TEST``, see MPI-1 p. 22):
+
+.. code-block:: c
+
+   MPI_Wait(&request, &status);
+   // The following line will produce a memchecker warning
+   if (status.MPI_ERROR != MPI_SUCCESS)
+       return ERROR;
+
+/////////////////////////////////////////////////////////////////////////
+
+How do I build Open MPI with Memchecker support?
+------------------------------------------------
+
+To use Memchecker, you need Valgrind 3.2.0 or later, and have an Open
+MPI that was configured with the ``--enable-memchecker`` and
+``--enable-debug`` flags.
+
+.. note:: The Memchecker functionality is off by default, because it
+   incurs a performance penalty.
+
+When ``--enable-memchecker`` is specified, ``configure`` will check
+for a recent-enough Valgrind distribution. If found, Open MPI will
+build Memchecker support.
+
+For example:
+
+.. code-block:: sh
+
+   shell$ ./configure --prefix=/path/to/openmpi --enable-debug \
+       --enable-memchecker --with-valgrind=/path/to/valgrind
+
+You can check that Open MPI was built with Memchecker support by using
+the ``ompi_info`` application:
+
+.. code-block:: sh
+
+   # The exact version numbers shown may be different for your Open
+   # MPI installation
+   shell$ ompi_info | grep memchecker
+        MCA memchecker: valgrind (MCA v1.0, API v1.0, Component v1.3)
+
+If you do not see the "MCA memchecker: valgrind" line, you probably
+didn't configure and install Open MPI correctly.
+
+/////////////////////////////////////////////////////////////////////////
+
+How do I run my MPI application with Memchecker?
+------------------------------------------------
+
+First of all, you have to make sure that Valgrind 3.2.0 or later is
+installed, and Open MPI is compiled with Memchecker support
+enabled. Then simply run your application with Valgrind, e.g.:
+
+.. code-block:: sh
+
+   shell$ mpirun -np 2 valgrind ./my_app
+
+Or if you enabled Memchecker, but you don't want to check the
+application at this time, then just run your application as
+usual. E.g.:
+
+.. code-block:: sh
+
+   shell$ mpirun -np 2 ./my_app
+
+/////////////////////////////////////////////////////////////////////////
+
+Does Memchecker cause performance degradation to my application?
+----------------------------------------------------------------
+
+The configure option ``--enable-memchecker`` (together with
+``--enable-debug``) *does* cause performance degradation, even if not
+running under Valgrind.
The following explains the mechanism and may +help in making the decision whether to provide a cluster-wide +installation with ``--enable-memchecker``. + +There are two cases: + +#. If run without Valgrind, the Valgrind ClientRequests (assembler + instructions added to the normal execution path for checking) do + not affect overall MPI performance. Valgrind ClientRequests are + explained in detail `in Valgrind's documentation + `_. + In the case of x86-64, ClientRequests boil down to the following + four rotate-left (ROL) and one xchange (XCHG) assembler instructions + from ``valgrind.h``: + + .. code-block:: c + + #define __SPECIAL_INSTRUCTION_PREAMBLE \ + "rolq \$3, %%rdi; rolq \$13, %%rdi\\n\\t" \ + "rolq \$61, %%rdi; rolq \$51, %%rdi\\n\\t" + + and + + .. We do not make the code block below as "c" because the Sphinx C + syntax highlighter fails to parse it as C and emits a warning. + So we might as well just leave it as a plan verbatim block + (i.e., not syntax hilighted). + + .. code-block:: + + __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ + /* %RDX = client_request ( %RAX ) */ \ + "xchgq %%rbx,%%rbx" \ + : "=d" (_zzq_result) \ + : "a" (& _zzq_args``0``), "0" (_zzq_default) \ + : "cc", "memory" \ + ); + + for every single ClientRequest. In the case of not running + Valgrind, these ClientRequest instructions do not change the + arithmetic outcome (rotating a 64-bit register left by 128-Bits, + exchanging a register with itself), except for the carry flag. + + The first request is checking whether we're running under Valgrind. + In case we're not running under Valgrind subsequent checks (aka ClientRequests) + are not done. + +#. If the application is run under Valgrind, performance is naturally reduced due + to the Valgrind JIT and the checking tool employed. + For costs and overheads of Valgrind's Memcheck tool on the SPEC 2000 Benchmark, + please see the excellent paper + `Valgrind: A Framework for Heavyweight Dynamic Binary Instrumentation + `_. 
+ For an evaluation of various internal implementation alternatives of Shadow Memory, please see + `Building Workload Characterization Tools with Valgrind + `_. + + +Further information and performance data with the NAS Parallel +Benchmarks may be found in the paper `Memory Debugging of MPI-Parallel +Applications in Open MPI +`_. + +///////////////////////////////////////////////////////////////////////// + +Is Open MPI "Valgrind-clean" or how can I identify real errors? +--------------------------------------------------------------- + +This issue has been raised many times on the mailing list, e.g., `such +as here +`_ +`and here +`_. + +There are many situations where Open MPI purposefully does not initialize and +subsequently communicates memory, e.g., by calling ``writev(2)``. +Furthermore, several cases are known where memory is not properly freed upon +``MPI_FINALIZE``. + +This certainly does not help distinguishing real errors from false positives. +Valgrind provides functionality to suppress errors and warnings from certain +function contexts. + +In an attempt to ease debugging using Valgrind, Open MPI provides a +so-called Valgrind-suppression file, that can be passed on the command +line: + +.. code-block:: sh + + shell$ mpirun -np 2 valgrind --suppressions=$PREFIX/share/openmpi/openmpi-valgrind.supp + +More information on suppression-files and how to generate them can be +found in `Valgrind's documentation +`_. diff --git a/docs/faq/fault-tolerance.rst b/docs/faq/fault-tolerance.rst new file mode 100644 index 00000000000..099a51296e9 --- /dev/null +++ b/docs/faq/fault-tolerance.rst @@ -0,0 +1,143 @@ +Fault Tolerance +=============== + +.. TODO Hello world + +///////////////////////////////////////////////////////////////////////// + +What is "fault tolerance"? +-------------------------- + +The phrase "fault tolerance" means many things to many +people. 
Typical definitions range from user processes dumping vital +state to disk periodically to checkpoint/restart of running processes +to elaborate recreate-process-state-from-incremental-pieces schemes to +... (you get the idea). + +In the scope of Open MPI, we typically define "fault tolerance" to +mean the ability to recover from one or more component failures in a +well defined manner with either a transparent or application-directed +mechanism. Component failures may exhibit themselves as a corrupted +transmission over a faulty network interface or the failure of one or +more serial or parallel processes due to a processor or node failure. +Open MPI strives to provide the application with a consistent system +view while still providing a production quality, high performance +implementation. + +Yes, that's pretty much as all-inclusive as possible |mdash| intentionally +so! Remember that in addition to being a production-quality MPI +implementation, Open MPI is also a vehicle for research. So while +some forms of "fault tolerance" are more widely accepted and used, +others are certainly of valid academic interest. + +///////////////////////////////////////////////////////////////////////// + +What fault tolerance techniques has / does / will Open MPI support? +------------------------------------------------------------------- + +Open MPI was a vehicle for research in fault tolerance and over the years provided +support for a wide range of resilience techniques: + +* Current + * User Level Fault Mitigation techniques similar to + those implemented in FT-MPI. + +* Deprecated / no longer available + * Coordinated and uncoordinated process checkpoint and + restart. Similar to those implemented in LAM/MPI and MPICH-V, + respectively. + * Message logging techniques. Similar to those implemented in + MPICH-V + * Data Reliability and network fault tolerance. 
Similar to those
+    implemented in LA-MPI
+
+The Open MPI team will not limit their fault tolerance techniques to
+those mentioned above, but intend on extending beyond them in the
+future.
+
+/////////////////////////////////////////////////////////////////////////
+
+Does Open MPI support checkpoint and restart of parallel jobs (similar to LAM/MPI)?
+-----------------------------------------------------------------------------------
+
+Old versions of OMPI (starting from v1.3 series) had support for
+the transparent, coordinated checkpointing and restarting of MPI
+processes (similar to LAM/MPI).
+
+Open MPI supported both the `BLCR `_
+checkpoint/restart system and a "self" checkpointer that allows
+applications to perform their own checkpoint/restart functionality while taking
+advantage of the Open MPI checkpoint/restart infrastructure.
+For both of these, Open MPI provides a coordinated checkpoint/restart protocol
+and integration with a variety of network interconnects including shared memory,
+Ethernet, and InfiniBand.
+
+The implementation introduces a series of new frameworks and
+components designed to support a variety of checkpoint and restart
+techniques. This allows us to support the methods described above
+(application-directed, BLCR, etc.) as well as other kinds of
+checkpoint/restart systems (e.g., Condor, libckpt) and protocols
+(e.g., uncoordinated, message induced).
+
+.. note:: The
+   checkpoint/restart support was last released as part of the v1.6
+   series.
+
+/////////////////////////////////////////////////////////////////////////
+
+Where can I find the fault tolerance development work?
+------------------------------------------------------
+
+The only active work in resilience in Open MPI
+targets the User Level Fault Mitigation (ULFM) approach, a
+technique discussed in the context of the MPI standardization
+body.
+
+For information on the Fault Tolerant MPI prototype in Open MPI see the
+links below:
+
+* `MPI Forum's Fault Tolerance Working Group `_
+* Fault Tolerant MPI Prototype:
+  * `Development / code `_
+  * `Information and support `_
+
+Support for other types of resilience (e.g., :ref:`data reliability <faq-ft-data-reliability-label>`,
+checkpoint) has been deprecated over the years
+due to lack of adoption and lack of maintenance. If you are interested
+in doing some archeological work, traces are still available on the main
+repository.
+
+/////////////////////////////////////////////////////////////////////////
+
+.. _faq-ft-data-reliability-label:
+
+Does Open MPI support end-to-end data reliability in MPI message passing?
+-------------------------------------------------------------------------
+
+Current Open MPI releases have no support for end-to-end data
+reliability, at least not more than currently provided by the
+underlying network.
+
+The data reliability PML component (``dr``), which was available
+in some past releases but has since been deprecated, assumed that the
+underlying network is unreliable. It could drop / restart connections,
+retransmit corrupted or lost data, etc. The end effect is that data
+sent through MPI API functions will be guaranteed to be reliable.
+
+For example, if you're using TCP as a message transport, chances of
+data corruption are fairly low. However, other interconnects do *not*
+guarantee that data will be uncorrupted when traveling across the
+network. Additionally, there are nonzero possibilities that data can
+be corrupted while traversing PCI buses, etc. (some corruption errors
+at this level can be caught/fixed, others cannot). Such errors are
+not uncommon at high altitudes (!).
+
+Note that such added reliability does incur a performance cost |mdash|
+latency and bandwidth suffer when Open MPI performs the consistency
+checks that are necessary to provide such guarantees.
+
+Most clusters/networks do not need data reliability.
But some do +(e.g., those operating at high altitudes). The ``dr`` PML was intended for +these rare environments where reliability was an issue; and users were +willing to tolerate slightly slower applications in order to guarantee +that their job does not crash (or worse, produce wrong answers). diff --git a/docs/faq/general-tuning.rst b/docs/faq/general-tuning.rst new file mode 100644 index 00000000000..5b67bb239ed --- /dev/null +++ b/docs/faq/general-tuning.rst @@ -0,0 +1,816 @@ +General Tuning +============== + +.. TODO How can I create a TOC just for this page here at the top? + +///////////////////////////////////////////////////////////////////////// + +What is the Modular Component Architecture (MCA)? +------------------------------------------------- + +The Modular Component Architecture (MCA) is the backbone for much of +Open MPI's functionality. It is a series of *projects*, *frameworks*, +*components*, and *modules* that are assembled at run-time to create +an MPI implementation. + +* **Projects:** An Open MPI project is essentially the highest + abstraction layer division of code. + + .. note:: The word "project" is unfortunately overloaded. It can be + used to mean the code/resources/people in the greater Open + MPI community associated with the development of a + particular software package, but it can also be used to + mean a section of code within the Open MPI code base. + + For the purposes of this documentation, "project" means + the latter: a section of code within the Open MPI code + base. + +* **Frameworks:** An MCA framework manages zero or more components at + run-time and is targeted at a specific task (e.g., providing MPI + collective operation functionality). Each MCA framework supports a + single component type, but may support multiple versions of that + type. The framework uses the services from the MCA base + functionality to find and/or load components. 
+
+* **Components:** An MCA component is an implementation of a
+  framework's interface. It is a standalone collection of code that
+  can be bundled into a plugin that can be inserted into the Open MPI
+  code base, either at run-time and/or compile-time.
+
+* **Modules:** An MCA module is an instance of a component (in the C++
+  sense of the word "instance"; an MCA component is analogous to a C++
+  class). For example, if a node running an Open MPI application has
+  multiple ethernet NICs, the Open MPI application will contain one
+  TCP MPI point-to-point *component*, but two TCP point-to-point
+  *modules*.
+
+/////////////////////////////////////////////////////////////////////////
+
+What are MCA parameters?
+------------------------
+
+MCA parameters are the basic unit of run-time tuning for Open
+MPI. They are simple "key = value" pairs that are used extensively
+throughout the code base. The general rules of thumb that the
+developers use are:
+
+#. Instead of using a constant for an important value, make it an MCA
+   parameter.
+#. If a task can be implemented in multiple, user-discernible ways,
+   implement as many as possible and make choosing between them be an MCA
+   parameter.
+
+For example, an easy MCA parameter to describe is the boundary between
+short and long messages in TCP wire-line transmissions. "Short"
+messages are sent eagerly whereas "long" messages use a rendezvous
+protocol. The decision point between these two protocols is the
+overall size of the message (in bytes). By making this value an MCA
+parameter, it can be changed at run-time by the user or system
+administrator to use a sensible value for a particular environment or
+set of hardware (e.g., a value suitable for 1 Gbps Ethernet is probably
+not suitable for 100 Gigabit Ethernet, and may require even a third
+different value for 40 Gigabit Ethernet).
+
+Note that MCA parameters may be set in several different ways
+(described in another FAQ entry). This allows, for example, system
+administrators to fine-tune the Open MPI installation for their
+hardware / environment such that normal users can simply use the
+default values.
+
+More specifically, HPC environments |mdash| and the applications that run
+on them |mdash| tend to be unique. Providing extensive run-time tuning
+capabilities through MCA parameters allows the customization of Open
+MPI to each system's / user's / application's particular needs.
+
+/////////////////////////////////////////////////////////////////////////
+
+What projects are included in the Open MPI code base?
+-----------------------------------------------------
+
+The following *projects* exist in Open MPI |ompi_ver|:
+
+* **Open Portability Access Layer (OPAL):** Low-level, operating
+  system and architecture portability code.
+* **Open MPI (OMPI):** The MPI API and supporting infrastructure.
+* **OpenSHMEM (OSHMEM):** The OpenSHMEM API and supporting
+  infrastructure.
+
+.. note:: Prior versions of Open MPI also included an Open MPI
+   Runtime Environment (ORTE) project. ORTE essentially
+   evolved into the standalone `PMIx Runtime Reference
+   Environment (PRRTE) `_,
+   and is now considered a 3rd-party dependency of Open MPI
+   -- not one of its included projects.
+
+/////////////////////////////////////////////////////////////////////////
+
+What frameworks are in Open MPI?
+--------------------------------
+
+Each project has its own frameworks.
+
+.. error:: TODO This question may be moot due to :doc:`this list
+   already in the higher-level doc `.
+
+
+/////////////////////////////////////////////////////////////////////////
+
+How do I know what components are in my Open MPI installation?
+--------------------------------------------------------------
+
+The ``ompi_info`` command, in addition to providing a wealth of
+configuration information about your Open MPI installation, will list
+all components (and the frameworks that they belong to) that are
+available. These include system-provided components as well as
+user-provided components.
+
+Please note that starting with Open MPI v1.8, ``ompi_info`` categorizes its
+parameters in so-called levels, as defined by the MPI_T
+interface. You will need to specify ``--level 9`` (or
+``--all``) to show *all* MCA parameters.
+`See this Cisco Blog entry
+`_
+for further information.
+
+/////////////////////////////////////////////////////////////////////////
+
+.. _faq-general-tuning-install-components:
+
+How do I install my own components into an Open MPI installation?
+-----------------------------------------------------------------
+
+By default, Open MPI looks in two places for components at
+run-time (in order):
+
+#. ``$prefix/lib/openmpi/``: This is the system-provided components
+   directory, part of the installation tree of Open MPI itself.
+#. ``$HOME/.openmpi/components/``: This is where users can drop their
+   own components that will automatically be "seen" by Open MPI at
+   run-time. This is ideal for developmental, private, or otherwise
+   unstable components.
+
+Note that the directories and search ordering used for finding
+components in Open MPI is, itself, an MCA parameter. Setting the
+``mca_component_path`` changes this value (a colon-delimited list of
+directories).
+
+Note also that components are only used on nodes where they are
+"visible". Hence, if your ``$prefix/lib/openmpi/`` is a directory on a
+local disk that is not shared via a network filesystem to other nodes
+where you run MPI jobs, then components that are installed to that
+directory will *only* be used by MPI jobs running on the local node.
+
+More specifically: components have the same visibility as normal
+files. If you need a component to be available to all nodes where you
+run MPI jobs, then you need to ensure that it is visible on all nodes
+(typically either by installing it on all nodes for non-networked
+filesystem installs, or by installing them in a directory that is
+visible to all nodes via a networked filesystem). Open MPI does not
+automatically send components to remote nodes when MPI jobs are run.
+
+/////////////////////////////////////////////////////////////////////////
+
+How do I know what MCA parameters are available?
+------------------------------------------------
+
+The ``ompi_info`` command can list the parameters for a given
+component, all the parameters for a specific framework, or all
+parameters. Most parameters contain a description of the parameter;
+all will show the parameter's current value.
+
+For example, the following shows all the MCA parameters for all
+components that ``ompi_info`` finds:
+
+.. code-block:: sh
+
+   # Starting with Open MPI v1.7, you must use "--level 9" to see
+   # all the MCA parameters (the default is "--level 1"):
+   shell$ ompi_info --param all all --level 9
+
+   # Before Open MPI v1.7, the "--level" command line option
+   # did not exist; do not use it.
+   shell$ ompi_info --param all all
+
+This example shows all the MCA parameters for all BTL components that
+``ompi_info`` finds:
+
+.. code-block:: sh
+
+   # All remaining examples assume Open MPI v1.7 or later (i.e.,
+   # they assume the use of the "--level" command line option)
+   shell$ ompi_info --param btl all --level 9
+
+This example shows all the MCA parameters for the TCP BTL component:
+
+.. code-block:: sh
+
+   shell$ ompi_info --param btl tcp --level 9
+
+/////////////////////////////////////////////////////////////////////////
+
+.. _faq-general-tuning-setting-mca-params:
+
+How do I set the value of MCA parameters?
+----------------------------------------- + +There are multiple ways to set MCA parameters, each of which are +listed below, and are resolved in the following priority order: + +#. **Command line:** The highest-precedence method is setting MCA + parameters on the command line. For example: + + .. code-block:: sh + + shell$ mpirun --mca mpi_show_handle_leaks 1 -np 4 a.out + + This sets the MCA parameter ``mpi_show_handle_leaks`` to the value + of 1 before running ``a.out`` with four processes. In general, the + format used on the command line is ``--mca ``. + + Note that when setting multi-word values, you need to use quotes to + ensure that the shell and Open MPI understand that they are a + single value. For example: + + .. code-block:: sh + + shell$ mpirun --mca param "value with multiple words" ... + +#. **Environment variable:** Next, environment variables are searched. + Any environment variable named ``OMPI_MCA_`` will be + used. For example, the following has the same effect as the + previous example (for sh-flavored shells): + + .. code-block:: sh + + shell$ OMPI_MCA_mpi_show_handle_leaks=1 + shell$ export OMPI_MCA_mpi_show_handle_leaks + shell$ mpirun -np 4 a.out + + Note that setting environment variables to values with multiple words + requires quoting, such as: + + .. code-block:: sh + + shell$ OMPI_MCA_param="value with multiple words" + +#. **Tuning MCA parameter files:** Simple text files can be used to + set MCA parameter values for a specific application. :ref:`See this FAQ + entry for more details `. + +#. **Aggregate MCA parameter files:** Simple text files can be used to + set MCA parameter values for a specific application. :ref:`See this FAQ + entry for more details `. + + .. warning:: The use of AMCA param files is deprecated. + +#. **Files:** Finally, simple text files can be used to set MCA + parameter values. Parameters are set one per line (comments are + permitted). For example: + + .. 
code-block:: ini + + # This is a comment + # Set the same MCA parameter as in previous examples + mpi_show_handle_leaks = 1 + + Note that quotes are *not* necessary for setting multi-word values + in MCA parameter files. Indeed, if you use quotes in the MCA + parameter file, they will be used as part of the value itself. For + example: + + .. code-block:: ini + + # The following two values are different: + param1 = value with multiple words + param2 = "value with multiple words" + + By default, two files are searched (in order): + + #. ``$HOME/.openmpi/mca-params.conf``: The user-supplied set of + values takes the highest precedence. + #. ``$prefix/etc/openmpi-mca-params.conf``: The system-supplied set + of values has a lower precedence. + + More specifically, the MCA parameter ``mca_param_files`` specifies + a colon-delimited path of files to search for MCA parameters. + Files to the left have lower precedence; files to the right are + higher precedence. + + .. note:: Keep in mind that, just like components, these parameter + files are *only* relevant where they are "visible" + (:ref:`see this FAQ entry + `). Specifically, + Open MPI does not read all the values from these files + during startup and then send them to all nodes in the job + |mdash| the files are read on each node during each + process' startup. This is intended behavior: it allows + for per-node customization, which is especially relevant + in heterogeneous environments. + +///////////////////////////////////////////////////////////////////////// + +.. _faq-general-tuning-amca-param-files: + +What are Aggregate MCA (AMCA) parameter files? +---------------------------------------------- + +.. error:: TODO This entire entry needs to be checked for correctness. + Are AMCA files actually deprecated? + +.. warning:: The use of AMCA param files is still available in Open + MPI |ompi_ver|, but is deprecated, and may disappear + in a future version of Open MPI. 
+
+Aggregate MCA (AMCA) parameter files contain MCA parameter key/value
+pairs similar to the ``$HOME/.openmpi/mca-params.conf`` file described
+in :ref:`this FAQ entry `.
+
+The motivation behind AMCA parameter sets came from the realization
+that certain applications require a large number of MCA parameters
+to run well and/or execute as the user expects. Since these MCA
+parameters are application-specific (or even application-run-specific)
+they should not be set in a global manner, but only pulled in as
+determined by the user.
+
+MCA parameters set in AMCA parameter files will override any MCA
+parameters supplied in global parameter files (e.g.,
+``$HOME/.openmpi/mca-params.conf``), but not command line or
+environment parameters.
+
+AMCA parameter files are typically supplied on the command line via
+the ``--am`` option.
+
+For example, consider an AMCA parameter file called ``foo.conf``
+placed in the same directory as the application ``a.out``. A user
+will typically run the application as:
+
+.. code-block:: sh
+
+   shell$ mpirun -np 2 a.out
+
+To use the ``foo.conf`` AMCA parameter file, this command line
+changes to:
+
+.. code-block:: sh
+
+   shell$ mpirun -np 2 --am foo.conf a.out
+
+If the user wants to override a parameter set in ``foo.conf`` they
+can add it to the command line:
+
+.. code-block:: sh
+
+   shell$ mpirun -np 2 --am foo.conf --mca btl tcp,self a.out
+
+AMCA parameter files can be coupled if more than one file is to be
+used. If we have another AMCA parameter file called ``bar.conf``
+that we want to use, we add it to the command line as follows:
+
+.. code-block:: sh
+
+   shell$ mpirun -np 2 --am foo.conf:bar.conf a.out
+
+AMCA parameter files are loaded in priority order. This means that
+``foo.conf`` AMCA file has priority over the ``bar.conf`` file. So
+if the ``bar.conf`` file sets the MCA parameter
+``mpi_leave_pinned=0`` and the ``foo.conf`` file sets this MCA
+parameter to ``mpi_leave_pinned=1`` then the latter will be used. 
+
+The location of AMCA parameter files is resolved in a similar way as
+the shell:
+
+#. If no path operator is provided (i.e., ``foo.conf``), then
+   Open MPI will search the ``$sysconfdir/amca-param-sets`` directory,
+   then the current working directory.
+#. If a relative path is specified, then only that path will be
+   searched (e.g., ``./foo.conf``, ``baz/foo.conf``).
+#. If an absolute path is specified, then only that path will be
+   searched (e.g., ``/bip/boop/foo.conf``).
+
+Although the typical use case for AMCA parameter files is to be
+specified on the command line, they can also be set as MCA parameters
+in the environment. The MCA parameter ``mca_base_param_file_prefix``
+contains a ``:``-delimited list of AMCA parameter files exactly as
+they would be passed to the ``--am`` command line option. The MCA
+parameter ``mca_base_param_file_path`` specifies the path to search
+for AMCA files with relative paths. By default this is
+``$sysconfdir/amca-param-sets/:$CWD``.
+
+/////////////////////////////////////////////////////////////////////////
+
+.. _faq-general-tuning-tune-param-files:
+
+How do I set application specific environment variables in global parameter files?
+----------------------------------------------------------------------------------
+
+.. error:: TODO This entire entry needs to be checked for correctness.
+
+The ``mpirun`` ``--tune`` CLI option allows users to specify both MCA
+parameters and environment variables from within a single file.
+
+MCA parameters set in tuned parameter files will override any MCA
+parameters supplied in global parameter files (e.g.,
+``$HOME/.openmpi/mca-params.conf``), but not command line or
+environment parameters.
+
+Tuned parameter files are typically supplied on the command line via
+the ``--tune`` option.
+
+For example, consider a tuned parameter file called ``foo.conf``
+placed in the same directory as the application ``a.out``. A user
+will typically run the application as:
+
+.. 
code-block:: sh
+
+   shell$ mpirun -np 2 a.out
+
+To use the ``foo.conf`` tuned parameter file, this command line
+changes to:
+
+.. code-block:: sh
+
+   shell$ mpirun -np 2 --tune foo.conf a.out
+
+Tuned parameter files can be coupled if more than one file is to be
+used. If we have another tuned parameter file called ``bar.conf``
+that we want to use, we add it to the command line as follows:
+
+.. code-block:: sh
+
+   shell$ mpirun -np 2 --tune foo.conf,bar.conf a.out
+
+
+The contents of tuned files consist of one or more lines, each of
+which contains zero or more ``-x`` and ``--mca`` options. Comments are not
+allowed. For example, the following tuned file:
+
+.. code-block::
+
+   -x envvar1=value1 -mca param1 value1 -x envvar2
+   -mca param2 value2
+   -x envvar3
+
+is equivalent to:
+
+.. code-block:: sh
+
+   shell$ mpirun \
+       -x envvar1=value1 -mca param1 value1 -x envvar2 \
+       -mca param2 value2 \
+       -x envvar3 \
+       ...rest of mpirun command line...
+
+Although the typical use case for tuned parameter files is to be
+specified on the command line, they can also be set as MCA parameters
+in the environment. The MCA parameter ``mca_base_envvar_file_prefix``
+contains a ``,``-delimited list of tuned parameter files exactly as
+they would be passed to the ``--tune`` command line option. The MCA
+parameter ``mca_base_envvar_file_path`` specifies the path to search
+for tune files with relative paths.
+
+.. error:: TODO Check that these MCA var names ^^ are correct.
+
+/////////////////////////////////////////////////////////////////////////
+
+How do I select which components are used?
+------------------------------------------
+
+Each MCA framework has a top-level MCA parameter that helps guide
+which components are selected to be used at run-time. Specifically,
+there is an MCA parameter of the same name as each MCA framework that
+can be used to *include* or *exclude* components from a given run. 
+ +For example, the ``btl`` MCA parameter is used to control which BTL +components are used (e.g., MPI point-to-point communications; +:doc:`see the MCA frameworks listing ` for a full +listing). It can take as a value a comma-separated list of components +with the optional prefix ``^``. For example: + +.. code-block:: sh + + # Tell Open MPI to exclude the tcp and uct BTL components + # and implicitly include all the rest + shell$ mpirun --mca btl ^tcp,uct ... + + # Tell Open MPI to include *only* the components listed here and + # implicitly ignore all the rest (i.e., the loopback, shared memory, + # etc.) MPI point-to-point components): + shell$ mpirun --mca btl self,sm,usnic ... + +Note that ``^`` can *only* be the prefix of the entire value because +the inclusive and exclusive behavior are mutually exclusive. +Specifically, since the exclusive behavior means "use all components +*except* these", it does not make sense to mix it with the inclusive +behavior of not specifying it (i.e., "use all of these components"). +Hence, something like this: + +.. code-block:: sh + + shell$ mpirun --mca btl self,sm,usnic,^tcp ... + +does not make sense because it says both "use only the ``self``, ``sm``, +and ``usnic`` components" and "use all components except ``tcp``" and +will result in an error. + +Just as with all MCA parameters, the ``btl`` parameter (and all +framework parameters) :ref:`can be set in multiple ways +`. + +///////////////////////////////////////////////////////////////////////// + +What is processor affinity? Does Open MPI support it? +------------------------------------------------------ + +Open MPI supports processor affinity on a variety of systems through +process binding, in which each MPI process, along with its threads, is +"bound" to a specific subset of processing resources (cores, packages, +etc.). That is, the operating system will constrain that process to +run on only that subset. + +.. 
note:: The operating system may allow other processes to run on the + same resources. + +Affinity can improve performance by inhibiting excessive process +movement |mdash| for example, away from "hot" caches or NUMA memory. +Judicious bindings can improve performance by reducing resource +contention (by spreading processes apart from one another) or +improving interprocess communications (by placing processes close to +one another). Binding can also improve performance reproducibility by +eliminating variable process placement. + +.. warning:: Processor affinity probably should *not* be used when a + node is over-subscribed (i.e., more processes are + launched than there are processors). + + This can lead to a serious degradation in performance + (even more than simply oversubscribing the node). Open + MPI will usually detect this situation and automatically + disable the use of processor affinity (and display + run-time warnings to this effect). + +///////////////////////////////////////////////////////////////////////// + +What is memory affinity? Does Open MPI support it? +--------------------------------------------------- + +Memory affinity is increasingly relevant on modern servers +because most architectures exhibit Non-Uniform Memory Access (NUMA) +architectures. In a NUMA architecture, memory is physically +distributed throughout the machine even though it is virtually treated +as a single address space. That is, memory may be physically local to +one or more processors |mdash| and therefore remote to other processors. + +Simply put: some memory will be faster to access (for a given process) +than others. + +Open MPI supports general and specific memory affinity, meaning that +it generally tries to allocate all memory local to the processor that +asked for it. When shared memory is used for communication, Open MPI +uses memory affinity to make certain pages local to specific +processes in order to minimize memory network/bus traffic. 
+
+Open MPI supports memory affinity on a variety of systems.
+
+In recent versions of Open MPI, memory affinity is controlled through
+the `Hardware Locality (hwloc)
+<https://www.open-mpi.org/projects/hwloc/>`_ library.
+
+Note that memory affinity support is enabled
+*only when processor affinity is enabled.* Specifically: using memory
+affinity does not make sense if processor affinity is not enabled
+because processes may allocate local memory and then move to a
+different processor, potentially remote from the memory that it just
+allocated.
+
+/////////////////////////////////////////////////////////////////////////
+
+How do I tell Open MPI to use processor and/or memory affinity?
+---------------------------------------------------------------
+
+Open MPI will, by default, enable processor and memory affinity when
+not running in an oversubscribed environment (i.e., when the number of
+MPI processes is less than or equal to the number of processors
+available).
+
+The ``mpirun(1)`` man page for each version of Open MPI contains a lot of
+information about the use of processor and memory affinity. You
+should consult the ``mpirun(1)`` page for your version of Open MPI for
+detailed information about processor/memory affinity.
+
+.. error:: TODO Link to mpirun(1) ...?
+
+/////////////////////////////////////////////////////////////////////////
+
+Does Open MPI support calling fork(), system(), or popen() in MPI processes?
+----------------------------------------------------------------------------
+
+It depends on a lot of factors, including (but not limited to):
+
+* The operating system
+* The underlying compute hardware
+* The network stack
+* Interactions with other middleware in the MPI process
+
+In some cases, Open MPI will determine that it is not safe to
+``fork()``. In these cases, Open MPI will register a
+``pthread_atfork()`` callback to print a warning when the process
+forks. 
+ +This warning is helpful for legacy MPI applications where the current +maintainers are unaware that ``system()`` or ``popen()`` is being invoked from +an obscure subroutine nestled deep in millions of lines of Fortran code +(we've seen this kind of scenario many times). + +However, this atfork handler can be dangerous because there is no way +to *unregister* an atfork handler. Hence, packages that +dynamically open Open MPI's libraries (e.g., Python bindings for Open +MPI) may fail if they finalize and unload libmpi, but later call +fork. The atfork system will try to invoke Open MPI's atfork handler; +nothing good can come of that. + +For such scenarios, or if you simply want to disable printing the +warning, Open MPI can be set to never register the atfork handler with +the ``mpi_warn_on_fork`` MCA parameter. For example: + +.. code-block:: sh + + shell$ mpirun --mca mpi_warn_on_fork 0 ... + +Of course, systems that ``dlopen("libmpi.so", ...)`` may not use Open +MPI's ``mpirun``, and therefore may need to use :ref:`a different +mechanism to set MCA parameters +`. + +///////////////////////////////////////////////////////////////////////// + +I want to run some performance benchmarks with Open MPI. How do I do that? +--------------------------------------------------------------------------- + +Running benchmarks is an extremely difficult task to do correctly. +There are many, many factors to take into account; it is *not* as +simple as just compiling and running a stock benchmark application. +This documentation is by no means a definitive guide, but it does try +to offer some suggestions for generating accurate, meaningful +benchmarks. + +#. Decide *exactly* what you are benchmarking and setup your system + accordingly. 
For example, if you are trying to benchmark maximum + performance, then many of the suggestions listed below are + extremely relevant (be the only user on the systems and network in + question, be the only software running, use processor affinity, + etc.). If you're trying to benchmark average performance, some of + the suggestions below may be less relevant. Regardless, it is + critical to *know* exactly what you're trying to benchmark, and + *know* (not guess) both your system and the benchmark application + itself well enough to understand what the results mean. + + To be specific, many benchmark applications are not well understood + for exactly what they are testing. There have been many cases + where users run a given benchmark application and wrongfully + conclude that their system's performance is bad |mdash| solely on + the basis of a single benchmark that they did not understand. Read + the documentation of the benchmark carefully, and possibly even + look into the code itself to see exactly what it is testing. + + Case in point: not all ping-pong benchmarks are created equal. + Most users assume that a ping-pong benchmark is a ping-pong + benchmark is a ping-pong benchmark. But this is not true; the + common ping-pong benchmarks tend to test subtly different things + (e.g., NetPIPE, TCP bench, IMB, OSU, etc.). *Make sure you + understand what your benchmark is actually testing.* + +#. Make sure that you are the *only* user on the systems where you are + running the benchmark to eliminate contention from other + processes. + +#. Make sure that you are the *only* user on the entire network / + interconnect to eliminate network traffic contention from other + processes. This is usually somewhat difficult to do, especially in + larger, shared systems. But your most accurate, repeatable results + will be achieved when you are the only user on the entire network. + +#. Disable all services and daemons that are not being used. 
Even + "harmless" daemons consume system resources (such as RAM) and cause + "jitter" by occasionally waking up, consuming CPU cycles, reading + or writing to disk, etc. The optimum benchmark system has an + absolute minimum number of system services running. + +#. Ensure that processor and memory affinity are properly utilized to + disallow the operating system from swapping MPI processes between + processors (and causing unnecessary cache thrashing, for example). + + .. warning:: On NUMA architectures, having the processes getting + bumped from one socket to another is more expensive in + terms of cache locality (with all of the cache + coherency overhead that comes with the lack of it) + than in terms of memory transfer routing (see below). + +#. Be sure to understand your system's architecture, particularly with + respect to the memory, disk, and network characteristics, and test + accordingly. For example, on NUMA architectures, memory accesses + may be routed through a memory interconnect; remote device and/or + memory accesses will be noticeably slower than local device and/or + memory accesses. + +#. Compile your benchmark with the appropriate compiler optimization + flags. With some MPI implementations, the compiler wrappers (like + ``mpicc``, ``mpifort``, etc.) add optimization flags + automatically. Open MPI does not. Add ``-O`` or other flags + explicitly. + +#. Make sure your benchmark runs for a sufficient amount of time. + Short-running benchmarks are generally less accurate because they + take fewer samples; longer-running jobs tend to take more samples. + +#. If your benchmark is trying to benchmark extremely short events + (such as the time required for a single ping-pong of messages): + + * Perform some "warmup" events first. Many MPI implementations + (including Open MPI) |mdash| and other subsystems upon which the + MPI uses |mdash| may use "lazy" semantics to setup and maintain + streams of communications. 
Hence, the first event (or first few + events) may well take significantly longer than subsequent + events. + + * Use a high-resolution timer if possible |mdash| + ``gettimeofday()`` only returns millisecond precision (sometimes + on the order of several microseconds). + + * Run the event many, many times (hundreds or thousands, depending + on the event and the time it takes). Not only does this provide + more samples, it may also be necessary, especially when the + precision of the timer you're using may be several orders of + magnitude less precise than the event you're trying to + benchmark. + +#. Decide whether you are reporting minimum, average, or maximum + numbers, and have good reasons why. + +#. Accurately label and report all results. Reproducibility is a + major goal of benchmarking; benchmark results are effectively + useless if they are not precisely labeled as to exactly what they + are reporting. Keep a log and detailed notes about the ''exact'' + system configuration that you are benchmarking. Note, for example, + all hardware and software characteristics (to include hardware, + firmware, and software versions as appropriate). + +///////////////////////////////////////////////////////////////////////// + +I am getting a MPI_WIN_FREE error from IMB-EXT |mdash| what do I do? +-------------------------------------------------------------------- + +When you run IMB-EXT with Open MPI, you'll see a +message like this: + +.. code-block:: + + [node01.example.com:2228] *** An error occurred in MPI_Win_free + [node01.example.com:2228] *** on win + [node01.example.com:2228] *** MPI_ERR_RMA_SYNC: error while executing rma sync + [node01.example.com:2228] *** MPI_ERRORS_ARE_FATAL (your MPI job will now abort) + +This is due to a bug in the Intel MPI Benchmarks, known to be in at +least versions v3.1 and v3.2. Intel was notified of this bug in May +of 2009. If you have a version after then, the bug should be fixed. 
+If not, here is the fix that you can apply to the IMB-EXT source code +yourself. + +Here is a small patch that fixes the bug in IMB v3.2: + +.. code-block:: diff + + diff -u imb-3.2-orig/src/IMB_window.c imb-3.2-fixed/src/IMB_window.c + --- imb-3.2-orig/src/IMB_window.c 2008-10-21 04:17:31.000000000 -0400 + +++ imb-3.2-fixed/src/IMB_window.c 2009-07-20 09:02:45.000000000 -0400 + @@ -140,6 +140,9 @@ + c_info->rank, 0, 1, c_info->r_data_type, + c_info->WIN); + MPI_ERRHAND(ierr); + } + + /* Added a call to MPI_WIN_FENCE, per MPI-2.1 11.2.1 */ + + ierr = MPI_Win_fence(0, c_info->WIN); + + MPI_ERRHAND(ierr); + ierr = MPI_Win_free(&c_info->WIN); + MPI_ERRHAND(ierr); + } + +And here is the corresponding patch for IMB v3.1: + +.. code-block:: diff + + Index: IMB_3.1/src/IMB_window.c + =================================================================== + --- IMB_3.1/src/IMB_window.c(revision 1641) + +++ IMB_3.1/src/IMB_window.c(revision 1642) + @@ -140,6 +140,10 @@ + c_info->rank, 0, 1, c_info->r_data_type, c_info->WIN); + MPI_ERRHAND(ierr); + } + + /* Added a call to MPI_WIN_FENCE here, per MPI-2.1 + + 11.2.1 */ + + ierr = MPI_Win_fence(0, c_info->WIN); + + MPI_ERRHAND(ierr); + ierr = MPI_Win_free(&c_info->WIN); + MPI_ERRHAND(ierr); + } diff --git a/docs/faq/index.rst b/docs/faq/index.rst new file mode 100644 index 00000000000..d2109311b68 --- /dev/null +++ b/docs/faq/index.rst @@ -0,0 +1,31 @@ +.. Open MPI FAQ + + This page will likely eventually go away (i.e., the "FAQ" section + will likely disappear -- its contents will likely just be folded in + elsewhere in the document). + + +Frequently Asked Questions (FAQ) +================================ + +The pages below include questions that we are asked frequently enough +that they are worth categorizing in an official way. + +.. 
toctree:: + :maxdepth: 1 + + supported-systems + sysadmin + building-open-mpi + running-mpi-apps + fault-tolerance + troubleshooting + + debugging + large-clusters + + ompio + macos + + tuning + general-tuning diff --git a/docs/faq/large-clusters.rst b/docs/faq/large-clusters.rst new file mode 100644 index 00000000000..80db5a0930a --- /dev/null +++ b/docs/faq/large-clusters.rst @@ -0,0 +1,202 @@ +Large Clusters +============== + +.. TODO How can I create a TOC just for this page here at the top? + +///////////////////////////////////////////////////////////////////////// + +How do I reduce startup time for jobs on large clusters? +-------------------------------------------------------- + +There are several ways to reduce the startup time on large +clusters. Some of them are described on this page. We continue to work +on making startup even faster, especially on the large clusters coming +in future years. + +Open MPI |ompi_ver| is significantly faster and more robust than its +predecessors. We recommend that anyone running large jobs and/or on +large clusters make the upgrade to the |ompi_series| series. + +Several major launch time enhancements have been made starting with the +v3.0 release. Most of these take place in the background |mdash| i.e., there +is nothing you (as a user) need do to take advantage of them. However, +there are a few that are left as options until we can assess any potential +negative impacts on different applications. + +Some options are available when launching via ``mpirun`` or when launching using +the native resource manager launcher (e.g., ``srun`` in a Slurm environment). +These are activated by setting the corresponding MCA parameter, and include: + +* Setting the ``pmix_base_async_modex`` MCA parameter will eliminate a + global out-of-band collective operation during ``MPI_INIT``. This + operation is performed in order to share endpoint information prior + to communication. 
At scale, this operation can take some time and + scales at best logarithmically. Setting the parameter bypasses the + operation and causes the system to lookup the endpoint information + for a peer only at first message. Thus, instead of collecting + endpoint information for all processes, only the endpoint + information for those processes this peer communicates with will be + retrieved. The parameter is especially effective for applications + with sparse communication patterns |mdash| i.e., where a process + only communicates with a few other peers. Applications that use + dense communication patterns (i.e., where a peer communicates + directly to all other peers in the job) will probably see a negative + impact of this option. + + .. note:: This option is only available in PMIx-supporting + environments, or when launching via ``mpirun`` + +* The ``async_mpi_init`` parameter is automatically set to ``true`` + when the ``pmix_base_async_modex`` parameter has been set, but can + also be independently controlled. When set to ``true``, this parameter + causes ``MPI_Init`` to skip an out-of-band barrier operation at the end + of the procedure that is not required whenever direct retrieval of + endpoint information is being used. + +* Similarly, the ``async_mpi_finalize`` parameter skips an out-of-band + barrier operation usually performed at the beginning of + ``MPI_FINALIZE``. Some transports (e.g., the ``usnic`` BTL) require this + barrier to ensure that all MPI messages are completed prior to + finalizing, while other transports handle this internally and thus + do not require the additional barrier. Check with your transport + provider to be sure, or you can experiment to determine the proper + setting. + +///////////////////////////////////////////////////////////////////////// + +.. _faq-large-clusters-network-vs-local: + +Where should I put my libraries: Network vs. local filesystems? 
+--------------------------------------------------------------- + +Open MPI itself doesn't really care where its libraries and plugins +are stored. However, where they are stored does have an impact on +startup times, particularly for large clusters, which can be mitigated +somewhat through use of Open MPI's configuration options. + +Startup times will always be minimized by storing the libraries and +plugins local to each node, either on local disk or in ramdisk. The +latter is sometimes problematic since the libraries do consume some +space, thus potentially reducing memory that would have been available +for MPI processes. + +There are two main considerations for large clusters that need to +place the Open MPI libraries on networked file systems: + +* While dynamic shared objects ("DSO") are more flexible, you + definitely do *not* want to use them when the Open MPI libraries + will be mounted on a network file system! Doing so will lead to + significant network traffic and delayed start times, especially on + clusters with a large number of nodes. Instead, be sure to configure + your build with ``--disable-dlopen``. This will include the DSO's in + the main libraries, resulting in much faster startup times. + +* Many networked file systems use automount for user level + directories, as well as for some locally administered system + directories. There are many reasons why system administrators may + choose to automount such directories. MPI jobs, however, tend to + launch very quickly, thereby creating a situation wherein a large + number of nodes will nearly simultaneously demand automount of a + specific directory. This can overload NFS servers, resulting in + delayed response or even failed automount requests. + + Note that this applies to both automount of directories containing + Open MPI libraries as well as directories containing user + applications. 
Since these are unlikely to be the same location, + multiple automount requests from each node are possible, thus + increasing the level of traffic. + +///////////////////////////////////////////////////////////////////////// + +Static vs. shared libraries? +---------------------------- + +It is perfectly fine to use either shared or static +libraries. Shared libraries will save memory when operating multiple +processes per node, especially on clusters with high numbers of cores +on a node, but can also take longer to launch on networked file +systems. + +.. note:: Be sure to also see :ref:`this FAQ entry about network + vs. local storage ` for + suggestions on how to mitigate such problems. + +///////////////////////////////////////////////////////////////////////// + +How do I reduce the time to wireup OMPI's out-of-band communication system? +--------------------------------------------------------------------------- + +Open MPI's run-time uses an *out-of-band* (OOB) communication +subsystem to pass messages during the launch, initialization, and +termination stages for the job. These messages allow ``mpirun`` to tell +its daemons what processes to launch, and allow the daemons in turn to +forward stdio to ``mpirun``, update ``mpirun`` on process status, etc. + +The OOB uses TCP sockets for its communication, with each daemon +opening a socket back to ``mpirun`` upon startup. In a large cluster, +this can mean thousands of connections being formed on the node where +``mpirun`` resides, and requires that ``mpirun`` actually process all +these connection requests. ``mpirun`` defaults to processing +connection requests sequentially |mdash| so on large clusters, a +backlog can be created that can cause remote daemons to timeout +waiting for a response. + +Fortunately, Open MPI provides an alternative mechanism for processing +connection requests that helps alleviate this problem. 
Setting the MCA +parameter ``oob_tcp_listen_mode`` to ``listen_thread`` causes +``mpirun`` to startup a separate thread dedicated to responding to +connection requests. Thus, remote daemons receive a quick response to +their connection request, allowing ``mpirun`` to deal with the message +as soon as possible. + +.. error:: TODO This seems very out of date. We should have content + about PMIx instant on. + +This parameter can be included in the default MCA parameter file, +placed in the user's environment, or added to the ``mpirun`` command +line. See :ref:`this FAQ entry ` +for more details on how to set MCA parameters. + +///////////////////////////////////////////////////////////////////////// + +I know my cluster's configuration - how can I take advantage of that knowledge? +------------------------------------------------------------------------------- + +Clusters rarely change from day-to-day, and large clusters rarely +change at all. If you know your cluster's configuration, there are +several steps you can take to both reduce Open MPI's memory footprint +and reduce the launch time of large-scale applications. These steps +use a combination of build-time configuration options to eliminate +components |mdash| thus eliminating their libraries and avoiding +unnecessary component open/close operations |mdash| as well as +run-time MCA parameters to specify what modules to use by default for +most users. + +One way to save memory is to avoid building components that will +actually never be selected by the system. Unless MCA parameters +specify which components to open, built components are always opened +and tested as to whether or not they should be selected for use. If +you know that a component can build on your system, but due to your +cluster's configuration will never actually be selected, then it is +best to simply configure OMPI to not build that component by using the +``--enable-mca-no-build`` configure option. 
+ +For example, if you know that your system will only utilize the +``ob1`` component of the PML framework, then you can ``no_build`` all +the others. This not only reduces memory in the libraries, but also +reduces memory footprint that is consumed by Open MPI opening all the +built components to see which of them can be selected to run. + +In some cases, however, a user may optionally choose to use a +component other than the default. For example, you may want to build +all of the PRRTE ``routed`` framework components, even though the vast +majority of users will simply use the default ``debruijn`` +component. This means you have to allow the system to build the other +components, even though they may rarely be used. + +You can still save launch time and memory, though, by setting the +``routed=debruijn`` MCA parameter in the default MCA parameter file. +This causes OMPI to not open the other components during startup, but +allows users to override this on their command line or in their +environment so no functionality is lost |mdash| you just save some +memory and time. diff --git a/docs/faq/macos.rst b/docs/faq/macos.rst new file mode 100644 index 00000000000..ade27ff1c53 --- /dev/null +++ b/docs/faq/macos.rst @@ -0,0 +1,104 @@ +MacOS +===== + +.. TODO How can I create a TOC just for this page here at the top? + +///////////////////////////////////////////////////////////////////////// + +How does Open MPI handle HFS+ / UFS / AFP filesystems? +------------------------------------------------------ + +Generally, Open MPI does not care whether it is running from an Apple +filesystem. However, the C++ wrapper compiler historically has been +called ``mpiCC``, which of course is the same file as ``mpicc`` when +running on a case-insensitive filesystem such as HFS+. + +During the ``configure`` process, Open MPI will attempt to determine +if the build filesystem is case sensitive or not, and assume the +install file system is the same way. 
Generally, this is all that is +needed to deal with HFS+. + +You can force ``configure`` to build for a case-sensitive filesystem +by using the ``--with-cs-fs`` CLI option, or force ``configure`` to +build for a case insensitive filesystem by using ``--without-cs-fs``. + +///////////////////////////////////////////////////////////////////////// + +How do I use the Open MPI wrapper compilers in XCode? +----------------------------------------------------- + +XCode has a non-public interface for adding compilers to XCode. A +friendly Open MPI user sent in a configuration file for XCode 2.3 +``MPICC.pbcompspec``, which will add support for the +Open MPI wrapper compilers. + +Create a file named ``/Library/Application Support/Apple/Developer +Tools/Specifications/MPICC.pbcompspec`` in a text editor and put the +following content in it: + +.. code-block:: + + /** + Xcode Compiler Specification for MPICC + */ + + { + Type = Compiler; + Identifier = com.apple.compilers.mpicc; + BasedOn = com.apple.compilers.gcc.4_0; + Name = "MPICC"; + Version = "Default"; + Description = "MPI GNU C/C++ Compiler 4.0"; + ExecPath = "/usr/local/bin/mpicc"; // This gets converted to the g++ variant automatically + PrecompStyle = pch; + } + +Upon starting XCode, this file is loaded and added to the list of +known compilers. + +.. warning:: This file has not been tested since XCode 2.3. YMMV. + +To use the ``mpicc`` compiler: open the project, get info on the +target, click the rules tab, and add a new entry. Change the process +rule for "C source files" and select "using MPICC". + +Before moving the file, the ``ExecPath`` parameter should be set +to the location of the Open MPI install. The ``BasedOn`` parameter +should be updated to refer to the compiler version that ``mpicc`` +will invoke — generally ``gcc-4.0`` on OS X 10.4 machines. + +Thanks to Karl Dockendorf for this information. 
+ +///////////////////////////////////////////////////////////////////////// + +How do I get Open MPI for MacOS? +-------------------------------- + +There are two main options for installing Open MPI on MacOS: + +#. Use a package manager, such as `Homebrew `_ or + `MacPorts `_. For example: + + .. code-block:: sh + + # For Homebrew + shell$ brew install openmpi + + # For MacPorts + shell$ port install openmpi + +#. Install Open MPI from source. :doc:`See the installation section + of this documentation ` for more details. + + .. warning:: Ensure to install a Fortran compiler if you want Open + MPI to build the Fortran MPI interfaces. For + simplicity, the Open MPI team recommends using + Homebrew or MacPorts to install a Fortran compiler. + +///////////////////////////////////////////////////////////////////////// + +I'm getting weird messages about filenames that are too long +------------------------------------------------------------ + +.. error:: TODO Find out specific information about how MacOS's tmpdir + is very long, and macOS users may need to redefine it. diff --git a/docs/faq/ompio.rst b/docs/faq/ompio.rst new file mode 100644 index 00000000000..d17f4c5eec8 --- /dev/null +++ b/docs/faq/ompio.rst @@ -0,0 +1,330 @@ +Open MPI IO ("OMPIO") +===================== + +.. TODO How can I create a TOC just for this page here at the top? + +///////////////////////////////////////////////////////////////////////// + +What is the OMPIO? +------------------ + +OMPIO is an implementation of the MPI I/O functions defined in version +two of the Message Passing Interface specification. The main goals of +OMPIO are: + +#. Increase the modularity of the parallel I/O library by separating + MPI I/O functionality into sub-frameworks. + +#. Allow frameworks to utilize different run-time decision algorithms + to determine which module to use in a particular scenario, enabling + non-file-system-specific decisions. + +#. 
Improve the integration of parallel I/O functions with other + components of Open MPI, most notably the derived data type engine + and the progress engine. The integration with the derived data type + engine allows for faster decoding of derived data types and the + usage of optimized data type to data type copy operations. + +OMPIO is fundamentally a component of the ``io`` framework in Open +MPI. Upon opening a file, the OMPIO component initializes a number of +sub-frameworks and their components, namely: + +* ``fs``: responsible for all file management operations +* ``fbtl``: support for individual blocking and non-blocking + I/O operations +* ``fcoll``: support for collective blocking and non-blocking I/O + operations +* ``sharedfp``: support for all shared file pointer operations. + +///////////////////////////////////////////////////////////////////////// + +How can I use OMPIO? +-------------------- + +OMPIO is included in Open MPI and is used by default when invoking +MPI IO API functions. + +///////////////////////////////////////////////////////////////////////// + +How do I know what MCA parameters are available for tuning the performance of OMPIO? +------------------------------------------------------------------------------------ + +The ``ompi_info`` command can display all the parameters available for the +OMPIO ``io``, ``fcoll``, ``fs``, and ``sharedfp`` components: + +.. code-block:: sh + + shell$ ompi_info --param io ompio + shell$ ompi_info --param fcoll all + shell$ ompi_info --param fs all + shell$ ompi_info --param sharedfp all + +///////////////////////////////////////////////////////////////////////// + +How can I choose the right component for a sub-framework of OMPIO? +------------------------------------------------------------------ + +The OMPIO architecture is designed around sub-frameworks, which allow +you to develop a relatively small amount of code optimized for a +particular environment, application, or infrastructure. 
Although +significant efforts have been invested into making good decisions for +default values and switching points between components, users and/or +system administrators might occasionally want to tune the selection +logic of the components and force the utilization of a particular +component. + +The simplest way to force the usage of a component is to simply +restrict the list of available components for that framework. For +example, an application wanting to use the ``dynamic`` ``fcoll`` +component simply has to pass the name of the component as a value to +the corresponding MCA parameter during ``mpirun`` or any other +mechanism available in Open MPI to influence a parameter value, e.g.: + +.. code-block:: sh + + shell$ mpirun --mca fcoll dynamic -np 64 ./a.out + +``fs`` and ``fbtl`` components are typically chosen based on the file +system type utilized (e.g. the ``pvfs2`` component is chosen when the +file is located on a PVFS2 file system, the ``lustre`` component is +chosen for Lustre file systems, etc.). + +The ``fcoll`` framework provides several different implementations, +which provide different levels of data reorganization across +processes. ``two_phase``, ``dynamic`` segmentation, ``static`` +segmentation and ``individual`` provide decreasing communication costs +during the shuffle phase of the collective I/O operations (in the +order listed here), but also provide decreasing contiguity guarantees +of the data items before the aggregators read/write data to/from the +file. The current decision logic in OMPIO is using the file view +provided by the application as well as file system level +characteristics (stripe width of the file system) in the selection +logic of the fcoll framework. + +The ``sharedfp`` framework provides a different implementation of the +shared file pointer operations depending on file system features, such +as: + +* ``lockedfile``: support for file locking.
+* ``sm``: locality of the MPI processes in the communicator that has + been used to open the file. +* ``individual``: guarantees by the application on using only a subset + of the available functionality (i.e. write operations only). + +///////////////////////////////////////////////////////////////////////// + +How can I tune OMPIO parameters to improve performance? +------------------------------------------------------- + +The most important parameters influencing the performance of an I/O +operation are listed below: + +#. ``io_ompio_cycle_buffer_size``: Data size issued by individual + reads/writes per call. By default, an individual read/write + operation will be executed as one chunk. Splitting the operation up + into multiple, smaller chunks can lead to performance improvements + in certain scenarios. + +#. ``io_ompio_bytes_per_agg``: Size of temporary buffer for collective + I/O operations on aggregator processes. Default value is 32MB. + Tuning this parameter has a very high impact on the performance of + collective operations. + + .. note:: Be sure to also see recommendations for tuning collective + operations. + +#. ``io_ompio_num_aggregators``: Number of aggregators used in + collective I/O operations. Setting this parameter to a value + larger than zero disables the internal automatic aggregator selection + logic of OMPIO. Tuning this parameter has a very high impact on + the performance of collective operations. + + .. note:: Be sure to also see recommendations for tuning collective + operations. + +#. ``io_ompio_grouping_option``: Algorithm used to automatically + decide the number of aggregators used. Applications working with + regular 2-D or 3-D data decomposition can try changing this + parameter to 4 (hybrid) algorithm. + +///////////////////////////////////////////////////////////////////////// + +What are the main parameters of the ``fs`` framework and components?
+-------------------------------------------------------------------- + +The main parameters of the ``fs`` components allow you to manipulate +the layout of a new file on a parallel file system. + +#. ``fs_pvfs2_stripe_size``: Sets the number of storage servers for a + new file on a PVFS2 file system. If not set, system default will be + used. Note that this parameter can also be set through the + ``stripe_size`` MPI Info value. + +#. ``fs_pvfs2_stripe_width``: Sets the size of an individual block for + a new file on a PVFS2 file system. If not set, system default will + be used. Note that this parameter can also be set through the + ``stripe_width`` MPI Info value. + +#. ``fs_lustre_stripe_size``: Sets the number of storage servers for a + new file on a Lustre file system. If not set, system default will + be used. Note that this parameter can also be set through the + ``stripe_size`` MPI Info value. + +#. ``fs_lustre_stripe_width``: Sets the size of an individual block + for a new file on a Lustre file system. If not set, system default + will be used. Note that this parameter can also be set through the + ``stripe_width`` MPI Info value. + +//////////////////////////////////////////////////////////////////////// + +What are the main parameters of the ``fbtl`` framework and components? +---------------------------------------------------------------------- + +No performance relevant parameters are currently available for the +``fbtl`` components. + +///////////////////////////////////////////////////////////////////////// + +What are the main parameters of the ``fcoll`` framework and components? +----------------------------------------------------------------------- + +The design of the ``fcoll`` frameworks maximizes the utilization of +parameters of the OMPIO component, in order to minimize the number of similar +MCA parameters in each component. 
+ +For example, the ``two_phase``, ``dynamic``, and ``static`` components +all retrieve the ``io_ompio_bytes_per_agg`` parameter to define the +collective buffer size and the ``io_ompio_num_aggregators`` parameter +to force the utilization of a given number of aggregators. + +///////////////////////////////////////////////////////////////////////// + +What are the main parameters of the ``sharedfp`` framework and components? +-------------------------------------------------------------------------- + +No performance relevant parameters are currently available for the +``sharedfp`` components. + +///////////////////////////////////////////////////////////////////////// + +How do I tune collective I/O operations? +---------------------------------------- + +The most influential parameter that can be tuned in advance is the +``io_ompio_bytes_per_agg`` parameter of the ``ompio`` component. This +parameter is essential for the selection of the collective I/O +component as well as for determining the optimal number of aggregators +for a collective I/O operation. It is a file system-specific value, +independent of the application scenario. To determine the correct +value on your system, take an I/O benchmark (e.g., the IMB or IOR +benchmark) and run an individual, single process write test. E.g., for +IMB: + +.. code-block:: sh + + shell$ mpirun -np 1 ./IMB-IO S_write_indv + +For IMB, use the values obtained for AGGREGATE test cases. Plot the +bandwidth over the message length. The recommended value for +``io_ompio_bytes_per_agg`` is the smallest message length which +achieves (close to) maximum bandwidth from that process's +perspective. + +.. note:: Make sure that the ``io_ompio_cycle_buffer_size`` parameter + is set to -1 when running this test (which is its default + value). The value of ``io_ompio_bytes_per_agg`` could be + set by system administrators in the system-wide Open MPI + configuration file, or by users individually.
See :ref:`this + FAQ item ` on setting + MCA parameters for details. + +For more exhaustive tuning of I/O parameters, we recommend the +utilization of the `Open Tool for Parameter Optimization (OTPO) +`_, a tool specifically +designed to explore the MCA parameter space of Open MPI. + +///////////////////////////////////////////////////////////////////////// + +When should I use the ``individual`` ``sharedfp`` component, and what are its limitations? +------------------------------------------------------------------------------------------ + +The ``individual`` sharedfp component provides an approximation of +shared file pointer operations that can be used for *write operations +only*. It is only recommended in scenarios where neither the ``sm`` +nor the ``lockedfile`` component can be used, e.g., due to the fact +that more than one node is being used and the file system does not +support locking. + +Conceptually, each process writes the data of a write_shared operation +into a separate file along with a time stamp. In every collective +operation (latest in file_close), data from all individual files are +merged into the actual output file, using the time stamps as the main +criteria. + +The component has certain limitations and restrictions, such as its +reliance on the synchronization accuracy of the clock on the cluster +to determine the order between entries in the final file, which might +lead to some deviations compared to the actual calling sequence. + +///////////////////////////////////////////////////////////////////////// + +What other features of OMPIO are available? +------------------------------------------- + +OMPIO has a number of additional features, mostly directed towards +developers, which could occasionally also be useful to interested +end-users. These can typically be controlled through MCA parameters.
+ +* ``io_ompio_sharedfp_lazy_open``: By default, ``ompio`` does not + establish the necessary data structures required for shared file + pointer operations during file_open. It delays generating these data + structures until the first utilization of a shared file pointer + routine. This is done mostly to minimize the memory footprint of + ``ompio``, and due to the fact that shared file pointer operations + are rarely used compared to the other functions. Setting this + parameter to 0 disables this optimization. + +* ``io_ompio_coll_timing_info``: Setting this parameter will lead to a + short report upon closing a file indicating the amount of time spent + in communication and I/O operations of collective I/O operations + only. + +* ``io_ompio_record_file_offset_info``: Setting this parameter will + report neighborhood relationship of processes based on the file view + used. This is occasionally important for understanding performance + characteristics of I/O operations. Note, that using this features + requires an additional compile time flag when compiling ``ompio``. + + The output file generated as a result of this flag provides the + access pattern of processes to the file recorded as neighborhood + relationships of processes as a matrix. For example, if the first + four bytes of a file are being accessed by process 0 and the next + four bytes by process 1, processes 0 and 1 are said to have a + neighborhood relationship since they access neighboring elements of + the file. For each neighborhood relation detected in the file, the + value for the corresponding pair of processes is increased by one. + + Data is provided in compressed row storage format. To minimize the + amount of data written using this feature, only non-zero values are + output. The first row in the output file indicates the number of + non-zero elements in the matrix; the second number is the number of + elements in the row index. 
The third row of the output file gives + all the column indexes. The fourth row lists all the values and the + fifth row gives the row index. A row index represents the position + in the value array where a new row starts. + +///////////////////////////////////////////////////////////////////////// + +Known limitations +----------------- + +OMPIO implements most of the I/O functionality of the MPI +specification. There are, however, two not very commonly used +functions that are not implemented as of today: + +* Switching from the relaxed consistency semantics of MPI to stricter, sequential + consistency through the MPI_File_set_atomicity functions + +* Using user defined data representations + +.. error:: TODO Are these still accurate? diff --git a/docs/faq/running-mpi-apps.rst b/docs/faq/running-mpi-apps.rst new file mode 100644 index 00000000000..673db026d3c --- /dev/null +++ b/docs/faq/running-mpi-apps.rst @@ -0,0 +1,1419 @@ +Running MPI applications +======================== + +.. TODO How can I create a TOC just for this page here at the top? + +///////////////////////////////////////////////////////////////////////// + +.. _faq-running-mpi-apps-run-prereqs-label: + +What prerequisites are necessary for running an Open MPI job? +------------------------------------------------------------- + +In general, Open MPI requires that its executables are in your +``PATH`` on every node on which you will run and if Open MPI was +compiled with dynamic libraries (which is the default), the directory +where its libraries are located must be in your ``LD_LIBRARY_PATH`` on +every node. + +For example, if Open MPI was installed with a prefix of ``/opt/openmpi``, +then the following should be in your ``PATH`` and ``LD_LIBRARY_PATH``: + +.. list-table:: + :header-rows: 1 + + * - Environment variable + - Value to add + + * - ``PATH`` + - ``/opt/openmpi/bin`` + + * - ``LD_LIBRARY_PATH`` + - ``/opt/openmpi/lib`` + +..
error:: TODO Josh H points out that we might also want to mention + ``OMPIHOME`` for PRRTE's ``.ini`` file here. Leaving this + as a future to-do item, since PRRTE's ``.ini`` file support + does not exist yet. + +Depending on your environment, you may need to set these values in +your shell startup files (e.g., ``.bashrc``, ``.cshrc``, etc.). + +.. note:: There are exceptions to this rule |mdash| see :ref:`this FAQ + entry ` for a + description of the ``--prefix`` option to ``mpirun``. + +See :ref:`this FAQ entry +` for more details on +how to add Open MPI to your ``PATH`` and ``LD_LIBRARY_PATH``. + +Additionally, Open MPI requires that jobs can be started on remote +nodes without any input from the keyboard. For example, if using +``ssh`` as the remote agent, you must have your environment setup to +allow execution on remote nodes without entering a password or +passphrase. + +///////////////////////////////////////////////////////////////////////// + +What ABI guarantees does Open MPI provide? +------------------------------------------ + +:ref:`See this section for a description of Open MPI's versioning and +ABI scheme `. The short version is: + +#. Open MPI is source code compatible across all versions. This means + that you can compile and link your compliant MPI application + against :ref:`any version of Open MPI that supports the version of + the MPI standard ` to + which your application was written. + +#. Open MPI provided forward application binary interface (ABI) + compatibility within a major series for MPI applications starting + with v1.3.2. Prior to that version, no ABI guarantees were + provided. + +#. Open MPI reserves the right to break ABI compatibility at new major + release series. + +///////////////////////////////////////////////////////////////////////// + +Do I need a common filesystem on all my nodes? +---------------------------------------------- + +No, but it certainly makes life easier if you do. 
+ +A common environment to run Open MPI is in a "Beowulf"-class or +similar cluster (e.g., a bunch of 1U servers in a bunch of racks). +Simply stated, Open MPI can run on a group of servers or workstations +connected by a network. As mentioned above, there are several +prerequisites, however (for example, you typically must have an +account on all the machines, you can ``ssh`` between the +nodes without using a password, etc.). + +Regardless of whether Open MPI is installed on a shared / networked +filesystem or independently on each node, it is usually easiest if +Open MPI is available in the same filesystem location on every node. +For example, if you install Open MPI to ``/opt/openmpi-$ver_current`` on +one node, ensure that it is available in ``/opt/openmpi-$ver_current`` +on *all* nodes. + +The :ref:`where to install +` FAQ question +contains some suggestions on where to install Open MPI. + +///////////////////////////////////////////////////////////////////////// + +.. _faq-running-mpi-apps-adding-ompi-to-path-label: + +How do I add Open MPI to my ``PATH`` and ``LD_LIBRARY_PATH``? +------------------------------------------------------------- + +Open MPI *must* be able to find its executables in your ``PATH`` +on every node (if Open MPI was compiled as dynamic libraries, then its +library path must appear in ``LD_LIBRARY_PATH`` as well). As such, your +configuration/initialization files need to add Open MPI to your ``PATH`` +/ ``LD_LIBRARY_PATH`` properly. + +How to do this may be highly dependent upon your local configuration; +you may need to consult with your local system administrator. Some +system administrators take care of these details for you, some don't. +YMMV. Some common examples are included below, however. + +You must have at least a minimum understanding of how your shell works +to get Open MPI in your ``PATH`` / ``LD_LIBRARY_PATH`` properly.
Note +that Open MPI must be added to your ``PATH`` and ``LD_LIBRARY_PATH`` +in the following situations: + +#. When you login to an interactive shell + + If your interactive login environment is not configured properly, + executables like ``mpicc`` will not be found, and it is typically + obvious what is wrong. The Open MPI executable directory can + manually be added to the ``PATH``, or the user's startup files can + be modified such that the Open MPI executables are added to the + ``PATH`` every login. This latter approach is preferred. + + All shells have some kind of script file that is executed at login + time to set things like ``PATH`` and ``LD_LIBRARY_PATH`` and + perform other environmental setup tasks. This startup file is the + one that needs to be edited to add Open MPI to the ``PATH`` and + ``LD_LIBRARY_PATH``. Consult the manual page for your shell for + specific details (some shells are picky about the permissions of + the startup file, for example). The table below lists some common + shells and the startup files that they read/execute upon login: + + .. list-table:: + :header-rows: 1 + :widths: 10 90 + + * - Shell + - Interactive login startup files + + * - ``bash`` + - ``.bash_profile`` if it exists, or ``.bash_login`` if it + exists, or ``.profile`` if it exists + + (in that order). Note that some Linux distributions + automatically come with + + ``.bash_profile`` scripts for users that automatically + execute ``.bashrc`` as well. + + Consult the ``bash(1)`` man page for more information. + + * - ``zsh`` + - ``.zshrc`` followed by ``.zshenv`` + + * - ``sh`` (or Bash + + named ``sh``) + - ``.profile`` + + * - ``csh`` + - ``.cshrc`` followed by ``.login`` + + * - ``tcsh`` + - ``.tcshrc`` if it exists, ``.cshrc`` if it does not, followed by + ``.login`` + +#. 
When you login to non-interactive shells on remote nodes + + If your non-interactive remote environment is not configured + properly, executables like ``mpirun`` will not function properly, + and it can be somewhat confusing to figure out. + + The startup files in question here are the ones that are + automatically executed for a non-interactive login on a remote node + (e.g., ``ssh othernode ps``). Note that not all shells support + this, and that some shells use different files for this than listed + for interactive logins. Some shells will supersede non-interactive + login startup files with files for interactive logins. That is, + running non-interactive login startup file *may* automatically + invoke interactive login startup file. The following table lists + some common shells and the startup file that is automatically + executed, either by Open MPI or by the shell itself: + + .. list-table:: + :header-rows: 1 + :widths: 10 90 + + * - Shell + - Non-interactive login startup files + + * - ``bash`` + - ``.bashrc`` if it exists + + * - ``zsh`` + - ``.zshrc`` followed by ``.zshenv`` + + * - ``sh`` (or Bash + + named ``sh``) + - This shell does not execute any file automatically, + + so Open MPI will execute the ``.profile`` script + + before invoking Open MPI executables on remote nodes + + * - ``csh`` + - ``.cshrc`` + + * - ``tcsh`` + - ``.tcshrc`` if it exists, ``.cshrc`` if it does not + +///////////////////////////////////////////////////////////////////////// + +.. _faq-running-mpi-apps-mpirun-prefix-label: + +What if I can't modify my ``PATH`` and/or ``LD_LIBRARY_PATH``? +-------------------------------------------------------------- + +There are some situations where you cannot modify the ``PATH`` or +``LD_LIBRARY_PATH`` |mdash| e.g., some ISV applications prefer to hide +all parallelism from the user, and therefore do not want to make the +user modify their shell startup files. 
Another case is where you want +a single user to be able to launch multiple MPI jobs simultaneously, +each with a different MPI implementation. Hence, setting shell +startup files to point to one MPI implementation would be problematic. + +In such cases, you have two options: + +#. Use ``mpirun``'s ``--prefix`` command line option (described + below). +#. Modify the wrapper compilers to include directives to include + run-time search locations for the Open MPI libraries. + +``mpirun``'s ``--prefix`` command line option takes as an argument the +top-level directory where Open MPI was installed. While relative +directory names are possible, they can become ambiguous depending on +the job launcher used; using absolute directory names is strongly +recommended. + +For example, say that Open MPI was installed into +``/opt/openmpi-$ver_current``. You would use the ``--prefix`` option +thusly: + +.. code-block:: + + shell$ mpirun --prefix /opt/openmpi-$ver_current -np 4 a.out + +This will prefix the ``PATH`` and ``LD_LIBRARY_PATH`` on both the +local and remote hosts with ``/opt/openmpi-$ver_current/bin`` and +``/opt/openmpi-$ver_current/lib``, respectively. This is *usually* +unnecessary when using resource managers to launch jobs (e.g., Slurm, +Torque, etc.) because they tend to copy the entire local environment +|mdash| to include the ``PATH`` and ``LD_LIBRARY_PATH`` |mdash| to +remote nodes before execution. As such, if ``PATH`` and +``LD_LIBRARY_PATH`` are set properly on the local node, the resource +manager will automatically propagate those values out to remote nodes. +The ``--prefix`` option is therefore usually most useful in +``ssh``-based environments (or similar). + +It is possible to make this the default behavior by passing to +``configure`` the flag ``--enable-mpirun-prefix-by-default``. This +will make ``mpirun`` behave exactly the same as ``mpirun --prefix +$prefix ...``, where ``$prefix`` is the value given to ``--prefix`` +in ``configure``. 
+ +Finally, note that specifying the absolute pathname to ``mpirun`` is +equivalent to using the ``--prefix`` argument. For example, the +following is equivalent to the above command line that uses +``--prefix``: + +.. code-block:: + + shell$ /opt/openmpi-$ver_current/bin/mpirun -np 4 a.out + +///////////////////////////////////////////////////////////////////////// + +How do I launch Open MPI parallel jobs? +--------------------------------------- + +Similar to many MPI implementations, Open MPI provides the commands +``mpirun`` and ``mpiexec`` to launch MPI jobs. Several of the +questions in this FAQ category deal with using these commands. + +Note, however, that in Open MPI, ``mpirun`` and ``mpiexec`` are +exactly identical. Specifically, they are symbolic links to a common +back-end launcher command. + +.. note:: The name of the back-end launcher command has changed over + time (it used to be ``orterun``, it is now ``prte``). This + back-end name is largely irrelevant to the user. + +The rest of this FAQ usually refers only to ``mpirun``, even though +the same discussions also apply to ``mpiexec`` (because they are both, +in fact, the same command). + +///////////////////////////////////////////////////////////////////////// + +.. _faq-running-mpi-apps-spmd-label: + +How do I run a simple SPMD MPI job? +----------------------------------- + +Open MPI provides both ``mpirun`` and ``mpiexec`` commands. A simple way +to start a single program, multiple data (SPMD) application in +parallel is: + +.. code-block:: + + shell$ mpirun -np 4 my_parallel_application + +This starts a four-process parallel application, running four copies +of the executable named ``my_parallel_application``. + +The ``rsh`` starter component accepts the ``--hostfile`` option (and +its synonym, the ``--machinefile`` option) to indicate on which hosts +to start the processes: + +.. 
code-block:: + + shell$ cat my_hostfile + host01.example.com + host02.example.com + shell$ mpirun --hostfile my_hostfile -np 4 my_parallel_application + +This command will launch one copy of ``my_parallel_application`` on +each of ``host01.example.com`` and ``host02.example.com``. + +More information about the ``--hostfile`` option, and hostfiles in +general, is available in :ref:`this FAQ entry +`. + +Note, however, that not all environments require a hostfile. For +example, Open MPI will automatically detect when it is running in +batch / scheduled environments (such as Slurm, PBS/Torque, SGE, +LoadLeveler), and will use host information provided by those systems. + +Also note that if using a launcher that requires a hostfile and no +hostfile is specified, all processes are launched on the local host. + +///////////////////////////////////////////////////////////////////////// + +How do I run an MPMD MPI job? +----------------------------- + +Both the ``mpirun`` and ``mpiexec`` commands support multiple program, +multiple data (MPMD) style launches, either from the command line or +from a file. For example: + +.. code-block:: + + shell$ mpirun -np 2 a.out : -np 2 b.out + +This will launch a single parallel application, but the first two +processes will be instances of the ``a.out`` executable, and the +second two processes will be instances of the ``b.out`` executable. +In MPI terms, this will be a single ``MPI_COMM_WORLD``, but the +``a.out`` processes will be ranks 0 and 1 in ``MPI_COMM_WORLD``, while +the ``b.out`` processes will be ranks 2 and 3 in ``MPI_COMM_WORLD``. + +``mpirun`` (and ``mpiexec``) can also accept a parallel application +specified in a file instead of on the command line. For example: + +.. code-block:: + + shell$ mpirun --app my_appfile + +where the file ``my_appfile`` contains the following: + +.. 
code-block:: sh + + # Comments are supported; comments begin with # + # Application context files specify each sub-application in the + # parallel job, one per line. The first sub-application is the 2 + # a.out processes: + -np 2 a.out + # The second sub-application is the 2 b.out processes: + -np 2 b.out + +This will result in the same behavior as running ``a.out`` and ``b.out`` +from the command line. + +Note that ``mpirun`` and ``mpiexec`` are identical in command-line options +and behavior; using the above command lines with ``mpiexec`` instead of +``mpirun`` will result in the same behavior. + +///////////////////////////////////////////////////////////////////////// + +How do I specify the hosts on which my MPI job runs? +---------------------------------------------------- + +There are three general mechanisms: + + +#. The ``--hostfile`` option to ``mpirun``. + + Use this option to specify a list of hosts on which to run. Note + that for compatibility with other MPI implementations, + ``--machinefile`` is a synonym for ``--hostfile``. See :ref:`this + FAQ entry ` for more + information about the ``--hostfile`` option. + +#. The ``--host`` option to ``mpirun``. + + This option can be used to specify a list of hosts on which to run + on the command line. See :ref:`this FAQ entry + ` for more information + about the ``--host`` option. + +#. Running in a scheduled environment. + + If you are running in a scheduled environment (e.g., in a Slurm, + Torque, or LSF job), Open MPI will automatically get the lists of + hosts from the scheduler. + +.. important:: The specification of hosts using any of the above + methods has nothing to do with the network interfaces + that are used for MPI traffic. The list of hosts is + *only* used for specifying which hosts on which to + launch MPI processes. + +///////////////////////////////////////////////////////////////////////// + +.. 
_faq-running-mpi-aps-diagnose-multi-host-problems-label: + +How can I diagnose problems when running across multiple hosts? +--------------------------------------------------------------- + +When you are able to run MPI jobs on a single host, but fail to run +them across multiple hosts, try the following: + +#. Ensure that your launcher is able to launch across multiple hosts. + For example, if you are using ``ssh``, try to ``ssh`` to each + remote host and ensure that you are not prompted for a password. + For example: + + .. code-block:: + + shell$ ssh remotehost hostname + remotehost + + If you are unable to launch across multiple hosts, check that your + SSH keys are setup properly. Or, if you are running in a managed + environment, such as in a Slurm, Torque, or other job launcher, + check that you have reserved enough hosts, are running in an + allocated job, etc. + +#. Ensure that your ``PATH`` and ``LD_LIBRARY_PATH`` are set correctly + on each remote host on which you are trying to run. For example, + with ``ssh``: + + .. code-block:: + + shell$ ssh remotehost env | grep -i path + PATH=...path on the remote host... + LD_LIBRARY_PATH=...LD library path on the remote host... + + If your ``PATH`` or ``LD_LIBRARY_PATH`` are not set properly, see + :ref:`this FAQ entry ` for + the correct values. Keep in mind that it is fine to have multiple + Open MPI installations installed on a machine; the *first* Open MPI + installation found by ``PATH`` and ``LD_LIBRARY_PATH`` is the one + that matters. + +#. Run a simple, non-MPI job across multiple hosts. This verifies + that the Open MPI run-time system is functioning properly across + multiple hosts. For example, try running the ``hostname`` command: + + .. code-block:: + + shell$ mpirun --host remotehost hostname + remotehost + shell$ mpirun --host remotehost,otherhost hostname + remotehost + otherhost + + If you are unable to run non-MPI jobs across multiple hosts, check + for common problems such as: + + #. 
Check your non-interactive shell setup on each remote host to + ensure that it is setting up the ``PATH`` and + ``LD_LIBRARY_PATH`` properly. + #. Check that Open MPI is finding and launching the correct + version of Open MPI on the remote hosts. + #. Ensure that you have firewalling disabled between hosts (Open + MPI opens random TCP and sometimes random UDP ports between + hosts in a single MPI job). + #. Try running with the ``plm_base_verbose`` MCA parameter at level + 10, which will enable extra debugging output to see how Open MPI + launches on remote hosts. For example: + + .. code-block:: + + mpirun --mca plm_base_verbose 10 --host remotehost hostname + +#. Now run a simple MPI job across multiple hosts that does not + involve MPI communications. The ``hello_c`` program in the + ``examples`` directory in the Open MPI distribution is a good + choice. This verifies that the MPI subsystem is able to initialize + and terminate properly. For example: + + .. code-block:: + + shell$ mpirun --host remotehost,otherhost hello_c + Hello, world, I am 0 of 2, (Open MPI v$ver_current, package: Open MPI jsquyres@example.com Distribution, ident: $ver_current, DATE) + Hello, world, I am 1 of 2, (Open MPI v$ver_current, package: Open MPI jsquyres@example.com Distribution, ident: $ver_current, DATE) + + If you are unable to run simple, non-communication MPI jobs, this + can indicate that your Open MPI installation is unable to + initialize properly on remote hosts. Double check your + non-interactive login setup on remote hosts. + +#. Now run a simple MPI job across multiple hosts that does some + simple MPI communications. The ``ring_c`` program in the + ``examples`` directory in the Open MPI distribution is a good + choice. This verifies that the MPI subsystem is able to pass MPI + traffic across your network. For example: + + .. 
code-block:: + + shell$ mpirun --host remotehost,otherhost ring_c + Process 0 sending 10 to 0, tag 201 (1 processes in ring) + Process 0 sent to 0 + Process 0 decremented value: 9 + Process 0 decremented value: 8 + Process 0 decremented value: 7 + Process 0 decremented value: 6 + Process 0 decremented value: 5 + Process 0 decremented value: 4 + Process 0 decremented value: 3 + Process 0 decremented value: 2 + Process 0 decremented value: 1 + Process 0 decremented value: 0 + Process 0 exiting + + If you are unable to run simple MPI jobs across multiple hosts, + this may indicate a problem with the network(s) that Open MPI is + trying to use for MPI communications. Try limiting the networks + that it uses, and/or exploring levels 1 through 3 MCA parameters + for the communications module that you are using. For example, if + you're using the TCP BTL, see the output of: + + .. code-block:: + + ompi_info --level 3 --param btl tcp + +///////////////////////////////////////////////////////////////////////// + +.. Missing libraries FAQ items addressing errors of the form: + + prted: error while loading shared libraries: libimf.so: cannot open shared + object file: No such file or directory + + Compiler => Compiler library linked to orted + + $compilers``"Intel"`` = "libimf.so"; + $compilers``"PGI"`` = "libpgc.so"; + $compilers``"PathScale"`` = "libmv.so"; + +I get errors about missing libraries. What should I do? +-------------------------------------------------------- + +When building Open MPI with the compilers that have libraries in +non-default search path locations, you may see errors about those +compiler's support libraries when trying to launch MPI applications if +their corresponding environments were not setup properly. + +For example, you may see warnings similar to the following: + +.. 
code-block:: sh + + # With the Intel compiler suite + shell$ mpirun -np 1 --host node1.example.com mpi_hello + prted: error while loading shared libraries: libimf.so: cannot open shared object file: No such file or directory + -------------------------------------------------------------------------- + A daemon (pid 11893) died unexpectedly with status 127 while + attempting to launch so we are aborting. + ...more error messages... + + # With the PGI compiler suite + shell$ mpirun -np 1 --host node1.example.com mpi_hello + prted: error while loading shared libraries: libpgcc.so: cannot open shared object file: No such file or directory + ...more error messages... + + # With the PathScale compiler suite + shell$ mpirun -np 1 --host node1.example.com mpi_hello + prted: error while loading shared libraries: libmv.so: cannot open shared object file: No such file or directory + ...more error messages... + +Specifically, Open MPI first attempts to launch a "helper" daemon +``prted`` on ``node1.example.com``, but it failed because one of +``prted``'s dependent libraries was not able to be found. The +libraries shown above (``libimf.so``, ``libpgcc.so``, and +``libmv.so``) are specific to their compiler suites (Intel, PGI, and +PathScale, respectively). As such, it is likely that the user did not +setup the compiler library in their environment properly on this node. + +Double check that you have setup the appropriate compiler environment +on the target node, for both interactive and non-interactive logins. + +.. note:: It is a common error to ensure that the compiler environment + is setup properly for *interactive* logins, but not for + *non-interactive* logins. + +Here's an example of a user-compiled MPI application working fine +locally, but failing when invoked non-interactively on a remote node: + +.. 
code-block:: sh + + # Compile a trivial MPI application + head_node$ cd $HOME + head_node$ mpicc mpi_hello.c -o mpi_hello + + # Run it locally; it works fine + head_node$ ./mpi_hello + Hello world, I am 0 of 1. + + # Run it remotely interactively; it works fine + head_node$ ssh node2.example.com + + Welcome to node2. + node2$ ./mpi_hello + Hello world, I am 0 of 1. + node2$ exit + + # Run it remotely *NON*-interactively; it fails + head_node$ ssh node2.example.com $HOME/mpi_hello + mpi_hello: error while loading shared libraries: libimf.so: cannot open shared object file: No such file or directory + +In cases like this, check your shell script startup files and verify +that the appropriate compiler environment is setup properly for +non-interactive logins. + +///////////////////////////////////////////////////////////////////////// + +Can I run non-MPI programs with ``mpirun`` / ``mpiexec``? +--------------------------------------------------------- + +Yes. + +For example: + +.. code-block:: + + shell$ mpirun -np 2 --host a,b uptime + +This will launch a copy of the Unix command ``uptime`` on the hosts ``a`` +and ``b``. + +Other questions in the FAQ section deal with the specifics of the +``mpirun`` command line interface; suffice it to say that it works +equally well for MPI and non-MPI applications. + +///////////////////////////////////////////////////////////////////////// + +Can I run GUI applications with Open MPI? +----------------------------------------- + +Yes, but it will depend on your local setup and may require additional +setup. + +In short: you will need to have graphics forwarding (e.g., X11 +forwarding) enabled from the remote processes to the display where you +want output to appear. In a secure environment, you can simply allow +all X requests to be shown on the target display and set the +``DISPLAY`` environment variable in all MPI processes' environments to +the target display, perhaps something like this: + +.. 
code-block:: + + shell$ hostname + my_desktop.secure-cluster.example.com + shell$ xhost + + shell$ mpirun -np 4 -x DISPLAY=my_desktop.secure-cluster.example.com a.out + +However, this technique is not generally suitable for unsecure +environments (because it allows anyone to read and write to your +display). A slightly more secure way is to only allow X connections +from the nodes where your application will be running: + +.. code-block:: + + shell$ hostname + my_desktop.secure-cluster.example.com + shell$ xhost +compute1 +compute2 +compute3 +compute4 + compute1 being added to access control list + compute2 being added to access control list + compute3 being added to access control list + compute4 being added to access control list + shell$ mpirun -np 4 -x DISPLAY=my_desktop.secure-cluster.example.com a.out + +(assuming that the four nodes you are running on are ``compute1`` +through ``compute4``). + +Other methods are available, but they involve sophisticated X +forwarding through ``mpirun`` and are generally more complicated than +desirable. + +///////////////////////////////////////////////////////////////////////// + +Can I run ncurses-based / curses-based / applications with funky input schemes with Open MPI? +--------------------------------------------------------------------------------------------- + +Maybe. But probably not. + +Open MPI provides fairly sophisticated stdin / stdout / stderr +forwarding. However, it does not work well with curses, ncurses, +readline, or other sophisticated I/O packages that generally require +direct control of the terminal. + +Every application and I/O library is different |mdash| you should try to +see if yours is supported. But chances are that it won't work. + +Sorry. :-( + +///////////////////////////////////////////////////////////////////////// + +What other options are available to ``mpirun``? 
+----------------------------------------------- + +``mpirun`` supports the ``--help`` option which provides a usage +message and a summary of the options that it supports. It should be +considered the definitive list of what options are provided. + +Several notable options are: + +* ``--hostfile``: Specify a hostfile for launchers (such as the + ``rsh`` launcher) that need to be told on which hosts to start + parallel applications. Note that for compatibility with other MPI + implementations, *--machinefile* is a synonym for ``--hostfile``. +* ``--host``: Specify a host or list of hosts to run on (see + :ref:`this FAQ entry for more details + `). +* ``--np`` (or ``-np``): Indicate the number of processes to + start. +* ``--mca``: Set MCA parameters (see the :doc:`Run-Time Tuning FAQ + category ` for more details). +* ``--wdir DIRECTORY``: Set the working directory of the started + applications. If not supplied, the current working directory is + assumed (or ``$HOME``, if the current working directory does not + exist on all nodes). +* ``-x ENV_VARIABLE_NAME``: The name of an environment variable to + export to the parallel application. The ``-x`` option can be + specified multiple times to export multiple environment variables to + the parallel application. + +///////////////////////////////////////////////////////////////////////// + +.. _faq-running-mpi-apps-mpirun-hostfile-label: + +How do I use the ``--hostfile`` option to ``mpirun``? +----------------------------------------------------- + +.. error:: TODO For cross reference, this is the PRRTE man page section + about ``--hostfile``: + https://github.com/openpmix/prrte/blame/master/src/tools/prte/prte-map.1.md#L236 + +The ``--hostfile`` option to ``mpirun`` takes a filename that lists +hosts on which to launch MPI processes. + +.. important:: The hosts listed in a hostfile have *nothing* to do + with which network interfaces are used for MPI + communication. 
They are *only* used to specify on + which hosts to launch MPI processes. + +Hostfiles are simple text files with hosts specified, one per line. +Each host can also specify a default and maximum number of *slots* to +be used on that host (i.e., the maximum number of processes that will +be launched on that node). Comments are also supported, and blank +lines are ignored. For example: + +.. code-block:: + + # This is an example hostfile. Comments begin with #. + # + # Since no slots are specified, the number of slots defaults to the + # number of processor cores available on the machine. + foo.example.com + + # We want to allow launching a maximum of 2 processes on this host + # (e.g., potentially because it has two processor cores): + bar.example.com slots=2 + +Slots are discussed in much more detail :ref:`in this FAQ entry +`. + +Hostfiles works in two different ways: + +#. *Exclusionary:* If a list of hosts to run on has been provided by + another source (e.g., by a hostfile or a batch scheduler such as + Slurm, PBS/Torque, SGE, etc.), the hosts provided by the hostfile + must be in the already-provided host list. If the + hostfile-specified nodes are *not* in the already-provided host + list, ``mpirun`` will abort without launching anything. + + In this case, hostfiles act like an exclusionary filter |mdash| + they limit the scope of where processes will be scheduled from the + original list of hosts to produce a final list of hosts. + + For example, say that a scheduler job contains hosts ``node01`` + through ``node04``. If you run: + + .. code-block:: + + shell$ cat my_hosts + node03 + shell$ mpirun -np 1 --hostfile my_hosts hostname + + This will run a single copy of ``hostname`` on the host ``node03``. + + However, presuming your job was allocated only to ``node03`` and + you run the following: + + .. 
code-block:: + + shell$ cat my_hosts + node17 + shell$ mpirun -np 1 --hostfile my_hosts hostname + + This is an error (because ``node17`` is not allocated to your job), + and ``mpirun`` will abort. + + Finally, note that in exclusionary mode, processes will *only* be + executed on the hostfile-specified hosts. If this ends up causing + an oversubscription situation, ``mpirun`` will abort by default. + +#. *Inclusionary:* If a list of hosts has *not* been provided by + another source, then the hosts provided by the ``--hostfile`` + option will be used as the original and final host list. + + In this case, ``--hostfile`` acts as an inclusionary agent; all + ``--hostfile``-supplied hosts become available for scheduling + processes. For example (assume that you are *not* in a scheduling + environment where a list of nodes is being transparently supplied): + + .. code-block:: + + shell$ cat my_hosts + node01.example.com slots=1 + node02.example.com slots=1 + node03.example.com slots=1 + shell$ mpirun -np 3 --hostfile my_hosts hostname + + This will launch a single copy of ``hostname`` on the hosts + ``node01.example.com``, ``node02.example.com``, and + ``node03.example.com``. + +Note, too, that ``--hostfile`` is essentially a per-application switch. +Hence, if you specify multiple applications (as in an MPMD job), +``--hostfile`` can be specified multiple times: + +.. code-block:: + + shell$ cat hostfile_1 + node01.example.com + shell$ cat hostfile_2 + node02.example.com + shell$ mpirun -np 1 --hostfile hostfile_1 hostname : -np 1 --hostfile hostfile_2 uptime + node01.example.com + 06:11:45 up 1 day, 2:32, 0 users, load average: 21.65, 20.85, 19.84 + +Notice that ``hostname`` was launched on ``node01.example.com`` and +``uptime`` was launched on ``node02.example.com``. + +///////////////////////////////////////////////////////////////////////// + +.. _faq-running-mpi-apps-mpirun-host-label: + +How do I use the ``--host`` option to ``mpirun``? 
+------------------------------------------------- + +The ``--host`` option to ``mpirun`` takes a comma-delimited list of +hosts on which to run. For example: + +.. code-block:: + + shell$ mpirun -np 3 --host a,b,c hostname + +Will launch *one* copy of ``hostname`` on each of hosts ``a``, ``b``, +and ``c``. Specifically: each host defaults to 1 slot, unless +specified by the ``:N`` suffix. For example: + +.. code-block:: + + shell$ mpirun --host a,b:2,c:3 hostname + +Will launch one copy of ``hostname`` on ``a``, two copies of +``hostname`` on ``b``, and three copies of ``hostname`` on ``c``. + +Slots are discussed in much more detail :ref:`in this FAQ entry +`. + +.. important:: The hosts specified by the ``--host`` option have + *nothing* to do with which network interfaces are used + for MPI communication. They are *only* used to specify + on which hosts to launch MPI processes. + +``--host`` works in two different ways: + +#. *Exclusionary:* If a list of hosts to run on has been provided by + another source (e.g., by a hostfile or a batch scheduler such as + Slurm, PBS/Torque, SGE, etc.), the hosts provided by the ``--host`` + option must be in the already-provided host list. If the + ``--host``-specified nodes are *not* in the already-provided host + list, ``mpirun`` will abort without launching anything. + + In this case, the ``--host`` option acts like an exclusionary + filter |mdash| it limits the scope of where processes will be + scheduled from the original list of hosts to produce a final list + of hosts. + + For example, say that the hostfile ``my_hosts`` contains the hosts + ``node1`` through ``node4``. If you run: + + .. code-block:: + + shell$ mpirun -np 1 --hostfile my_hosts --host node3 hostname + + This will run a single copy of ``hostname`` on the host ``node3``. + However, if you run: + + .. 
code-block:: + + shell$ mpirun -np 1 --hostfile my_hosts --host node17 hostname + + This is an error (because ``node17`` is not listed in + ``my_hosts``); ``mpirun`` will abort. + + Finally, note that in exclusionary mode, processes will *only* be + executed on the ``--host``-specified hosts. If this ends up + causing an oversubscription situation, ``mpirun`` will abort by + default. + +#. *Inclusionary:* If a list of hosts has *not* been provided by + another source, then the hosts provided by the ``--host`` option + will be used as the original and final host list. + + In this case, ``--host`` acts as an inclusionary agent; all + ``--host``-supplied hosts become available for scheduling + processes. For example (assume that you are *not* in a scheduling + environment where a list of nodes is being transparently supplied): + + .. code-block:: + + shell$ mpirun -np 3 --host a,b,c hostname + + This will launch a single copy of ``hostname`` on the hosts ``a``, + ``b``, and ``c``. + +Note, too, that ``--host`` is essentially a per-application switch. +Hence, if you specify multiple applications (as in an MPMD job), +``--host`` can be specified multiple times: + +.. code-block:: + + shell$ mpirun -np 1 --host a hostname : -np 1 --host b uptime + +This will launch ``hostname`` on host ``a`` and ``uptime`` on host ``b``. + +///////////////////////////////////////////////////////////////////////// + +.. _faq-running-mpi-apps-slots-label: + +What are "slots"? +----------------- + +*Slots* are Open MPI's representation of how many processes can be +launched on a given host. + +Open MPI maintains the number of slots for each host in a given +parallel job, and |mdash| by default |mdash| will not let you launch +more processes on a host than it has slots. + +.. important:: It is common to set the number of slots on a host to be + less than or equal to the number of processor cores on + that host. 
+ + **But it is important to realize that Open MPI's concept + of slots is actually unrelated to the number of + physical processor cores on a host.** + + Specifically: the number of slots on a host can be less + than, equal to, or more than the number of processor + cores on a host. + +If you wish to run more processes on a host than it has slots, +:ref:`see the FAQ entry on oversubscription +`. + +///////////////////////////////////////////////////////////////////////// + +.. _faq-running-mpi-apps-default-slots-label: + +How are the number of slots calculated? +--------------------------------------- + +The number of slots on a host depends on a few factors: + +#. If the host is specified by a job scheduler (e.g., Slurm, + PBS/Torque, etc.), the job scheduler specifies the number of slots + for that host. + +#. If the host is specified in a hostfile: + + #. If the ``slots`` parameter is specified, that value is used for + the number of slots on that host. + #. Otherwise: + + #. If ``--map-by :HWTCPUS`` was specified, the number of slots + defaults to the number of hardware threads on that host. + #. Otherwise, the number of slots defaults to the number of + processor cores on that host. + +#. If the host is specified via the ``--host`` command line option: + + #. If the ``:N`` suffix is specified, ``N`` is used for the number + of slots on that host. + #. Otherwise, the number of slots defaults to 1. + #. If the same host name is specified multiple times, the slots + value for that host is increased by ``N`` if ``:N`` is + specified, or increased by 1 if ``:N`` is not specified. + +.. caution:: The exact scheme used to determine the number of slots + has varied between different major versions of Open MPI. + The scheme described above is relevant for Open MPI + |ompi_series|. + +Max slot counts, however, are rarely specified by schedulers. 
The max +slot count for each node will default to "infinite" if it is not +provided (meaning that Open MPI will oversubscribe the node if you ask +it to |mdash| see more on oversubscribing in :ref:`this FAQ entry +`). + +.. error:: TODO Ralph: do we still have the concept of "max slots"? + Issue is open: + https://github.com/openpmix/prrte/issues/770. + +Here are some examples, all from unscheduled environments: + +#. Use a hostfile and specify the ``slots`` parameter. + + .. code-block:: sh + + shell$ cat my-hostfile + node01.example.com slots=4 + shell$ mpirun --hostfile my-hostfile hostname + node01 + node01 + node01 + node01 + + This launched 4 processes because ``slots=4`` was specified in the + hostfile. + +#. Use a hostfile and do *not* specify the ``slots`` parameter (assume + that ``node01.example.com`` has 2 processor cores): + + .. code-block:: sh + + shell$ cat my-hostfile + node01.example.com + shell$ mpirun --hostfile my-hostfile hostname + node01 + node01 + + This launched 2 processes because ``slots`` was not specified, and + ``node01`` has 2 processor cores. + +#. Use ``--host``: + + .. code-block:: sh + + shell$ mpirun --host node01.example.com hostname + node01 + + This launched 1 process because ``--host`` with no ``:N`` suffix + increments the slot count for that host by 1. + +#. Use ``--host`` with a ``:N`` suffix: + + .. code-block:: sh + + shell$ mpirun --host node01.example.com:2 hostname + node01 + node01 + + This launched 2 processes because ``:2`` was specified on the + command line. + +#. Use ``--host`` with a ``:N`` suffix, and mention the host multiple times: + + .. code-block:: sh + + shell$ mpirun --host node01.example.com:2,node01.example.com hostname + node01 + node01 + node01 + + This launched 3 processes because ``:2`` was specified on the + command line, and then ``node01.example.com`` was specified an + additional time, incrementing the slot count for that host to 3. 
+ +///////////////////////////////////////////////////////////////////////// + +.. _faq-running-mpi-apps-mpirun-scheduling-label: + +How do I control how my processes are scheduled across hosts? +------------------------------------------------------------- + +The short version is that if you are not oversubscribing your hosts +(i.e., trying to run more processes than slots available on that +host), scheduling is pretty simple and occurs either on a by-slot or +by-node round robin schedule. If you're oversubscribing, the issue +gets much more complicated |mdash| keep reading. + +The more complete answer is: Open MPI schedules processes to nodes by +asking two questions from each application on the ``mpirun`` command +line: + +#. *How many* processes should be launched? +#. *Where* should those processes be launched? + +The "how many" question is directly answered with the ``-np`` switch +to ``mpirun``. If ``-np`` is not specified on the ``mpirun`` command +line, its value is the sum of the slots on all the nodes. + +The "where" question is a little more complicated, and depends on +three factors: + +#. The final node list (e.g., after ``--hostfile`` / ``--host`` + exclusionary or inclusionary processing) +#. The scheduling policy (which applies to all applications in a + single job) +#. The default and maximum number of slots on each host + +.. error:: TODO Ralph: do we still have the concept of "max slots"? + Issue is open: + https://github.com/openpmix/prrte/issues/770. + +Open MPI currently supports two scheduling policies: by slot and by +node: + +#. *By slot:* This is the default scheduling policy, but can also be + explicitly requested by using either the ``--map-by slot`` option + to ``mpirun`` or by setting the MCA parameter + ``rmaps_default_mapping_policy`` to the string ``slot``. + + In this mode, Open MPI will schedule processes on a node until all + of its default slots are exhausted before proceeding to the next + node. 
In MPI terms, this means that Open MPI tries to maximize the + number of adjacent ranks in ``MPI_COMM_WORLD`` on the same host + without oversubscribing that host. + + For example: + + .. code-block:: + + shell$ cat my-hosts + node0 slots=2 max_slots=20 + node1 slots=2 max_slots=20 + shell$ mpirun --hostfile my-hosts -np 8 --map-by slot hello | sort + Hello World I am rank 0 of 8 running on node0 + Hello World I am rank 1 of 8 running on node0 + Hello World I am rank 2 of 8 running on node1 + Hello World I am rank 3 of 8 running on node1 + Hello World I am rank 4 of 8 running on node0 + Hello World I am rank 5 of 8 running on node0 + Hello World I am rank 6 of 8 running on node1 + Hello World I am rank 7 of 8 running on node1 + +#. *By node:* This policy can be requested either by using the + ``--map-by node`` option to ``mpirun`` or by setting the MCA parameter + ``rmaps_default_mapping_policy`` to the string "node". + + In this mode, Open MPI will schedule a single process on each node + in a round-robin fashion (looping back to the beginning of the node + list as necessary) until all processes have been scheduled. Nodes + are skipped once their default slot counts are exhausted. + + For example: + + .. code-block:: + + shell$ cat my-hosts + node0 slots=2 max_slots=20 + node1 slots=2 max_slots=20 + shell$ mpirun --hostfile my-hosts -np 8 --map-by node hello | sort + Hello World I am rank 0 of 8 running on node0 + Hello World I am rank 1 of 8 running on node1 + Hello World I am rank 2 of 8 running on node0 + Hello World I am rank 3 of 8 running on node1 + Hello World I am rank 4 of 8 running on node0 + Hello World I am rank 5 of 8 running on node1 + Hello World I am rank 6 of 8 running on node0 + Hello World I am rank 7 of 8 running on node1 + +In both policies, if the default slot count is exhausted on all nodes +while there are still processes to be scheduled, Open MPI will trigger +an oversubscription condition. 
+ +If ``:OVERSUBSCRIBE`` is added as a modifier to the ``--map-by`` +option (e.g., ``mpirun --map-by node:OVERSUBSCRIBE ...`` -- :ref:`see +this FAQ item ` for more +details), Open MPI will continue to loop through the list of nodes +again and try to schedule one more process to each node until all +processes are scheduled. Nodes are skipped in this process if their +maximum slot count is exhausted. If the maximum slot count is +exhausted on all nodes while there are still processes to be +scheduled, Open MPI will abort without launching any processes. + +If ``:OVERSUBSCRIBE`` is *not* specified and an oversubscription +condition occurs, Open MPI will abort without launching any processes. + +///////////////////////////////////////////////////////////////////////// + +.. _faq-running-mpi-apps-oversubscribing-label: + +Can I oversubscribe nodes (run more processes than processors)? +--------------------------------------------------------------- + +Yes. But it very much matters *how* you do it. + +Specifically: it is critical that Open MPI *knows* that you are +oversubscribing the node, or **severe** performance degradation can +result. + +.. important:: Here is a good general rule to follow: **never specify + a number of slots that is more than the available + number of processors.** + +For example, if you want to run 4 processes on a host with 2 processor +cores, then indicate that you only have 2 slots but want to run 4 +processes. For example: + +.. code-block:: sh + + # In a hostfile, the number of slots will default to the number of + # processor cores on the host + shell$ cat my-hostfile + localhost + shell$ mpirun -np 4 --hostfile my-hostfile a.out + +Specifically: we strongly suggest that you do **NOT** have a hostfile +that contains ``slots=4`` (because there are only two available +processor cores). + +That being said, the above command will fail, because you are trying +to run 4 processes but there are only 2 slots available. 
You must +specifically tell Open MPI that it is ok to oversubscribe via +``--map-by :OVERSUBSCRIBE``: + +.. code-block:: sh + + shell$ cat my-hostfile + # For the purposes of this example, explicitly tell Open MPI + # that we have 2 slots on the host. + localhost slots=2 + shell$ mpirun -np 4 --hostfile my-hostfile --map-by :OVERSUBSCRIBE a.out + +The reason you should tell Open MPI whether you're oversubscribing or +not (i.e., never specify a ``slots`` value more than the number of +processor cores available) is because Open MPI basically runs its +message passing progression engine in two modes: *aggressive* and +*degraded*. + +#. *Degraded:* When Open MPI thinks that it is in an oversubscribed + mode (i.e., more processes are running than there are processor + cores available), MPI processes will automatically run in + *degraded* mode and frequently yield the processor to its peers, + thereby allowing all processes to make progress. + + .. note:: Be sure to see :ref:`this FAQ entry + ` that describes how + degraded mode affects processor and memory + affinity. + +#. *Aggressive:* When Open MPI thinks that it is in an exactly- or + under-subscribed mode (i.e., the number of running processes is + equal to or less than the number of available processor cores), MPI + processes will automatically run in *aggressive* mode, meaning that + they will never voluntarily give up the processor to other + processes. With some network transports, this means that Open MPI + will spin in tight loops attempting to make message passing + progress, effectively causing other processes to not get any CPU + cycles (and therefore never make any progress). + +For example, on a node with a two processor cores: + +.. code-block:: + + shell$ cat my-hostfile + localhost slots=4 + shell$ mpirun -np 4 --hostfile my-hostfile a.out + +This would cause all 4 MPI processes to run in *aggressive* mode +because Open MPI thinks that there are 4 available processor cores to +use. 
This is actually a lie (there are only 2 processor cores
If you want to load the shared library ``libmpi`` explicitly at
runtime either by using ``dlopen()`` from C/C++ or something like the
For example, if four processes in a job + share a node, they will each be given a local rank ranging from 0 to + 3. +* ``OMPI_UNIVERSE_SIZE``: the number of process slots allocated to + this job. Note that this may be different than the number of + processes in the job. +* ``OMPI_COMM_WORLD_NODE_RANK``: the relative rank of this process on + this node looking across *all* jobs. diff --git a/docs/faq/supported-systems.rst b/docs/faq/supported-systems.rst new file mode 100644 index 00000000000..3285e107f69 --- /dev/null +++ b/docs/faq/supported-systems.rst @@ -0,0 +1,216 @@ +Supported systems +================= + +.. TODO How can I create a TOC just for this page here at the top? + +///////////////////////////////////////////////////////////////////////// + +What operating systems does Open MPI support? +--------------------------------------------- + +We primarily develop Open MPI on Linux and MacOS. + +Other operating systems are supported, however. The exact list of +operating systems supported has changed over time (e.g., native +Microsoft Windows support was added in v1.3.3, and although it was +removed prior to v1.8, is still supported through Cygwin). :ref:`See +the Platform Notes section ` for a +listing of the OSes that that version supports. + +Open MPI is fairly POSIX-neutral, so it will run without *too* many +modifications on most POSIX-like systems. Hence, if we haven't listed +your favorite operating system here, it may not be difficult to get +Open MPI to compile and run properly. The biggest obstacle is +typically the assembly language, but that's fairly modular and we're +happy to provide information about how to port it to new platforms. + +It should be noted that we are quite open to accepting patches for +operating systems that we do not currently support. If we do not have +systems to test these on, we probably will only claim to +"unofficially" support those systems. 
+ + +///////////////////////////////////////////////////////////////////////// + +What hardware platforms does Open MPI support? +---------------------------------------------- + +Essentially all the common platforms that the operating +systems listed in the previous question support. + +For example, Linux runs on a *wide* variety of platforms, and we +certainly don't claim to test all of them. Open MPI includes +Linux-compiler-based assembly for support of Intel, AMD, ARM, and +PowerPC chips, for example. + + +///////////////////////////////////////////////////////////////////////// + +What network interconnects does Open MPI support? +------------------------------------------------- + +:ref:`See the Platform Notes section ` +for a list of networks supported in this specific release of Open MPI. + +The set of commonly-available HPC-class network interconnects has +evolved and changed over time. + +Reflecting that evolution, each release of Open MPI supports a +specific set of such network interconnects. You will need to check +the documentation of the version of your Open MPI installation to see +which interconnects it supports. A general rule of thumb is that a +given Open MPI version tends to support the popular HPC-class +interconnects at the time of its release. + +This, unfortunately, does mean that Open MPI removes support for +networks that are no longer commonly-used in HPC environments. If you +still have one of these older interconnects, not all new version of +Open MPI may support your interconnect -- sorry! This simply reflects +the reality of limited development, testing, and maintenance +resources. + +That being said, :doc:`contributions are always welcome! +`. + + +///////////////////////////////////////////////////////////////////////// + +How does Open MPI interface to back-end run-time systems? 
+--------------------------------------------------------- + +Prior versions of Open MPI used to be layered on top of the Open +Run-Time Environment (ORTE). ORTE originally started as a small +portion of the Open MPI code base, but over time, ORTE effectively +spun off into its own sub-project. ORTE ultimately evolved into the +`Process Management Interface Exascale (PMIx) standard and +corresponding OpenPMIx software project `_. + +The OpenPMIx project then evolved its own `PMIx Reference Run-Time +Environment (PRRTE) `_ project. + +PRRTE has effectively replaced ORTE in the Open MPI implementation. + + +///////////////////////////////////////////////////////////////////////// + +What run-time environments does Open MPI support? +------------------------------------------------- + +:ref:`See the Platform Notes section ` +for a list of run-time environments supported in this specific release +of Open MPI. + +Since Open MPI uses `PRRTE `_ as +its back-end run-time system, Open MPI supports whatever run-time +systems PRRTE supports. + +Each version of Open MPI supports a specific set of versions of +PRRTE. Those versions therefore determine which run-time systems that +that release of Open MPI supports. + + +///////////////////////////////////////////////////////////////////////// + +.. _faq_supported_systems_mpi_compliance_label: + +How much MPI does Open MPI support? +----------------------------------- + +* Open MPI 1.2 supports all of MPI-2.0. + +* Open MPI 1.3 supports all of MPI-2.1. + +* Open MPI 1.8 supports all of MPI-3.0. + +* Starting with v2.0, Open MPI supports all of MPI-3.1. + + +///////////////////////////////////////////////////////////////////////// + +Is Open MPI thread safe? 
+------------------------ + +Support for ``MPI_THREAD_MULTIPLE`` (i.e., multiple threads +executing within the MPI library) and asynchronous message passing +progress (i.e., continuing message passing operations even while no +user threads are in the MPI library) has been designed into Open MPI +from its first planning meetings. + +Support for ``MPI_THREAD_MULTIPLE`` was included in the first version of +Open MPI, but it only became robust around v3.0.0. Subsequent +releases continually improve reliability and performance of +multi-threaded MPI applications. + + +///////////////////////////////////////////////////////////////////////// + +Does Open MPI support 32 bit environments? +------------------------------------------ + +As far as we know, yes. 64 bit architectures have effectively taken +over the world, though, so 32-bit is not tested nearly as much as +64-bit. + +Specifically, most of the Open MPI developers only have 64-bit +machines, and therefore only test 32-bit in emulation mode. + + +///////////////////////////////////////////////////////////////////////// + +Does Open MPI support 64 bit environments? +------------------------------------------ + +Yes, Open MPI is 64 bit clean. You should be able to use Open MPI on +64 bit architectures and operating systems with no difficulty. + + +///////////////////////////////////////////////////////////////////////// + +Does Open MPI support execution in heterogeneous environments? +-------------------------------------------------------------- + +Heterogeneous support (specifically: supporting different sized and/or +represented data types in a single MPI application run) within a +single MPI job is technically required by the MPI standard. + +However, there are both theoretical and practical problems with +supporting true data heterogeneity at run time. 
+ +Indeed, it is quite uncommon for production HPC environments to be +data-heterogeneous (e.g., natively support little endian on some nodes +and big endian on other nodes in the same MPI application job). + +As such, supporting data heterogeneity is a feature that has fallen +into disrepair: it is currently known to be broken in this release of +Open MPI. + +:doc:`Contributions to fix it would be welcome! ` + +///////////////////////////////////////////////////////////////////////// + +Does Open MPI support parallel debuggers? +----------------------------------------- + +Yes. Open MPI supports the TotalView API for parallel process +attaching, which several parallel debuggers support (e.g., DDT, fx2). +As part of v1.2.4 (released in September 2007), Open MPI also supports +the TotalView API for viewing message queues in running MPI processes. + +:ref:`See this FAQ entry ` for +details on how to run Open MPI jobs under TotalView, and :ref:`this +FAQ entry ` for details on how to run +Open MPI jobs under DDT. + +.. note:: The integration of Open MPI message queue support is + problematic with 64 bit versions of TotalView prior to v8.3: + + * The message queues views will be truncated. + * Both the communicators and requests list will be incomplete. + * Both the communicators and requests list may be filled with wrong + values (such as an ``MPI_Send`` to the destination + ``MPI_ANY_SOURCE``). + + There are two workarounds: + + * Use a 32 bit version of TotalView + * Upgrade to TotalView v8.3 diff --git a/docs/faq/sysadmin.rst b/docs/faq/sysadmin.rst new file mode 100644 index 00000000000..eb17a2f6fd7 --- /dev/null +++ b/docs/faq/sysadmin.rst @@ -0,0 +1,294 @@ +System administrator-level technical information +================================================ + +.. TODO How can I create a TOC just for this page here at the top? + +///////////////////////////////////////////////////////////////////////// + +I'm a sysadmin; what do I care about Open MPI? 
+---------------------------------------------- + +Several members of the Open MPI team have strong system +administrator backgrounds; we recognize the value of having software +that is friendly to system administrators. Here are some of the reasons +that Open MPI is attractive for system administrators: + +* Simple, standards-based installation +* Reduction of the number of MPI installations +* Ability to set system-level and user-level parameters +* Scriptable information sources about the Open MPI installation + +See the rest of the questions in this FAQ section for more details. + +///////////////////////////////////////////////////////////////////////// + +Do I need multiple Open MPI installations? +------------------------------------------ + +Yes and no. + +Open MPI can handle a variety of different run-time environments +(e.g., ssh, Slurm, PBS, etc.) and a variety of different +interconnection networks (e.g., ethernet, InfiniBand, etc.) +in a single installation. Specifically: because Open MPI is +fundamentally powered by a component architecture, plug-ins for all +these different run-time systems and interconnect networks can be +installed in a single installation tree. The relevant plug-ins will +only be used in the environments where they make sense. + +Hence, there is no need to have one MPI installation for InfiniBand, one +MPI installation for ethernet, one MPI installation for PBS, one MPI +installation for ``ssh``, etc. Open MPI can handle all of these in a +single installation. + +However, there are some issues that Open MPI cannot solve. Binary +compatibility between different compilers is such an issue. Let's +examine this on a per-language basis (be sure see the big caveat at +the end): + +* *C:* Most C compilers are fairly compatible, such that if you compile + Open MPI with one C library and link it to an application that was + compiled with a different C compiler, everything should "just work." 
+ As such, a single installation of Open MPI should work for most C MPI + applications. + +* *C++:* The same is not necessarily true for C++. While Open MPI does not currently contain any C++ code (the MPI C++ bindings were removed in a prior release), and C++ compilers *should* produce ABI-equivalent code for C symbols, obscure problem can sometimes arise when mixing compilers from different suites. For example, if you compile Open MPI with the XYZ C/C++ + compiler, you may need to have the XYC C++ run-time libraries + installed everywhere you want to run. + +* *Fortran:* There are multiple issues with Fortran. + + #. Fortran compilers do something called "symbol mangling," meaning that the + back-end symbols may have slightly different names than their corresponding + global variables, subroutines, and functions. There are 4 common name + mangling schemes in use by Fortran compilers. On many systems (e.g., + Linux), Open MPI will automatically support all 4 schemes. As such, a + single Open MPI installation *should* just work with multiple different + Fortran compilers. However, on some systems, this is not possible (e.g., + OS X), and Open MPI will only support the name mangling scheme of the + Fortran compiler that was identified during ``configure``. + + #. That being said, there are two notable exceptions that do *not* work + across Fortran compilers that are "different enough": + + #. The C constants ``MPI_F_STATUS_IGNORE`` and ``MPI_F_STATUSES_IGNORE`` + will only compare properly to Fortran applications that were + created with Fortran compilers that that use the same + name-mangling scheme as the Fortran compiler with which Open MPI was + configured. + + #. Fortran compilers may have different values for the logical + ``.TRUE.`` constant. As such, any MPI function that uses the + Fortran ``LOGICAL`` type may only get ``.TRUE.`` values back that + correspond to the the ``.TRUE.`` value of the Fortran compiler with which + Open MPI was configured. + + #. 
Similar to C++, linking object files that use Fortran language features such as modules and/or polymorphism from different
+ +For example, say that you have a cluster with 2 ethernet networks |mdash| +one for NFS and other system-level operations, and one for MPI jobs. +The system administrator can tell Open MPI to not use the NFS TCP +network at a system level, such that when users invoke ``mpirun`` or +``mpiexec`` to launch their jobs, they will automatically only be using +the network meant for MPI jobs. + +:doc:`See the run-time tuning FAQ category ` for information on how to set global MCA parameters. + +///////////////////////////////////////////////////////////////////////// + +Do my users need to have their own installation of Open MPI? +------------------------------------------------------------ + +Usually not. It is typically sufficient for a single Open MPI +installation (or perhaps a small number of Open MPI installations, +depending on compiler interoperability) to serve an entire parallel +operating environment. + +Indeed, a system-wide Open MPI installation can be customized on a +per-user basis in two important ways: + +* *Per-user MCA parameters:* Each user can set their own set of MCA + parameters, potentially overriding system-wide defaults. +* *Per-user plug-ins:* Users can install their own Open MPI + plug-ins under ``$HOME/.openmpi/components``. Hence, developers can + experiment with new components without destabilizing the rest of the + users on the system. Or power users can download 3rd party components + (perhaps even research-quality components) without affecting other users. + +///////////////////////////////////////////////////////////////////////// + +I have power users who will want to override my global MCA parameters; is this possible? +---------------------------------------------------------------------------------------- + +Absolutely. + +:doc:`See the run-time tuning FAQ category ` for information how to set MCA parameters, both at the +system level and on a per-user (or per-MPI-job) basis. 
+ +///////////////////////////////////////////////////////////////////////// + +What MCA parameters should I, the system administrator, set? +------------------------------------------------------------ + +This is a difficult question and depends on both your specific +parallel setup and the applications that typically run there. + +The best thing to do is to use the ``ompi_info`` command to see what +parameters are available and relevant to you. Specifically, +``ompi_info`` can be used to show all the parameters that are available +for each plug-in. Two common places that system administrators like +to tweak are: + +* *Only allow specific networks:* Say you have a cluster with a + high-speed interconnect (such as InfiniBand) and a + low-speed ethernet network (e.g., 1Gpbps). The high-speed network is intended for MPI jobs; + the control network is intended for NFS and other + administrative-level tasks. In this case, you can simply turn off Open + MPI's TCP support. The ``btl`` framework contains Open MPI's network + support; in this case, you want to disable the ``tcp`` plug-in. You can + do this by adding the following line in the file + ``$prefix/etc/openmpi-mca-params.conf``: + + .. code-block:: + + btl = ^tcp + + This tells Open MPI to load all BTL components *except* ``tcp``. + + Consider another example: your cluster has two TCP networks, one for + NFS and administration-level jobs, and another for MPI jobs. You can + tell Open MPI to ignore the TCP network used by NFS by adding the + following line in the file ``$prefix/etc/openmpi-mca-params.conf``: + + .. code-block:: + + btl_tcp_if_exclude = lo,eth0 + + The value of this parameter is the device names to exclude. In this + case, we're excluding ``lo`` (localhost, because Open MPI has its own + internal loopback device) and ``eth0``. + +* *Tune the parameters for specific networks:* Each network plug-in + has a variety of different tunable parameters. 
Use the ``ompi_info`` + command to see what is available. You show *all* available parameters + with: + + .. code-block:: + + shell$ ompi_info --param all all + + .. note:: Starting with Open MPI v1.8, ``ompi_info`` categorizes its + parameters in *levels*, as defined by the MPI_T interface from + the MPI standard. You will need to specify ``--level 9`` (or + ``--all``) to show *all* MCA parameters. `See this blog entry + `_ + for further information. + + .. code-block:: sh + + shell$ ompi_info --level 9 + # or + shell$ ompi_info --all + + Beware: there are *many* variables available. You can limit the + output by showing all the parameters in a specific framework or in a + specific plug-in with the command line parameters: + + .. code-block:: sh + + shell$ ompi_info --param btl all --level 9 + + Shows all the parameters of all BTL components, and: + + .. code-block:: sh + + shell$ ompi_info --param btl tcp --level 9 + + Shows all the parameters of just the ``tcp`` BTL component. + +///////////////////////////////////////////////////////////////////////// + +I just added a new plugin to my Open MPI installation; do I need to recompile all my MPI apps? +---------------------------------------------------------------------------------------------- + +If your installation of Open MPI uses shared libraries and +components are standalone plug-in files, then no. If you add a new +component (such as support for a new network), Open MPI will simply +open the new plugin at run-time |mdash| your applications do not need to be +recompiled or re-linked. + +///////////////////////////////////////////////////////////////////////// + +I just upgraded my InfiniBand network; do I need to recompile all my MPI apps? +------------------------------------------------------------------------------ + +If your installation of Open MPI uses shared libraries and +components are standalone plug-in files, then no. 
You simply need to +recompile the Open MPI components that support that network and +re-install them. + +More specifically, Open MPI shifts the dependency on the underlying +network away from the MPI applications and to the Open MPI plug-ins. +This is a major advantage over many other MPI implementations. + +MPI applications will simply open the new plugin when they run. + +///////////////////////////////////////////////////////////////////////// + +We just upgraded our version of Open MPI; do I need to recompile all my MPI apps? +--------------------------------------------------------------------------------- + +It depends on which version of Open MPI your applications were initially compiled against and the target version of Open MPI to which you upgraded. + +:doc:`See the section on Open MPI's version numbering scheme ` for more information. + +///////////////////////////////////////////////////////////////////////// + +I have an MPI application compiled for another MPI; will it work with Open MPI? +------------------------------------------------------------------------------- + +It is strongly unlikely. Open MPI does not attempt to +interface to other MPI implementations, nor executables that were +compiled for them. Sorry! + +MPI applications need to be compiled and linked with Open MPI in order +to run under Open MPI. diff --git a/docs/faq/troubleshooting.rst b/docs/faq/troubleshooting.rst new file mode 100644 index 00000000000..e7a20fa4c90 --- /dev/null +++ b/docs/faq/troubleshooting.rst @@ -0,0 +1,152 @@ +Troubleshooting +=============== + +.. TODO How can I create a TOC just for this page here at the top? + +///////////////////////////////////////////////////////////////////////// + +I see strange messages about missing symbols in my application; what do these mean? +----------------------------------------------------------------------------------- + +Open MPI loads a lot of plugins (sometimes called "components" or +"modules") at run time. 
Sometimes a plugin can fail to load because it +can't resolve all the symbols that it needs. There are a few reasons +why this can happen. + +* The plugin is for a different version of Open MPI. :ref:`See this + section ` for an + explanation of how Open MPI might try to open the "wrong" plugins. +* An application is trying to manually dynamically open ``libmpi`` in + a private symbol space. For example, if an application is not + linked against ``libmpi``, but rather calls something like this: + + .. code-block:: c + + /* This is a Linux example -- the issue is similar/the same on other + operating systems */ + handle = dlopen("libmpi.so", RTLD_NOW | RTLD_LOCAL); + + This is due to some deep run time linker voodoo |mdash| it is + discussed towards the end of `this post to the Open MPI developer's + list + `_. + Briefly, the issue is this: + + #. The dynamic library ``libmpi`` is opened in a "local" symbol + space. + #. ``MPI_INIT`` is invoked, which tries to open Open MPI's plugins. + #. Open MPI's plugins rely on symbols in ``libmpi`` (and other Open + MPI support libraries); these symbols must be resolved when the + plugin is loaded. + #. However, since ``libmpi`` was opened in a "local" symbol space, + its symbols are not available to the plugins that it opens. + #. Hence, the plugin fails to load because it can't resolve all of + its symbols, and displays a warning message to that effect. + + The ultimate fix for this issue is a bit bigger than Open MPI, + unfortunately |mdash| it's a POSIX issue (as briefly described in the + devel mailing list posting, above). + + However, there are several common workarounds: + + * Dynamically open ``libmpi`` in a public / global symbol scope + |mdash| not a private / local scope. This will enable + ``libmpi``'s symbols to be available for resolution when Open MPI + dynamically opens its plugins. 
+ * If ``libmpi`` is opened as part of some underlying framework where + it is not possible to change the private / local scope to a public + / global scope, then dynamically open ``libmpi`` in a public / + global scope before invoking the underlying framework. This + sounds a little gross (and it is), but at least the run-time + linker is smart enough to not load ``libmpi`` twice |mdash| but it + does keeps ``libmpi`` in a public scope. + * Use the ``--disable-dlopen`` or ``--disable-mca-dso`` options to + Open MPI's ``configure`` script (see this TODO NONEXISTANT FAQ entry + for more details on these + options). These options slurp all of Open MPI's plugins up in to + ``libmpi`` |mdash| meaning that the plugins physically reside in + ``libmpi`` and will not be dynamically opened at run time. + * Build Open MPI as a static library by configuring Open MPI with + ``--disable-shared`` and ``--enable-static``. This has the same + effect as ``--disable-dlopen``, but it also makes ``libmpi.a`` (as + opposed to a shared library). + +///////////////////////////////////////////////////////////////////////// + +How do I attach a parallel debugger to my MPI job? +-------------------------------------------------- + +.. error:: TODO Need to update this with PMIx debugger info. + +///////////////////////////////////////////////////////////////////////// + +How do I find out what MCA parameters are being seen/used by my job? +-------------------------------------------------------------------- + +MCA parameters are the "life's blood" of Open MPI. MCA parameters are +used to control both detailed and large-scale behavior of Open MPI and +are present throughout the code base. + +This raises an important question: since MCA parameters can be set from a +file, the environment, the command line, and even internally within Open MPI, +how do I actually know what MCA params my job is seeing, and their value? 
+ +One way, of course, is to use the ``ompi_info`` command, which is +documented elsewhere (you can use ``man ompi_info``, or ``ompi_info +--help`` to get more info on this command). However, this still +doesn't fully answer the question since ``ompi_info`` isn't an MPI +process. + +To help relieve this problem, Open MPI provides the MCA parameter +``mpi_show_mca_params`` that directs the ``MPI_COMM_WORLD`` rank 0 +process to report the name of MCA parameters, their current value as +seen by that process, and the source that set that value. The +parameter can take several values that define which MCA parameters to +report: + +* ``all``: report all MCA params. Note that this typically generates a + rather long list of parameters since it includes all of the default + parameters defined inside Open MPI +* ``default``: MCA params that are at their default settings - i.e., + all MCA params that are at the values set as default within Open MPI +* ``file``: MCA params that had their value set by a file +* ``api``: MCA params set using Open MPI's internal APIs, perhaps to + override an incompatible set of conditions specified by the user +* ``enviro``: MCA params that obtained their value either from the + local environment or the command line. Open MPI treats environmental + and command line parameters as equivalent, so there currently is no + way to separate these two sources + +These options can be combined in any order by separating them with +commas. + +Here is an example of the output generated by this parameter: + +.. code-block:: sh + + shell$ mpirun --mca mpi_show_mca_params enviro hello_c + [local-hostname:12345] mpi_show_mca_params=enviro (environment) + Hello, World, I am 0 of 1 + +Note that several MCA parameters set by Open MPI itself for internal +uses are displayed in addition to the ones actually set by the user. 
+ +Since the output from this option can be long, and since it can be +helpful to have a more permanent record of the MCA parameters used for +a job, a companion MCA parameter ``mpi_show_mca_params_file`` is +provided. If ``mpi_show_mca_params_file`` is *also* set, the output +listing of MCA parameters will be directed into the specified file +instead of being printed to stdout. For example: + +.. code-block:: sh + + shell$ mpirun --mca mpi_show_mca_params enviro \ + --mca mpi_show_mca_param_file /tmp/foo.txt hello_c + Hello, World, I am 0 of 1 + shell$ cat /tmp/foo.txt + # + # This file was automatically generated on Sun Feb 7 14:34:31 2021 + # by MPI_COMM_WORLD rank 0 (out of a total of 16) on savbu-usnic-a + # + mpi_show_mca_params=enviro (environment) + mpi_show_mca_params_file=/tmp/foo.txt (environment) diff --git a/docs/faq/tuning.rst b/docs/faq/tuning.rst new file mode 100644 index 00000000000..af482effc59 --- /dev/null +++ b/docs/faq/tuning.rst @@ -0,0 +1,13 @@ +Run-Time Tuning +=============== + +Placeholder; haven't converted over the "run time tuning" FAQ entry to +restructured text yet. + +.. _faq-tuning-setting-mca-params-label: + +setting mca params + +.. _faq-tuning-using-paffinity-label: + +blah using paffinity diff --git a/docs/features/extensions.rst b/docs/features/extensions.rst new file mode 100644 index 00000000000..5a821da6a1e --- /dev/null +++ b/docs/features/extensions.rst @@ -0,0 +1,89 @@ +Open MPI extensions +=================== + +Open MPI contains a framework for extending the MPI API that is +available to applications. Each extension is usually a standalone set +of functionality that is distinct from other extensions (similar to +how Open MPI's plugins are usually unrelated to each other). These +extensions provide new functions and/or constants that are available +to MPI applications. + +.. warning:: These extensions are neither standard nor portable to + other MPI implementations! 
+ + They are a mechanism for the Open MPI developer community to + provide new functionality to users, typically before it becomes + standardized by the MPI Forum. + +Available extenions +------------------- + +The following extensions are included in this version of Open MPI: + +#. ``shortfloat``: Provides MPI datatypes ``MPIX_C_FLOAT16``, + ``MPIX_SHORT_FLOAT``, ``MPIX_SHORT_FLOAT``, and + ``MPIX_CXX_SHORT_FLOAT_COMPLEX`` if corresponding language types are + available. See ``ompi/mpiext/shortfloat/README.txt`` for details. +#. ``affinity``: Provides the ``OMPI_Affinity_str()`` API, which returns + a string indicating the resources which a process is bound. For + more details, see its man page. +#. ``cuda``: When the library is compiled with CUDA-aware support, it + provides two things. First, a macro + ``MPIX_CUDA_AWARE_SUPPORT``. Secondly, the function + ``MPIX_Query_cuda_support()`` that can be used to query for support. +#. ``example``: A non-functional extension; its only purpose is to + provide an example for how to create other extensions. +#. ``ftmpi``: An implementation of the User Level Fault Mitigation + (ULFM) proposal. :ref:`See its documentation section ` + for more details. + +Compiling the extensions +------------------------ + +Open MPI extensions are all enabled by default; they can be disabled +via the ``--disable-mpi-ext`` command line switch. + +Since extensions are meant to be used by advanced users only, this +file does not document which extensions are available or what they do. +Look in the ``ompi/mpiext`` directory in a distribution Open MPI +tarball to see the extensions; each subdirectory of that directory +contains an extension. Each has a ``README`` file that describes what +it does. + +Using the extensions +-------------------- + +To reinforce the fact that these extensions are non-standard, you must +include a separate header file after ```` to obtain the function +prototypes, constant declarations, etc. For example: + +.. 
code-block:: c + + #include + #if defined(OPEN_MPI) && OPEN_MPI + #include + #endif + + int main() { + MPI_Init(NULL, NULL); + + #if defined(OPEN_MPI) && OPEN_MPI + char ompi_bound[OMPI_AFFINITY_STRING_MAX]; + char current_binding[OMPI_AFFINITY_STRING_MAX]; + char exists[OMPI_AFFINITY_STRING_MAX]; + + OMPI_Affinity_str(OMPI_AFFINITY_LAYOUT_FMT, ompi_bound, + current_bindings, exists); + #endif + + MPI_Finalize(); + return 0; + } + +Notice that the Open MPI-specific code is surrounded by the ``#if`` +statement to ensure that it is only ever compiled by Open MPI. + +The Open MPI wrapper compilers (``mpicc`` and friends) should +automatically insert all relevant compiler and linker flags necessary +to use the extensions. No special flags or steps should be necessary +compared to "normal" MPI applications. diff --git a/docs/features/index.rst b/docs/features/index.rst new file mode 100644 index 00000000000..3f803bd23dd --- /dev/null +++ b/docs/features/index.rst @@ -0,0 +1,16 @@ +Open MPI-specific features +========================== + +Open MPI has multiple features that are above and beyond what is +specified in the `MPI Standard `_. + +This section does not list *all* Open MPI features that are not +specified in the official MPI Standard, but does list two major +categories of Open MPI-specific features. + +.. toctree:: + :maxdepth: 1 + + extensions + ulfm + java diff --git a/docs/features/java.rst b/docs/features/java.rst new file mode 100644 index 00000000000..cb01ec284c2 --- /dev/null +++ b/docs/features/java.rst @@ -0,0 +1,308 @@ +.. _open-mpi-java-label: + +Open MPI Java bindings +====================== + +Open MPI |ompi_ver| provides support for Java-based MPI applications. + +.. warning:: The Open MPI Java bindings are provided on a + "provisional" basis -- i.e., they are not part of the current or + proposed MPI standards. Thus, inclusion of Java support is not + required by the standard. 
Continued inclusion of the Java bindings + is contingent upon active user interest and continued developer + support. + +The rest of this document provides step-by-step instructions on +building OMPI with Java bindings, and compiling and running Java-based +MPI applications. Also, part of the functionality is explained with +examples. Further details about the design, implementation and usage +of Java bindings in Open MPI can be found in its canonical reference +paper [#ompijava]_. The bindings follow a JNI approach, that is, we do +not provide a pure Java implementation of MPI primitives, but a thin +layer on top of the C implementation. This is the same approach as in +mpiJava [#mpijava]_; in fact, mpiJava was taken as a starting point +for Open MPI Java bindings, but they were later totally rewritten. + +Building the Java Bindings +-------------------------- + +Java support requires that Open MPI be built at least with shared +libraries (i.e., ``--enable-shared``). Note that this is the default +for Open MPI, so you don't have to explicitly add the option. The Java +bindings will build only if ``--enable-mpi-java`` is specified, and a +JDK is found in a typical system default location. + +If the JDK is not in a place where we automatically find it, you can +specify the location. For example, this is required on the Mac +platform as the JDK headers are located in a non-typical location. Two +options are available for this purpose: + +1. ``--with-jdk-bindir=``: the location of ``javac`` and ``javah`` +1. ``--with-jdk-headers=``: the directory containing ``jni.h`` + +For simplicity, typical configurations are provided in platform files +under ``contrib/platform/hadoop``. These will meet the needs of most +users, or at least provide a starting point for your own custom +configuration. + +In summary, therefore, you can configure the system using the +following Java-related options:: + + $ ./configure --with-platform=contrib/platform/hadoop/ ... 
+ +or:: + + $ ./configure --enable-mpi-java --with-jdk-bindir= --with-jdk-headers= ... + +or simply:: + + $ ./configure --enable-mpi-java ... + +if JDK is in a "standard" place that we automatically find. + +Running Java MPI Applications +----------------------------- + +The ``mpijavac`` wrapper compiler is available for compiling +Java-based MPI applications. It ensures that all required Open MPI +libraries and class paths are defined. You can see the actual command +line using the ``--showme`` option, if you are interested. + +Once your application has been compiled, you can run it with the +standard ``mpirun`` command line:: + + $ mpirun java + +``mpirun`` will detect the ``java`` token and ensure that the required +MPI libraries and class paths are defined to support execution. You +therefore do **not** need to specify the Java library path to the MPI +installation, nor the MPI classpath. Any class path definitions +required for your application should be specified either on the +command line or via the ``CLASSPATH`` environment variable. Note that +the local directory will be added to the class path if nothing is +specified. + +.. note:: The ``java`` executable, all required libraries, and your + application classes must be available on all nodes. + +Basic usage of the Java bindings +-------------------------------- + +There is an MPI package that contains all classes of the MPI Java +bindings: ``Comm``, ``Datatype``, ``Request``, etc. These classes have a +direct correspondence with classes defined by the MPI standard. MPI +primitives are just methods included in these classes. The convention +used for naming Java methods and classes is the usual camel-case +convention, e.g., the equivalent of ``MPI_File_set_info(fh,info)`` is +``fh.setInfo(info)``, where ``fh`` is an object of the class ``File``. + +Apart from classes, the MPI package contains predefined public +attributes under a convenience class ``MPI``. 
Examples are the +predefined communicator ``MPI.COMM_WORLD`` or predefined datatypes such +as ``MPI.DOUBLE``. Also, MPI initialization and finalization are methods +of the ``MPI`` class and must be invoked by all MPI Java +applications. The following example illustrates these concepts: + +.. code-block:: java + + import mpi.*; + + class ComputePi { + + public static void main(String args[]) throws MPIException { + + MPI.Init(args); + + int rank = MPI.COMM_WORLD.getRank(), + size = MPI.COMM_WORLD.getSize(), + nint = 100; // Intervals. + double h = 1.0/(double)nint, sum = 0.0; + + for (int i=rank+1; i<=nint; i+=size) { + double x = h * ((double)i - 0.5); + sum += (4.0 / (1.0 + x * x)); + } + + double sBuf[] = { h * sum }, + rBuf[] = new double[1]; + + MPI.COMM_WORLD.reduce(sBuf, rBuf, 1, MPI.DOUBLE, MPI.SUM, 0); + + if (rank == 0) System.out.println("PI: " + rBuf[0]); + MPI.Finalize(); + } + } + +Exception handling +------------------ + +The Java bindings in Open MPI support exception handling. By default, +errors are fatal, but this behavior can be changed. The Java API will +throw exceptions if the ``MPI.ERRORS_RETURN`` error handler is set: + +.. code-block:: java + + MPI.COMM_WORLD.setErrhandler(MPI.ERRORS_RETURN); + +If you add this statement to your program, it will show the line +where it breaks, instead of just crashing in case of an error. +Error-handling code can be separated from main application code by +means of try-catch blocks, for instance: + +.. code-block:: java + + try + { + File file = new File(MPI.COMM_SELF, "filename", MPI.MODE_RDONLY); + } + catch(MPIException ex) + { + System.err.println("Error Message: "+ ex.getMessage()); + System.err.println(" Error Class: "+ ex.getErrorClass()); + ex.printStackTrace(); + System.exit(-1); + } + +How to specify buffers +---------------------- + +In MPI primitives that require a buffer (either send or receive), the +Java API admits a Java array. 
Since Java arrays can be relocated by +the Java runtime environment, the MPI Java bindings need to make a +copy of the contents of the array to a temporary buffer, then pass the +pointer to this buffer to the underlying C implementation. From the +practical point of view, this implies an overhead associated to all +buffers that are represented by Java arrays. The overhead is small for +small buffers but increases for large arrays. + +There is a pool of temporary buffers with a default capacity of 64K. +If a temporary buffer of 64K or less is needed, then the buffer will +be obtained from the pool. But if the buffer is larger, then it will +be necessary to allocate the buffer and free it later. + +The default capacity of pool buffers can be modified with an Open MPI +MCA parameter:: + + shell$ mpirun --mca mpi_java_eager SIZE ... + +Where ``SIZE`` is the number of bytes, or kilobytes if it ends with 'k', +or megabytes if it ends with 'm'. + +An alternative is to use "direct buffers" provided by standard classes +available in the Java SDK such as ``ByteBuffer``. For convenience we +provide a few static methods ``new[Type]Buffer`` in the ``MPI`` class to +create direct buffers for a number of basic datatypes. Elements of the +direct buffer can be accessed with methods ``put()`` and ``get()``, and +the number of elements in the buffer can be obtained with the method +``capacity()``. This example illustrates its use: + +.. 
code-block:: java + + int myself = MPI.COMM_WORLD.getRank(); + int tasks = MPI.COMM_WORLD.getSize(); + + IntBuffer in = MPI.newIntBuffer(MAXLEN * tasks), + out = MPI.newIntBuffer(MAXLEN); + + for (int i = 0; i < MAXLEN; i++) + out.put(i, myself); // fill the buffer with the rank + + Request request = MPI.COMM_WORLD.iAllGather( + out, MAXLEN, MPI.INT, in, MAXLEN, MPI.INT); + request.waitFor(); + request.free(); + + for (int i = 0; i < tasks; i++) { + for (int k = 0; k < MAXLEN; k++) { + if (in.get(k + i * MAXLEN) != i) + throw new AssertionError("Unexpected value"); + } + } + +Direct buffers are available for: ``BYTE``, ``CHAR``, ``SHORT``, +``INT``, ``LONG``, ``FLOAT``, and ``DOUBLE``. There is no direct +buffer for booleans. + +Direct buffers are not a replacement for arrays, because they have +higher allocation and deallocation costs than arrays. In some cases +arrays will be a better choice. You can easily convert a buffer into +an array and vice versa. + +All non-blocking methods must use direct buffers and only +blocking methods can choose between arrays and direct buffers. + +The above example also illustrates that it is necessary to call the +``free()`` method on objects whose class implements the ``Freeable`` +interface. Otherwise, a memory leak is produced. + +Specifying offsets in buffers +----------------------------- + +In a C program, it is common to specify an offset in a array with +``&array[i]`` or ``array+i`` to send data starting from a given +position in the array. The equivalent form in the Java bindings is to +``slice()`` the buffer to start at an offset. Making a ``slice()`` on +a buffer is only necessary, when the offset is not zero. Slices work +for both arrays and direct buffers. + +.. code-block:: java + + import static mpi.MPI.slice; + // ... + int numbers[] = new int[SIZE]; + // ... 
+ MPI.COMM_WORLD.send(slice(numbers, offset), count, MPI.INT, 1, 0); + + +Supported APIs +-------------- + +Complete MPI-3.1 coverage is provided in the Open MPI Java bindings, +with a few exceptions: + +* The bindings for the ``MPI_Neighbor_alltoallw`` and + ``MPI_Ineighbor_alltoallw`` functions are not implemented. + +* Also excluded are functions that incorporate the concepts of + explicit virtual memory addressing, such as + ``MPI_Win_shared_query``. + + +Known issues +------------ + +There exist issues with the Omnipath (PSM2) interconnect involving +Java. The problems definitely exist in PSM2 v10.2; we have not tested +previous versions. + +As of November 2016, there is not yet a PSM2 release that completely +fixes the issue. + +The following ``mpirun`` command options will disable PSM2:: + + shell$ mpirun ... --mca mtl ^psm2 java ...your-java-options... your-app-class + + +Questions? Problems? +--------------------- + +The Java API documentation is generated at build time in +``$prefix/share/doc/openmpi/javadoc``. + +Additionally, `this Cisco blog post +`_ has +quite a bit of information about the Open MPI Java bindings. + +If you have any problems, or find any bugs, please feel free to report +them to `Open MPI user's mailing list +`_. + +.. rubric:: Footnotes + +.. [#ompijava] O. Vega-Gisbert, J. E. Roman, and J. M. Squyres. "Design + and implementation of Java bindings in Open MPI". Parallel Comput. + 59: 1-20 (2016). + +.. [#mpijava] M. Baker et al. "mpiJava: An object-oriented Java + interface to MPI". In Parallel and Distributed Processing, LNCS + vol. 1586, pp. 748-762, Springer (1999). diff --git a/docs/features/ulfm.rst b/docs/features/ulfm.rst new file mode 100644 index 00000000000..202a13f5f5d --- /dev/null +++ b/docs/features/ulfm.rst @@ -0,0 +1,536 @@ +.. 
_ulfm-label: + +User-Level Fault Mitigation (ULFM) +================================== + +This chapter documents the features and options specific to the **User +Level Failure Mitigation (ULFM)** Open MPI implementation. + +Features +-------- + +This implementation conforms to the User Level Failure Mitigation +(ULFM) MPI Standard draft proposal. The ULFM proposal is developed by +the MPI Forum's Fault Tolerance Working Group to support the continued +operation of MPI programs after any type of failures, hard or soft, +have impacted the execution. The key principle is that no MPI call +(point-to-point, collective, RMA, IO, ...) can block indefinitely +after a failure, but must either succeed or raise an MPI +error. Accordingly, the errors are not all fatal, the MPI +implementations will do a best-effort approach to maintain the +execution environment up and running. + +This implementation produces the three supplementary error codes and +five supplementary interfaces defined in the communicator section of +the `ULFM chapter +`_ +standard draft document. + +* ``MPIX_ERR_PROC_FAILED`` when a process failure prevents the + completion of an MPI operation (error code). +* ``MPIX_ERR_PROC_FAILED_PENDING`` when a potential sender matching a + non-blocking wildcard source receive has failed (error code). +* ``MPIX_ERR_REVOKED`` when one of the ranks in the application has + invoked the ``MPI_Comm_revoke`` operation on the communicator (error + code). +* ``MPIX_Comm_revoke(MPI_Comm comm)`` Interrupts any communication + pending on the communicator at all ranks (API). +* ``MPIX_Comm_shrink(MPI_Comm comm, MPI_Comm* newcomm)`` creates a new + communicator where dead processes in comm were removed, and the + remaining processes are renamed to cover all the gaps in the naming + from the original communicator (API). +* ``MPIX_Comm_agree(MPI_Comm comm, int *flag)`` performs a consensus + (i.e. fault tolerant allreduce operation) on flag (with the + operation bitwise AND) (API). 
Absorbs all new failures, and + propagate the knowledge about failures among the participants. +* ``MPIX_Comm_failure_get_acked(MPI_Comm, MPI_Group*)`` obtains the + group of currently acknowledged failed processes (API). +* ``MPIX_Comm_failure_ack(MPI_Comm)`` acknowledges that the + application intends to ignore the effect of currently known failures + on wildcard receive completions and agreement return values (API). + +Supported Systems +----------------- + +There are several MPI communication engines available in Open MPI, +notably: + +* PML: ``ob1``, ``cm``, ``ucx`` +* MTL: ``ofi``, ``portals4``, ``psm2`` + +However, in Open MPI |ompi_ver|, only ``ob1`` is fully adapted to support +fault tolerance. The UCX PML has been successfully tested in some setups, +but at this point we cannot confirm that all UCT devices are fully capable +to provide the necessary features. + +``ob1`` uses BTL ("Byte Transfer Layer") components for each supported +network. ``ob1`` supports a variety of networks that can be used in +combination with each other. Collective operations (blocking and +non-blocking) use an optimized implementation on top of ``ob1``. + +- Loopback (send-to-self) +- TCP +- UCT (InfiniBand) +- uGNI (Cray Gemini, Aries) +- Shared Memory (FT supported with CMA and XPMEM; KNEM is untested) +- Tuned and non-blocking collective communications + +A full list of supported, untested and disabled components is provided +later in this document. + +ULFM web site +------------- + +More information (tutorials, examples, build instructions for leading +top500 systems) is also available in the Fault Tolerance Research +Hub website: https://fault-tolerance.org + +Bibliographic References +------------------------ + +If you are looking for, or want to cite a general reference for ULFM, +please use: + + *Wesley Bland, Aurelien Bouteiller, Thomas Herault, George Bosilca, Jack + J. Dongarra: Post-failure recovery of MPI communication + capability: Design and rationale. 
IJHPCA 27(3): 244-254 (2013).* + +Available from: http://journals.sagepub.com/doi/10.1177/1094342013488238. + +Building ULFM support in Open MPI +--------------------------------- + +In Open MPI |ompi_ver|, ULFM support is **enabled by default** |mdash| +when you build Open MPI, unless you specify ``--without-ft``, ULFM +support will automatically be built. + +Optionally, you can specify ``--with-ft`` to ensure that ULFM support +is definitely built. + +Support notes +^^^^^^^^^^^^^ + +* ULFM Fault Tolerance does not apply to OpenSHMEM. It is recomended + that if you are going to use ULFM, you should disable building + OpenSHMEM with ``--disable-oshmem``. + +* SLURM is tested and supported with fault tolerance. + + .. important:: Do not use ``srun``, or your application gets killed + by the scheduler upon the first failure. Instead, + use ``mpirun`` in an ``salloc/sbatch`` allocation. + +* LSF is untested with fault tolerance. + +* PBS/Torque is tested and supported with fault tolerance. + + .. important:: Be sure to use ``mpirun`` in a ``qsub`` allocation. + +Modified, Untested and Disabled Components +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Frameworks and components which are not listed in the following list +are unmodified and support fault tolerance. Listed frameworks may be +**modified** (and work after a failure), **untested** (and work before +a failure, but may malfunction after a failure), or **disabled** (they +cause unspecified behavior all around when FT is enabled). + +All runtime disabled components are listed in the ``ft-mpi`` aggregate +MCA param file +``$installdir/share/openmpi/amca-param-sets/ft-mpi``. You can tune the +runtime behavior with ULFM by either setting or unsetting variables in +this file (or by overiding the variable on the command line (e.g., +``--mca btl ofi,self``). 
Note that if fault tolerance is disabled at +runtime, these components will load normally (this may change observed +performance when comparing with and without fault tolerance). + +* ``pml``: MPI point-to-point management layer + + * ``monitoring``, ``v``: **untested** (they have not been modified + to handle faults) + * ``cm``, ``crcpw``, ``ucx``: **disabled** + +* ``btl``: Point-to-point Byte Transfer Layer + + * ``ofi``, ``portals4``, ``smcuda``, ``usnic``, ``sm(+knem)``: + **untested** (they may work properly, please report) + +* ``mtl``: Matching transport layer Used for MPI point-to-point messages on + some types of networks + + * All ``mtl`` components are **disabled** + +* ``coll``: MPI collective algorithms + + * ``cuda``, ``inter``, ``sync``, ``sm``: **untested** (they have not + been modified to handle faults, but we expect correct post-fault + behavior) + * ``hcoll``, ``portals4`` **disabled** (they have not been modified + to handle faults, and we expect unspecified post-fault behavior) + +* ``osc``: MPI one-sided communications + + * All ``osc`` components are **untested** (they have not been + modified to handle faults, and we expect unspecified post-fault + behavior) + +* ``io``: MPI I/O and dependent components + + * ``fs``: File system functions for MPI I/O + * ``fbtl``: File byte transfer layer: abstraction for individual + read/write operations for OMPIO + * ``fcoll``: Collective read and write operations for MPI I/O + * ``sharedfp``: Shared file pointer operations for MPI I/O + * All components in these frameworks are unmodified, **untested** + (we expect clean post-failure abort) + +* ``vprotocol``: Checkpoint/Restart components + + * These components have not been modified to handle faults, and are + **untested**. 
+ +* ``threads``, ``wait-sync``: Multithreaded wait-synchronization + object + + * ``argotbots``, ``qthreads``: **disabled** (these components have + not been modified to handle faults; we expect post-failure + deadlock) + + +Running ULFM Open MPI +--------------------- + +Building your application +^^^^^^^^^^^^^^^^^^^^^^^^^ + +As ULFM is still an extension to the MPI standard, you will need to +``#include `` in C, or ``use mpi_ext`` in Fortran to access +the supplementary error codes and functions. + +Compile your application as usual, using the provided ``mpicc`` or +``mpifort`` wrappers. + +Running your application +^^^^^^^^^^^^^^^^^^^^^^^^ + +You can launch your application with fault tolerance by simply using +the normal Open MPI ``mpiexec`` launcher, with the +``--with-ft ulfm`` CLI option: + +.. code-block:: + + shell$ mpirun --with-ft ulfm ... + +Running under a batch scheduler +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +ULFM can operate under a job/batch scheduler, and is tested routinely +with ALPS, PBS, and Slurm. One difficulty comes from the fact that +many job schedulers will "cleanup" the application as soon as any +process fails. In order to avoid this problem, it is preferred that +you use ``mpiexec`` within an allocation (e.g., ``salloc``, +``sbatch``, ``qsub``) rather than a direct launch (e.g., ``srun``). + +Run-time tuning knobs +^^^^^^^^^^^^^^^^^^^^^ + +ULFM comes with a variety of knobs for controlling how it runs. The +default parameters are sane and should result in good performance in +most cases. You can change the default settings with ``--mca +mpi_ft_foo `` for Open MPI options, and with ``--prtemca +errmgr_detector_bar `` for PRTE options. + +PRTE level options +~~~~~~~~~~~~~~~~~~ + +* ``prrte_enable_recovery (default: false)`` controls + automatic cleanup of apps with failed processes within + mpirun. Enabling this option also enables ``mpi_ft_enable``. +* ``errmgr_detector_priority (default 1005``) selects the + PRRTE-based failure detector. 
Only available when + ``prte_enable_recovery`` is ``true``. You can set this to ``0`` when + using the (experimental) Open MPI detector instead. +* ``errmgr_detector_heartbeat_period (default: 5e0)`` controls + the heartbeat period. Recommended value is 1/2 of the timeout. +* ``errmgr_detector_heartbeat_timeout (default: 1e1 seconds)`` + heartbeat timeout (i.e. failure detection speed). Recommended value + is 2 times the heartbeat period. The default setup is tuned for + failure-free performance at the expense of fault detection + reactivity. In environments where faults are expected to be common, + less conservative values can be used (e.g., 100ms); Values lower + than the TCP poll rate (typically 10ms) can cause false positive. + +Open MPI level options +~~~~~~~~~~~~~~~~~~~~~~ + +* ``mpi_ft_enable (default: same as + prrte_enable_recovery)`` permits turning on/off fault tolerance at + runtime. When false, failure detection is disabled; Interfaces + defined by the fault tolerance extensions are substituted with dummy + non-fault tolerant implementations (e.g., ``MPIX_Comm_agree`` is + implemented with ``MPI_Allreduce``); All other controls below become + irrelevant. +* ``mpi_ft_verbose (default: 0)`` increases the output of the + fault tolerance activities. A value of 1 will report detected + failures. +* ``mpi_ft_detector (default: false)``, **EXPERIMENTAL** + controls the activation of the Open MPI level failure detector. When + this detector is turned off, all failure detection is delegated to + PRTE (see above). The Open MPI level fault detector is + experimental. There is a tradeoff between failure detection accuracy + and performance with this detector. Users that experience accuracy + issues may enable a more precise mode. See the tuning knobs below + to adjust to taste; The Open MPI failure detector operates on + ``MPI_COMM_WORLD`` exclusively. 
Processes connected from + ``MPI_COMM_CONNECT``/``ACCEPT`` and ``MPI_COMM_SPAWN`` may + occasionally not be detected when they fail. +* ``mpi_ft_detector_thread (default: false)`` controls + the use of a thread to emit and receive failure detector's + heartbeats. *Setting this value to "true" will also set + MPI_THREAD_MULTIPLE support, which has a noticeable effect on + latency (typically 1us increase).* You may want to **enable this + option if you experience false positive** processes incorrectly + reported as failed with the Open MPI failure detector. +* ``mpi_ft_detector_period (default: 3e0 seconds)`` heartbeat + period. Recommended value is 1/3 of the timeout. _Values lower than + 100us may impart a noticeable effect on latency (typically a 3us + increase)._ +* ``mpi_ft_detector_timeout (default: 1e1 seconds)`` heartbeat + timeout (i.e. failure detection speed). Recommended value is 3 times + the heartbeat period. + +Known Limitations in ULFM +^^^^^^^^^^^^^^^^^^^^^^^^^ + +* InfiniBand support is provided through the UCT BTL; fault tolerant + operation over the UCX PML is not yet supported for production runs. +* TOPO, FILE, RMA are not fault tolerant. They are expected to work + properly before the occurence of the first failure. + +Changelog +--------- + +ULFM Integrated in Open MPI +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +As of |ompi_ver|, ULFM is now integrated directly in to the community +release of Open MPI. The following sections describe previous ULFM +standlone releases. + +ULFM Standalone Release 4.0.2u1 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +This is a stability and upstream parity upgrade. It is based on the +most current Open MPI Release (v4.0.2, October 2019). + +* This release is based on Open MPI release v4.0.2 (ompi #cb5f4e737a). +* This release is based on ULFM master (ulfm #0e249ca1). +* New features + + * Support for the UCT BTL enters beta stage. + +* Bugfixes + + * High sensitivity to noise in the failure detector. 
+ * Deadlocks when revoking while BTL progress threads are updating messages. + * A case where the failure detector would keep observing a dead + process forever. + * Disable the use of external pmix/libevent by default (the + internals are modified to handle error cases). + * Clean error paths leaving some rdma registration dangling. + * Do not remove the orte job/proc session dir prematurely upon + error. + +ULFM Standalone Release 4.0.1u1 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +This is a stability and upstream parity upgrade. It improves +stability, performance and is based on the most current Open MPI +Release (v4.0.1, May 2019). + +* This release is based on Open MPI release v4.0.1 (ompi #b780667). +* This release is based on ULFM master (ulfm #cf8dc43f). +* New features + + * Addition of the ``MPI_Comm_is_revoked`` function + * Renamed ``ftbasic`` collective component to ``ftagree`` + * Restored the ``pcollreq`` extension + +* Bugfixes + + * Failures of node-local siblings were not always detected + * Failure propagation and detection was slowed down by trying to + notify known dead processes + * There were deadlocks in multithreaded programs + * There were issues with PMPI when compiling Fortran Interfaces + * There were deadlocks on OS-X + +ULFM Standalone Release 2.1 +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +This release is a bugfix and upstream parity upgrade. It improves +stability, performance and is based on the most current Open MPI +master (November 2018). + +* ULFM is now based upon Open MPI master branch (#37954b5f). +* ULFM tuning MCA parameters are exposed by ``ompi_info``. +* Fortran 90 bindings have been updated +* Bugfixes: + + * Correct the behavior of process placement during an MPI_COMM_SPAWN + when some slots were occupied by failed processes. + * MPI_COMM_SPAWN accepts process placement directives in the Info object. + * Fixed deadlocks in some NBC collective operations. + * Crashes and deadlocks in MPI_FINALIZE have been resolved.
+ * Any-source requests that returned with an error status of + MPIX_PROC_FAILED_PENDING can now correctly complete during later + MPI_WAIT/TEST. + +ULFM Standalone Release 2.0 +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Focus has been toward integration with current Open MPI master +(November 2017), performance, and stability. + +* ULFM is now based upon Open MPI master branch (#689f1be9). It will + be regularly updated until it will eventually be merged. +* Fault Tolerance is enabled by default and is controlled with MCA variables. +* Added support for multithreaded modes (MPI_THREAD_MULTIPLE, etc.) +* Added support for non-blocking collective operations (NBC). +* Added support for CMA shared memory transport (Vader). +* Added support for advanced failure detection at the MPI level. + Implements the algorithm described in "Failure detection and + propagation in HPC systems." . +* Removed the need for special handling of CID allocation. +* Non-usable components are automatically removed from the build + during configure +* RMA, FILES, and TOPO components are enabled by default, and usage in + a fault tolerant execution warns that they may cause undefined + behavior after a failure. +* Bugfixes: + + * Code cleanup and performance cleanup in non-FT builds; --without-ft at + configure time gives an almost stock Open MPI. + * Code cleanup and performance cleanup in FT builds with FT runtime disabled; + --mca ft_enable_mpi false thoroughly disables FT runtime activities. + * Some error cases would return ERR_PENDING instead of ERR_PROC_FAILED in + collective operations. + * Some test could set ERR_PENDING or ERR_PROC_FAILED instead of + ERR_PROC_FAILED_PENDING for ANY_SOURCE receptions. + +ULFM Standalone Release 1.1 +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Focus has been toward improving stability, feature coverage for +intercomms, and following the updated specification for +MPI_ERR_PROC_FAILED_PENDING. 
+ +* Forked from Open MPI 1.5.5 devel branch +* Addition of the MPI_ERR_PROC_FAILED_PENDING error code, as per newer + specification revision. Properly returned from point-to-point, + non-blocking ANY_SOURCE operations. +* Alias MPI_ERR_PROC_FAILED, MPI_ERR_PROC_FAILED_PENDING and + MPI_ERR_REVOKED to the corresponding standard blessed -extension- + names MPIX_ERR_xxx. +* Support for Intercommunicators: + + * Support for the blocking version of the agreement, MPI_COMM_AGREE + on Intercommunicators. + * MPI_COMM_REVOKE tested on intercommunicators. + +* Disabled completely (.ompi_ignore) many untested components. +* Changed the default ORTE failure notification propagation + aggregation delay from 1s to 25ms. +* Added an Open MPI internal failure propagator; failure propagation + between SM domains is now immediate. +* Bugfixes: + + * SendRecv would not always report MPI_ERR_PROC_FAILED correctly. + * SendRecv could incorrectly update the status with errors + pertaining to the Send portion of the Sendrecv. + * Revoked send operations are now always completed or remote + cancelled and may not deadlock anymore. + * Cancelled send operations to a dead peer will not trigger an + assert when the BTL reports that same failure. + * Repeat calls to operations returning MPI_ERR_PROC_FAILED will + eventually return MPI_ERR_REVOKED when another process revokes the + communicator. + +ULFM Standalone Release 1.0 +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Focus has been toward improving performance, both before and after the +occurrence of failures. The list of new features includes: + +* Support for the non-blocking version of the agreement, MPI_COMM_IAGREE. +* Compliance with the latest ULFM specification draft. In particular, + the MPI_COMM_(I)AGREE semantic has changed. +* New algorithm to perform agreements, with a truly logarithmic + complexity in number of ranks, which translates into huge + performance boosts in MPI_COMM_(I)AGREE and MPI_COMM_SHRINK.
+* New algorithm to perform communicator revocation. MPI_COMM_REVOKE + performs a reliable broadcast with a fixed maximum output degree, + which scales logarithmically with the number of ranks. +* Improved support for our traditional network layer: + + * TCP: fully tested + * SM: fully tested (with the exception of XPMEM, which remains unsupported) + +* Added support for High Performance networks + + * Open IB: reasonably tested + * uGNI: reasonably tested + +* The tuned collective module is now enabled by default (reasonably + tested), expect a huge performance boost compared to the former + basic default setting + + * Back-ported PBS/ALPS fixes from Open MPI + * Back-ported OpenIB bug/performance fixes from Open MPI + * Improve Context ID allocation algorithm to reduce overheads of + Shrink + * Miscellaneous bug fixes + +Binary Compatibility +^^^^^^^^^^^^^^^^^^^^ + +ULFM Open MPI is binary compatible with any version of Open MPI +compatible with the underlying Open MPI master branch or release (see +the binary compatibility and version number section in the upstream +Open MPI README). That is, applications compiled with a compatible +Open MPI can run with the ULFM Open MPI ``mpirun`` and MPI +libraries. Conversely, *as long as the application does not employ one +of the MPIX functions,* which are exclusively defined in ULFM Open +MPI, an application compiled with ULFM Open MPI can be launched with a +compatible Open MPI ``mpirun`` and run with the non-fault tolerant MPI +library. + +Contacting the Authors +---------------------- + +Found a bug? Got a question? Want to make a suggestion? Want to +contribute to ULFM Open MPI? Working on a cool use-case? +Please let us know! 
+ +The best way to report bugs, send comments, or ask questions is to +sign up on the user's mailing list: ulfm+subscribe@googlegroups.com + +Because of spam, only subscribers are allowed to post to these lists +(ensure that you subscribe with and post from exactly the same e-mail +address -- joe@example.com is considered different than +joe@mycomputer.example.com!). Visit these pages to subscribe to the +lists: https://groups.google.com/forum/#!forum/ulfm + +When submitting questions and problems, be sure to include as much +extra information as possible. This web page details all the +information that we request in order to provide assistance: +http://www.open-mpi.org/community/help/ + +Thanks for your time. + + +ULFM Copyright +-------------- + +Copyright (c) 2012-|year| The University of Tennessee and The +University of Tennessee Research Foundation. All rights reserved. diff --git a/docs/getting-help.rst b/docs/getting-help.rst new file mode 100644 index 00000000000..e564a48264c --- /dev/null +++ b/docs/getting-help.rst @@ -0,0 +1,240 @@ +Getting help +============ + +If you have a problem or question, it is highly recommended that you +execute the following steps **in order**. Many people have similar +problems with configuration and initial setup of Open MPI |mdash| it +is possible that your question has already been answered. + +#. :doc:`Validate your Open MPI installation `. This + ensures that you have a nominally-correct Open MPI installation. + +#. `Check prior GitHub issues + `_ and see if others have + asked the same question and had it answered. + +#. `Check the mailing list archives + `_ "search" + features (or use Google) to check old posts and see if others have + asked the same question and had it answered. + +#. If you do not find a solution to your problem in the above + resources, proceed to the :ref:`Where to send? + ` section. + +.. _getting-help-where-to-send-label: + +Where to send? 
+-------------- + +Different types of questions and problems should be sent to different +places. If you have: + +#. **A general end-user question or problem:** you should probably + subscribe to the `Open MPI user's mailing list + `_ and post it + there. + + .. note:: Because of spam, only subscribers to the mailing list are + allowed to post to the mailing list. Specifically: you must + subscribe to the mailing list before posting. + + * If you have a run-time question or problem, see the :ref:`For + run-time problems ` section below for + the content of what to include in your email. + * If you have a compile-time question or problem, see the :ref:`For + compile-time problems ` section + below for the content of what to include in your email. + + .. note:: The mailing lists have **a 150 KB size limit on + messages** (this is a limitation of the mailing list web + archives). If attaching your files results in an email larger + than this, please try compressing it and/or posting it on the + web somewhere for people to download. A `Github Gist + `_ or a `Pastebin + `_ might be an easy choice for posting + large text files. + + .. important:: Please **use a descriptive "subject" line in your + email!** Some Open MPI question-answering people decide whether + to read a mail based on its subject line (e.g., to see if it's a + question that they can answer). So please please please use a + good subject line that succinctly describes your problem. + +#. **A bug report:** you should probably post it to `Open MPI's Github + issue tracker `_. Follow + the template to submit all the requested information. + +#. **A patch, bug fix, or other code submission:** please post a Github + Pull Request to the `Open MPI Github repository + `_. + +#. **A developer-level / internal question about Open MPI itself:** you + should probably subscribe to the `Open MPI developer's mailing list + `_ and post it + there.
+ +If you're unsure where to send your question, subscribe and send an +email to the user's mailing list. + +.. _getting-help-run-time-label: + +For run-time problems +--------------------- + +Please provide *all* of the following information: + +.. important:: The more information you include in your report, the + better. E-mails/bug reports simply stating, "It doesn't work!" + are not helpful; we need to know as much information about your + environment as possible in order to provide meaningful assistance. + + **The best way to get help** is to provide a "recipe" for + reproducing the problem. This will allow the Open MPI developers + to see the error for themselves, and therefore be able to fix it. + +#. The version of Open MPI that you're using. + +#. The ``config.log`` file from the top-level Open MPI directory, if + available (**compress or post to a Github gist or Pastebin**). + +#. The output of the ``ompi_info --all`` command from the node where + you're invoking ``mpirun``. + +#. If you have questions or problems about process affinity / + binding, send the output from running the ``lstopo -v`` + command from a recent version of `Hwloc + `_. *The detailed + text output is preferable to a graphical output.* + +#. If running on more than one node |mdash| especially if you're + having problems launching Open MPI processes |mdash| also include + the output of the ``ompi_info --version`` command **from each node + on which you're trying to run**. + + #. If you are able to launch MPI processes, you can use + ``mpirun`` to gather this information. For example, if + the file ``my_hostfile.txt`` contains the hostnames of the + machines on which you are trying to run Open MPI + processes:: + + shell$ mpirun --map-by node --hostfile my_hostfile.txt --output tag ompi_info --version + + + #. If you cannot launch MPI processes, use some other mechanism + |mdash| such as ``ssh`` |mdash| to gather this information.
For + example, if the file ``my_hostfile.txt`` contains the hostnames + of the machines on which you are trying to run Open MPI + processes: + + .. code-block:: sh + + # Bourne-style shell (e.g., bash, zsh, sh) + shell$ for h in `cat my_hostfile.txt` + > do + > echo "=== Hostname: $h" + > ssh $h ompi_info --version + > done + + .. code-block:: sh + + # C-style shell (e.g., csh, tcsh) + shell% foreach h (`cat my_hostfile.txt`) + foreach? echo "=== Hostname: $h" + foreach? ssh $h ompi_info --version + foreach? end + +#. A *detailed* description of what is failing. The more + details that you provide, the better. E-mails saying "My + application doesn't work!" will inevitably be answered with + requests for more information about *exactly what doesn't + work*; so please include as much detailed information in your + initial e-mail as possible. We strongly recommend that you + include the following information: + + * The exact command used to run your application. + + * Any relevant MCA parameters that were set (or unset) when + you ran (from either the command line, environment, + parameter file, etc.). + + * The value of the ``PATH`` and ``LD_LIBRARY_PATH`` + environment variables (did you set them correctly to point + to all relevant executables, the Open MPI libraries, and + any required support libraries, such as libraries required + for high-speed networks such as InfiniBand). + +#. Detailed information about your network: + + .. error:: TODO Update link to IB FAQ entry. + + #. For RoCE- or InfiniBand-based networks, include the information + :ref:`in this FAQ entry `. + + #. For Ethernet-based networks (including RoCE-based networks), + include the output of the ``ip addr`` command (or the legacy + ``ifconfig`` command) on all relevant nodes. + + .. note:: Some Linux distributions do not put ``ip`` or + ``ifconfig`` in the default ``PATH`` of normal users. + Try looking for it in ``/sbin`` or ``/usr/sbin``. + +..
_getting-help-compile-time-label: + +For compile problems +-------------------- + +Please provide *all* of the following information: + +.. important:: The more information you include in your report, the + better. E-mails/bug reports simply stating, "It doesn't work!" + are not helpful; we need to know as much information about your + environment as possible in order to provide meaningful assistance. + + **The best way to get help** is to provide a "recipe" for + reproducing the problem. This will allow the Open MPI developers + to see the error for themselves, and therefore be able to fix it. + +#. The version of Open MPI that you're using. + +#. All output (both compilation output and run time output, including + all error messages). + +#. Output from when you ran ``./configure`` to configure Open MPI + (**compress or post to a GitHub gist or Pastebin!**). + +#. The ``config.log`` file from the top-level Open MPI directory + (**compress or post to a GitHub gist or Pastebin!**). + +#. Output from when you ran ``make V=1`` to build Open MPI (**compress + or post to a GitHub gist or Pastebin!**). + +#. Output from when you ran ``make install`` to install Open MPI + (**compress or post to a GitHub gist or Pastebin!**). + +To capture the output of the configure and make steps, you can use the +script command or the following technique to capture all the files in +a unique directory, suitable for tarring and compressing into a single +file: + +.. code-block:: sh + + # Bourne-style shell (e.g., bash, zsh, sh) + shell$ mkdir $HOME/ompi-output + shell$ ./configure {options} 2>&1 | tee $HOME/ompi-output/config.out + shell$ make all 2>&1 | tee $HOME/ompi-output/make.out + shell$ make install 2>&1 | tee $HOME/ompi-output/make-install.out + shell$ cd $HOME + shell$ tar jcvf ompi-output.tar.bz2 ompi-output + +..
code-block:: sh + + # C-style shell (e.g., csh, tcsh) + shell% mkdir $HOME/ompi-output + shell% ./configure {options} |& tee $HOME/ompi-output/config.out + shell% make all |& tee $HOME/ompi-output/make.out + shell% make install |& tee $HOME/ompi-output/make-install.out + shell% cd $HOME + shell% tar jcvf ompi-output.tar.bz2 ompi-output + +Then attach the resulting ``ompi-output.tar.bz2`` file to your report. diff --git a/docs/history.rst b/docs/history.rst new file mode 100644 index 00000000000..b701acd7803 --- /dev/null +++ b/docs/history.rst @@ -0,0 +1,75 @@ +History of Open MPI +=================== + +Open MPI represents the merger of three prior MPI implementations: + +#. LAM/MPI: originally from the Ohio State University supercomputing + center and later migrated to the University of Notre Dame. +#. LA-MPI: from the US Department of Energy Los Alamos National + Laboratory. +#. FT-MPI: from the University of Tennessee at Knoxville. One of the + UTK developers moved back to the University of Stuttgart in late + 2004, which effectively added their team into the project. + +The lead developers of these projects kept bumping into each other at +various HPC conferences in 2003. At each conference, our +lunch/dinner-table conversations got more and more technically +involved when it finally dawned on us that we are doing a *lot* of the +same things in each of our respective implementations. Although each +MPI implementation focused on different areas of excellence, we all +shared the same common core values: + +* A full MPI implementation +* Production quality code -- it has to "just work" +* A desire to explore lots of things that an MPI implementation can do + that we've never had the time/resources to investigate because we + are bound to bug fixing, etc. + +Hence, we decided to collaborate and pool our resources. At SC2003, +we decided to start an entirely new code base -- leaving all the cruft +and legacy code of our prior implementations behind.
Take the best, +leave the rest. The source tree's first commit was on November 22, +2003; development work started in earnest on January 5, 2004. Since +then, we have met together as a group once a month (for at least a +week) to meet our goal of a world-class MPI implementation, bar none. + + +Goals of the Open MPI Project +----------------------------- + +We have several top-level goals: + +#. Create a free, open source, peer-reviewed, production-quality + complete MPI implementation. +#. Provide extremely high, competitive performance (latency, + bandwidth, ...pick your favorite metric). +#. Directly involve the HPC community with external development + and feedback (vendors, 3rd party researchers, users, etc.). +#. Provide a stable platform for 3rd party research and commercial + development. +#. Support a wide variety of HPC platforms and environments. + +In short, we want to work *with* and *for* the HPC community to make a +world-class MPI implementation that can be used on a huge number and +kind of systems. + + +Community +--------- + +Bringing together smart researchers and developers to work on a common +product is not only a good idea, it's the open source model. The Open +MPI project started by multiple MPI implementation teams, and that +proved to work *extremely* well; extending this concept to the HPC +open source community is the next logical step. + +The component architecture upon which Open MPI is founded is designed +to foster 3rd party collaboration by enabling independent developers +to use Open MPI as a production quality research platform. Although +Open MPI is a relatively large code base, it is not necessary to learn +the entirety of it; it may be sufficient to learn the interfaces for +the component type which you are implementing and some of the +surrounding infrastructure. 
Specifically, the component architecture +was designed to allow small, discrete implementations of major +portions of MPI functionality (e.g., point-to-point messaging, +collective communications, run-time environment support, etc.). diff --git a/docs/index.rst b/docs/index.rst new file mode 100644 index 00000000000..8c6a939b051 --- /dev/null +++ b/docs/index.rst @@ -0,0 +1,85 @@ +.. image:: openmpi_logo.png + :align: right + +Open MPI +======== + +`The Open MPI Project `_ is an open source +implementation of the `Message Passing Interface (MPI) specification +`_ that is developed and maintained +by a consortium of academic, research, and industry partners. Open +MPI is therefore able to combine the expertise, technologies, and +resources from all across the High Performance Computing community in +order to build the best MPI library available. Open MPI offers +advantages for system and software vendors, application developers and +computer science researchers. + +Other documentation +=================== + +Documentation for other versions of Open MPI can be found in the following +locations: + +.. list-table:: + :header-rows: 1 + + * - Open MPI version + - Documentation location + + * - v5.0.0 and later + - Open MPI documentation has consolidated and moved to + ReadTheDocs.io. + + This particular documentation is for |ompi_ver|; use the + selector in the + + top-left of the navigation column to select + documentation for different versions. + + * - v4.1.x and earlier + - See the `legacy Open MPI FAQ `_ + and the README file in the source tarball. + + For example: + + * `v4.1.x README file `_ + * `v4.0.x README file `_ + +Release announcements +===================== + +The best way to hear about new Open MPI releases is via the +`low-volume announcement mailing list +`_. + +Additionally, if you are a downstream packager of Open MPI (e.g., you +package Open MPI for an operating system distribution), you may wish +to sign up for the `low-volume packagers list +`_.
+ +Table of contents +================= + +.. toctree:: + :maxdepth: 2 + :numbered: + + to-do + quickstart + getting-help + release-notes/index + installing-open-mpi/index + features/index + validate + version-numbering + building-apps/index + running-apps/index + networking/index + faq/index + developers/index + contributing + license/index + history + news/index + man-openmpi/index + man-openshmem/index diff --git a/docs/installing-open-mpi/compilers-and-flags.rst b/docs/installing-open-mpi/compilers-and-flags.rst new file mode 100644 index 00000000000..f3ce7418826 --- /dev/null +++ b/docs/installing-open-mpi/compilers-and-flags.rst @@ -0,0 +1,112 @@ +.. _install-configure-compilers-and-flags-label: + +Specifying compilers and flags +============================== + +Changing the compilers that Open MPI uses to build itself uses the +standard Autoconf mechanism of setting special environment variables +either before invoking ``configure`` or on the ``configure`` command +line itself. The following environment variables are recognized by +``configure``: + +* ``CC``: C compiler to use +* ``CFLAGS``: Compile flags to pass to the C compiler +* ``CPPFLAGS``: Preprocessor flags to pass to the C compiler +* ``CXX``: C++ compiler to use +* ``FC``: Fortran compiler to use +* ``FCFLAGS``: Compile flags to pass to the Fortran compiler +* ``LDFLAGS``: Linker flags to pass to all compilers +* ``LIBS``: Libraries to pass to all compilers (it is rarely + necessary for users to need to specify additional ``LIBS``) +* ``PKG_CONFIG``: Path to the ``pkg-config`` utility + +.. note:: Open MPI |ompi_ver| does not contain any C++ code. Hence, + specifying ``CXXFLAGS`` or ``CXXCPPFLAGS`` is useless (but + harmless). The value of ``CC`` is used as the compiler for the + ``mpic++`` wrapper compiler, however. + +For example, to build with a specific instance of GCC:: + + shell$ ./configure \ + CC=/opt/gcc-a.b.c/bin/gcc \ + CXX=/opt/gcc-a.b.c/bin/g++ \ + FC=/opt/gcc-a.b.c/bin/gfortran ...
+ +Here's another example, this time showing building with the Intel +compiler suite:: + + shell$ ./configure CC=icc CXX=icpc FC=ifort ... + +.. note:: We generally suggest using the above command line form for + setting different compilers (vs. setting environment variables and + then invoking ``./configure``). The above form will save all + variables and values in the ``config.log`` file, which makes + post-mortem analysis easier if problems occur. + +Note that the flags you specify must be compatible across all the +compilers. In particular, flags specified to one language compiler +must generate code that can be compiled and linked against code that +is generated by the other language compilers. For example, on a 64 +bit system where the compiler default is to build 32 bit executables: + +.. code-block:: sh + + # Assuming the GNU compiler suite + shell$ ./configure CFLAGS=-m64 ... + +will produce 64 bit C objects, but 32 bit objects for Fortran. These +codes will be incompatible with each other, and Open MPI will not build +successfully. Instead, you must specify building 64 bit objects for +*all* languages: + +.. code-block:: sh + + # Assuming the GNU compiler suite + shell$ ./configure CFLAGS=-m64 FCFLAGS=-m64 ... + +The above command line will pass ``-m64`` to all the compilers, and +therefore will produce 64 bit objects for all languages. + +.. warning:: Note that setting ``CFLAGS`` (etc.) does *not* affect the + flags used by the wrapper compilers. In the above example, you + may also need to add ``-m64`` to various ``--with-wrapper-FOO`` + options: + + .. code-block:: + + shell$ ./configure CFLAGS=-m64 FCFLAGS=-m64 \ + --with-wrapper-cflags=-m64 \ + --with-wrapper-cxxflags=-m64 \ + --with-wrapper-fcflags=-m64 ... + + Failure to do this will result in MPI applications failing to + compile / link properly.
+ +Note that if you intend to compile Open MPI with a ``make`` other than +the default one in your ``PATH``, then you must either set the ``$MAKE`` +environment variable before invoking Open MPI's ``configure`` script, or +pass ``MAKE=your_make_prog`` to configure. For example: + +.. code-block:: sh + + shell$ ./configure MAKE=/path/to/my/make ... + +This could be the case, for instance, if you have a shell alias for +``make``, or you always type ``gmake`` out of habit. Failure to tell +``configure`` which non-default ``make`` you will use to compile Open MPI +can result in undefined behavior (meaning: don't do that). + +Note that you may also want to ensure that the value of +``LD_LIBRARY_PATH`` is set appropriately (or not at all) for your build +(or whatever environment variable is relevant for your operating +system). For example, some users have been tripped up by setting to +use a non-default Fortran compiler via the ``FC`` environment variable, +but then failing to set ``LD_LIBRARY_PATH`` to include the directory +containing that non-default Fortran compiler's support libraries. +This causes Open MPI's ``configure`` script to fail when it tries to +compile / link / run simple Fortran programs. + +It is required that the compilers specified be compile and link +compatible, meaning that object files created by one compiler must be +able to be linked with object files from the other compilers and +produce correctly functioning executables. diff --git a/docs/installing-open-mpi/configure-cli-options/conventions.rst b/docs/installing-open-mpi/configure-cli-options/conventions.rst new file mode 100644 index 00000000000..8fefbf1ee29 --- /dev/null +++ b/docs/installing-open-mpi/configure-cli-options/conventions.rst @@ -0,0 +1,58 @@ +.. 
_building-ompi-cli-options-conventions-label: + +``configure`` CLI option conventions +------------------------------------ + +``configure`` will, by default, search for header files and/or +libraries for various optional features (e.g., various HPC network +API/support libraries). If the relevant files are found, Open MPI +will build support for that feature. If they are not found, Open MPI +will skip building support for that feature. + +However, if you specify ``--with-FOO`` (where ``FOO`` is the +corresponding CLI option name for the feature) on the ``configure`` +command line and Open MPI is unable to find relevant support for +``FOO``, ``configure`` will assume that it was unable to provide a +feature that was specifically requested, and will therefore abort so +that a human can resolve the issue. + +.. note:: Using ``--with-FOO`` to force Open MPI's ``configure`` + script to abort if it can't find support for a given feature + may be preferable to unexpectedly discovering at run-time + that Open MPI is missing support for a critical feature. + +Additionally, if a search directory is specified for ``FOO`` in the +form ``--with-FOO=DIR``, Open MPI will: + +#. Search for ``FOO``'s header files in ``DIR/include``. +#. Search for ``FOO``'s library files: + + #. If ``--with-FOO-libdir=LIBDIR`` was specified, search in + ``LIBDIR``. + #. Otherwise, search in ``DIR/lib``, and if they are not found + there, search again in ``DIR/lib64``. + +#. If both the relevant header files and libraries are found: + + #. Open MPI will build support for ``FOO``. + #. If the root path where the FOO libraries are found is neither + ``/usr`` nor ``/usr/local``, Open MPI will compile itself with + RPATH flags pointing to the directory where ``FOO``'s libraries + are located. + + ..
important:: Open MPI does not RPATH ``/usr/lib[64]`` and + ``/usr/local/lib[64]`` because many systems + already search these directories for run-time + libraries by default; adding RPATH for them could + have unintended consequences for the search path + ordering. + +.. caution:: The ``--with-FOO-libdir=LIBDIR`` options are not usually + needed; they are typically only needed when ``FOO``'s libraries are + installed in an "unexpected" location. + + Also note the difference between ``--with-FOO=DIR`` and + ``--with-FOO-libdir=LIBDIR``: the former is a directory to which + suffixes such as ``/include`` and ``/lib`` are added, whereas the + latter is assumed to be a full library directory name (e.g., + ``/opt/some_library/lib``). diff --git a/docs/installing-open-mpi/configure-cli-options/index.rst b/docs/installing-open-mpi/configure-cli-options/index.rst new file mode 100644 index 00000000000..f569644a0ed --- /dev/null +++ b/docs/installing-open-mpi/configure-cli-options/index.rst @@ -0,0 +1,21 @@ +``configure`` command line options +================================== + +There are many available ``configure`` command line options; see +``./configure --help`` for a full list. + +Conventions for the command line options and many commonly-used +options (but not all of them!) are described in the sections listed +below. + +.. toctree:: + :maxdepth: 1 + + conventions + installation + networking + runtime + support-libraries + mpi + oshmem + misc diff --git a/docs/installing-open-mpi/configure-cli-options/installation.rst b/docs/installing-open-mpi/configure-cli-options/installation.rst new file mode 100644 index 00000000000..6729fa0b232 --- /dev/null +++ b/docs/installing-open-mpi/configure-cli-options/installation.rst @@ -0,0 +1,245 @@ +.. 
This file is included by building-open-mpi.rst + +Installation options +^^^^^^^^^^^^^^^^^^^^ + +The following are general installation command line options that can +be used with ``configure``: + +* ``--prefix=DIR``: + Install Open MPI into the base directory named ``DIR``. Hence, Open + MPI will place its executables in ``DIR/bin``, its header files in + ``DIR/include``, its libraries in ``DIR/lib``, etc. + + .. note:: Also see the section on :ref:`installation location + ` for more + information on the installation prefix. + +* ``--disable-shared``: By default, Open MPI and OpenSHMEM build + shared libraries, and all components are included as part of those + shared libraries. This switch disables this default; it is really + only useful when used with ``--enable-static``. Specifically, this + option does *not* imply ``--enable-static``; enabling static + libraries and disabling shared libraries are two independent + options. + +* ``--enable-static``: + Build MPI and OpenSHMEM as static libraries, and statically link in + all components. Note that this option does *not* imply + ``--disable-shared``; enabling static libraries and disabling shared + libraries are two independent options. + + Be sure to read the description of ``--without-memory-manager``, + below; it may have some effect on ``--enable-static``. + +* ``--disable-wrapper-rpath``: + By default, the wrapper compilers (e.g., ``mpicc``) will enable + "rpath" support in generated executables on systems that support it. + That is, they will include a file reference to the location of Open + MPI's libraries in the application executable itself. This means + that the user does not have to set ``LD_LIBRARY_PATH`` to find Open + MPI's libraries (e.g., if they are installed in a location that the + run-time linker does not search by default). + + On systems that utilize the GNU ``ld`` linker, recent enough versions + will actually utilize "runpath" functionality, not "rpath". 
There
+ is an important difference between the two:
+
+ #. "rpath": the location of the Open MPI libraries is hard-coded into
+ the MPI/OpenSHMEM application and cannot be overridden at
+ run-time.
+ #. "runpath": the location of the Open MPI libraries is hard-coded into
+ the MPI/OpenSHMEM application, but can be overridden at run-time
+ by setting the ``LD_LIBRARY_PATH`` environment variable.
+
+ For example, consider that you install Open MPI vA.B.0 and
+ compile/link your MPI/OpenSHMEM application against it. Later, you
+ install Open MPI vA.B.1 to a different installation prefix (e.g.,
+ ``/opt/openmpi/A.B.1`` vs. ``/opt/openmpi/A.B.0``), and you leave the old
+ installation intact.
+
+ In the rpath case, your MPI application will always use the
+ libraries from your A.B.0 installation. In the runpath case, you
+ can set the ``LD_LIBRARY_PATH`` environment variable to point to the
+ A.B.1 installation, and then your MPI application will use those
+ libraries.
+
+ Note that in both cases, however, if you remove the original A.B.0
+ installation and set ``LD_LIBRARY_PATH`` to point to the A.B.1
+ installation, your application will use the A.B.1 libraries.
+
+ This rpath/runpath behavior can be disabled via
+ ``--disable-wrapper-rpath``.
+
+ If you would like to keep the rpath option, but not enable runpath,
+ a different ``configure`` option is available:
+ ``--disable-wrapper-runpath``.
+
+* ``--enable-dlopen``: Enable Open MPI to load components as
+ standalone Dynamic Shared Objects (DSOs) at run-time. This option
+ is enabled by default.
+
+ The opposite of this option, ``--disable-dlopen``, causes the following:
+
+ #. Open MPI will not attempt to open any DSOs at run-time.
+ #. configure behaves as if the ``--enable-mca-static`` argument was set.
+ #. configure will ignore the ``--enable-mca-dso`` argument.
+
+ See the description of ``--enable-mca-static`` / ``--enable-mca-dso`` for
+ more information.
+
+ .. 
note:: This option does *not* change how Open MPI's libraries + (``libmpi``, for example) will be built. You can change + whether Open MPI builds static or dynamic libraries via + the ``--enable|disable-static`` and + ``--enable|disable-shared`` arguments. + +.. _building-ompi-cli-options-mca-dso-label: + +* ``--enable-mca-dso[=LIST]`` and ``--enable-mca-static[=LIST]`` + These two options, along with ``--enable-mca-no-build``, govern the + behavior of how Open MPI's frameworks and components are built. + + The ``--enable-mca-dso`` option specifies which frameworks and/or + components are built as Dynamic Shared Objects (DSOs). + Specifically, DSOs are built as "plugins" outside of the core Open + MPI libraries, and are loaded by Open MPI at run time. + + The ``--enable-mca-static`` option specifies which frameworks and/or + components are built as part of the core Open MPI libraries (i.e., + they are not built as DSOs, and therefore do not need to be + separately discovered and opened at run time). + + Both options can be used one of two ways: + + #. ``--enable-mca-OPTION`` (with no value) + #. ``--enable-mca-OPTION=LIST`` + + ``--enable-mca-OPTION=no`` or ``--disable-mca-OPTION`` are both legal + options, but have no impact on the selection logic described below. + Only affirmative options change the selection process. + + ``LIST`` is a comma-delimited list of Open MPI frameworks and/or + framework+component tuples. Examples: + + * ``btl`` specifies the entire BTL framework + * ``btl-tcp`` specifies just the TCP component in the BTL framework + * ``mtl,btl-tcp`` specifies the entire MTL framework and the TCP + component in the BTL framework + + Open MPI's ``configure`` script uses the values of these two options + when evaluating each component to determine how it should be built + by evaluating these conditions in order: + + #. If an individual component's build behavior has been specified + via these two options, ``configure`` uses that behavior. + #. 
Otherwise, if the component is in a framework whose build
+ behavior has been specified via these two options, ``configure``
+ uses that behavior.
+ #. Otherwise, ``configure`` uses the global default build behavior.
+
+ At each level of the selection process, if the component is
+ specified to be built as both a static and dso component, the static
+ option will win.
+
+ .. note:: As of Open MPI |ompi_ver|, ``configure``'s global default
+ is to build all components as static (i.e., part of the
+ Open MPI core libraries, not as DSOs). Prior to Open MPI
+ 5.0.0, the global default behavior was to build
+ most components as DSOs.
+
+ .. important:: If the ``--disable-dlopen`` option is specified, then
+ Open MPI will not be able to search for DSOs at run
+ time, and the value of the ``--enable-mca-dso``
+ option will be silently ignored.
+
+ Some examples:
+
+ #. Default to building all components as static (i.e., as part of
+ the Open MPI core libraries -- no DSOs)::
+
+ shell$ ./configure
+
+ #. Build all components as static, except the TCP BTL, which will be
+ built as a DSO::
+
+ shell$ ./configure --enable-mca-dso=btl-tcp
+
+ #. Build all components as static, except all BTL components, which
+ will be built as DSOs::
+
+ shell$ ./configure --enable-mca-dso=btl
+
+ #. Build all components as static, except all MTL components and the
+ TCP BTL component, which will be built as DSOs::
+
+ shell$ ./configure --enable-mca-dso=mtl,btl-tcp
+
+ #. Build all BTLs as DSOs, except the TCP BTL (which will be built
+ as static), as the ``btl-tcp`` option is more specific than the
+ ``btl`` option::
+
+ shell$ ./configure --enable-mca-dso=btl --enable-mca-static=btl-tcp
+
+ #. Build the TCP BTL as static, because the static option at the
+ same level always wins::
+
+ shell$ ./configure --enable-mca-dso=btl-tcp --enable-mca-static=btl-tcp
+
+* ``--enable-mca-no-build=LIST``: Comma-separated list of
+ ``FRAMEWORK-COMPONENT`` pairs that will not be built. 
For + example, ``--enable-mca-no-build=threads-qthreads,pml-monitoring`` will + disable building both the ``qthreads`` threading component and the + ``monitoring`` PML. + + .. note:: This option is typically only useful for components that + would otherwise be built. For example, if you are on a + machine without Libfabric support, it is not necessary to + specify:: + + shell$ ./configure --enable-mca-no-build=cm-ofi + + because the ``configure`` script will naturally see that + you do not have support for Libfabric and will + automatically skip the ``ofi`` CM component. + +* ``--disable-show-load-errors-by-default``: + Set the default value of the ``mca_base_component_show_load_errors`` + MCA variable: the ``--enable`` form of this option sets the MCA + variable to true, the ``--disable`` form sets the MCA variable to + false. The MCA ``mca_base_component_show_load_errors`` variable can + still be overridden at run time via the usual MCA-variable-setting + mechanisms; this configure option simply sets the default value. + + The ``--disable`` form of this option is intended for Open MPI + packagers who tend to enable support for many different types of + networks and systems in their packages. For example, consider a + packager who includes support for both the FOO and BAR networks in + their Open MPI package, both of which require support libraries + (``libFOO.so`` and ``libBAR.so``). If an end user only has BAR + hardware, they likely only have ``libBAR.so`` available on their + systems -- not ``libFOO.so``. Disabling load errors by default will + prevent the user from seeing potentially confusing warnings about + the FOO components failing to load because ``libFOO.so`` is not + available on their systems. + + Conversely, system administrators tend to build an Open MPI that is + targeted at their specific environment, and contains few (if any) + components that are not needed. 
In such cases, they might want
+ their users to be warned that the FOO network components failed to
+ load (e.g., if ``libFOO.so`` was mistakenly unavailable), because Open
+ MPI may otherwise silently fail over to a slower network path for MPI
+ traffic.
+
+* ``--with-platform=FILE``:
+ Load configure options for the build from ``FILE``. Options on the
+ command line that are not in ``FILE`` are also used. Options on the
+ command line and in ``FILE`` are replaced by what is in ``FILE``.
+
+* ``--with-libmpi-name=STRING``:
+ Replace ``libmpi.*`` and ``libmpi_FOO.*`` (where ``FOO`` is one of the
+ fortran supporting libraries installed in lib) with ``libSTRING.*``
+ and ``libSTRING_FOO.*``. This is provided as a convenience mechanism
+ for third-party packagers of Open MPI that might want to rename
+ these libraries for their own purposes. This option is *not*
+ intended for typical users of Open MPI.
diff --git a/docs/installing-open-mpi/configure-cli-options/misc.rst b/docs/installing-open-mpi/configure-cli-options/misc.rst
new file mode 100644
index 00000000000..bdce34c99ac
--- /dev/null
+++ b/docs/installing-open-mpi/configure-cli-options/misc.rst
@@ -0,0 +1,63 @@
+.. This file is included by building-open-mpi.rst
+
+Miscellaneous functionality
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The following are command line options that don't fit any of the
+above categories that can be used with ``configure``:
+
+* ``--without-memory-manager``:
+ Disable building Open MPI's memory manager. Open MPI's memory
+ manager is usually built on Linux based platforms, and is generally
+ only used for optimizations with some OpenFabrics-based networks (it
+ is not *necessary* for OpenFabrics networks, but some performance
+ loss may be observed without it).
+
+ However, it may be necessary to disable the memory manager in order
+ to build Open MPI statically.
+
+* ``--with-ft=TYPE``:
+ Specify the type of fault tolerance to enable. The only allowed
+ values are ``ulfm`` and ``none``. 
See :ref:`the ULFM section + ` for more details. + +* ``--enable-peruse``: + Enable the PERUSE MPI data analysis interface. + +* ``--enable-heterogeneous``: + Enable support for running on heterogeneous clusters (e.g., machines + with different endian representations). Heterogeneous support is + disabled by default because it imposes a minor performance penalty. + + .. danger:: The heterogeneous functionality is currently broken -- + do not use. + +.. _install-wrapper-flags-label: + +* ``--with-wrapper-cflags=CFLAGS`` +* ``--with-wrapper-cxxflags=CXXFLAGS`` +* ``--with-wrapper-fcflags=FCFLAGS`` +* ``--with-wrapper-ldflags=LDFLAGS`` +* ``--with-wrapper-libs=LIBS``: + Add the specified flags to the default flags that are used in Open + MPI's "wrapper" compilers (e.g., ``mpicc`` -- see below for more + information about Open MPI's wrapper compilers). By default, Open + MPI's wrapper compilers use the same compilers used to build Open + MPI and specify a minimum set of additional flags that are necessary + to compile/link MPI applications. These configure options give + system administrators the ability to embed additional flags in + OMPI's wrapper compilers (which is a local policy decision). The + meanings of the different flags are: + + * ``CFLAGS``: Flags passed by the ``mpicc`` wrapper to the C + compiler + * ``CXXFLAGS``: Flags passed by the ``mpic++`` and ``mpiCC`` + wrappers to the C++ compiler + * ``FCFLAGS``: Flags passed by the ``mpifort`` wrapper to the + Fortran compiler + * ``LDFLAGS``: Flags passed by all the wrappers to the linker + * ``LIBS``: Flags passed by all the wrappers to the linker + + See the section on :ref:`customizing wrapper compiler behavior + ` to see how to alter the + wrapper compiler behavior at run time. 
diff --git a/docs/installing-open-mpi/configure-cli-options/mpi.rst b/docs/installing-open-mpi/configure-cli-options/mpi.rst new file mode 100644 index 00000000000..0aa082f3e89 --- /dev/null +++ b/docs/installing-open-mpi/configure-cli-options/mpi.rst @@ -0,0 +1,92 @@ +.. This file is included by building-open-mpi.rst + +MPI functionality +^^^^^^^^^^^^^^^^^ + +The following are command line options to set the default for various +MPI API behaviors that can be used with ``configure``: + +* ``--with-mpi-param-check[=VALUE]``: + Whether or not to check MPI function parameters for errors at + runtime. The following ``VALUE``\s are permitted: + + * ``always``: MPI function parameters are always checked for errors + * ``never``: MPI function parameters are never checked for errors + * ``runtime``: Whether MPI function parameters are checked depends on + the value of the MCA parameter ``mpi_param_check`` (default: yes). + * ``yes``: Synonym for "always" (same as ``--with-mpi-param-check``). + * ``no``: Synonym for "never" (same as ``--without-mpi-param-check``). + + If ``--with-mpi-param`` is not specified, ``runtime`` is the default. + +* ``--disable-mpi-thread-multiple``: + Disable the MPI thread level ``MPI_THREAD_MULTIPLE`` (it is enabled by + default). + +* ``--disable-ft``: + Disable the User-Level Fault Mitigation (ULFM) support in Open MPI + (it is enabled by default). + + :ref:`See the ULFM section ` for more information. + +* ``--enable-mpi-java``: + Enable building of an **EXPERIMENTAL** Java MPI interface (disabled + by default). You may also need to specify ``--with-jdk-dir``, + ``--with-jdk-bindir``, and/or ``--with-jdk-headers``. + + .. warning:: Note that this Java interface is **INCOMPLETE** + (meaning: it does not support all MPI functionality) and **LIKELY + TO CHANGE**. The Open MPI developers would very much like to + hear your feedback about this interface. + + :ref:`See the Java section ` for many more + details. 
+
+* ``--enable-mpi-fortran[=VALUE]``:
+ By default, Open MPI will attempt to build all 3 Fortran bindings:
+ ``mpif.h``, the ``mpi`` module, and the ``mpi_f08`` module. The following
+ ``VALUE``\s are permitted:
+
+ * ``all``: Synonym for ``yes``.
+ * ``yes``: Attempt to build all 3 Fortran bindings; skip
+ any binding that cannot be built (same as
+ ``--enable-mpi-fortran``).
+ * ``mpifh``: Only build ``mpif.h`` support.
+ * ``usempi``: Only build ``mpif.h`` and ``mpi`` module support.
+ * ``usempif08``: Build ``mpif.h``, ``mpi`` module, and ``mpi_f08``
+ module support.
+ * ``none``: Synonym for ``no``.
+ * ``no``: Do not build any MPI Fortran support (same as
+ ``--disable-mpi-fortran``). This is mutually exclusive
+ with building the OpenSHMEM Fortran interface.
+
+* ``--enable-mpi-ext[=LIST]``:
+ Enable Open MPI's non-portable API extensions. ``LIST`` is a
+ comma-delimited list of extensions. If no ``LIST`` is specified, all
+ of the extensions are enabled.
+
+ See the "Open MPI API Extensions" section for more details.
+
+* ``--disable-mpi-io``:
+ Disable built-in support for MPI-2 I/O, likely because an
+ externally-provided MPI I/O package will be used. Default is to use
+ the internal framework system that uses the ompio component and a
+ specially modified version of ROMIO that fits inside the romio
+ component
+
+* ``--disable-io-romio``:
+ Disable the ROMIO MPI-IO component
+
+* ``--with-io-romio-flags=FLAGS``:
+ Pass ``FLAGS`` to the ROMIO distribution configuration script. This
+ option is usually only necessary to pass
+ parallel-filesystem-specific preprocessor/compiler/linker flags back
+ to the ROMIO system.
+
+* ``--disable-io-ompio``:
+ Disable the ompio MPI-IO component
+
+* ``--enable-sparse-groups``:
+ Enable the usage of sparse groups. This would save memory
+ significantly especially if you are creating large
+ communicators. 
(Disabled by default)
diff --git a/docs/installing-open-mpi/configure-cli-options/networking.rst b/docs/installing-open-mpi/configure-cli-options/networking.rst
new file mode 100644
index 00000000000..6c00faedebb
--- /dev/null
+++ b/docs/installing-open-mpi/configure-cli-options/networking.rst
@@ -0,0 +1,109 @@
+.. This file is included by building-open-mpi.rst
+
+.. _install-network-support-label:
+
+Networking support / options
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The following are command line options for various network types that
+can be used with ``configure``:
+
+* ``--with-fca=DIR``:
+ Specify the directory where the Mellanox FCA library and
+ header files are located.
+
+ FCA is the support library for Mellanox switches and HCAs.
+
+* ``--with-hcoll=DIR``:
+ Specify the directory where the Mellanox hcoll library and header
+ files are located. This option is generally only necessary if the
+ hcoll headers and libraries are not in default compiler/linker
+ search paths.
+
+ hcoll is the support library for MPI collective operation offload on
+ Mellanox ConnectX-3 HCAs (and later).
+
+* ``--with-knem=DIR``:
+ Specify the directory where the knem libraries and header files are
+ located. This option is generally only necessary if the knem headers
+ and libraries are not in default compiler/linker search paths.
+
+ knem is a Linux kernel module that allows direct process-to-process
+ memory copies (optionally using hardware offload), potentially
+ increasing bandwidth for large messages sent between processes on the
+ same server. See `the Knem web site
+ <https://knem.gitlabpages.inria.fr/>`_ for details.
+
+* ``--with-libfabric=DIR``:
+ Specify the directory where the OpenFabrics Interfaces ``libfabric``
+ library and header files are located. This option is generally only
+ necessary if the libfabric headers and libraries are not in default
+ compiler/linker search paths. 
+
+ Libfabric is the support library for OpenFabrics Interfaces-based
+ network adapters, such as Cisco usNIC, Intel True Scale PSM, Cray
+ uGNI, etc.
+
+* ``--with-libfabric-libdir=DIR``:
+ Look in directory for the libfabric libraries. By default, Open MPI
+ will look in ``DIR/lib`` and ``DIR/lib64``, which covers most cases.
+ This option is only needed for special configurations.
+
+* ``--with-portals4=DIR``:
+ Specify the directory where the Portals4 libraries and header files
+ are located. This option is generally only necessary if the Portals4
+ headers and libraries are not in default compiler/linker search
+ paths.
+
+ Portals is a low-level network API for high-performance networking
+ on high-performance computing systems developed by Sandia National
+ Laboratories, Intel Corporation, and the University of New Mexico.
+ The Portals 4 Reference Implementation is a complete implementation
+ of Portals 4, with transport over InfiniBand verbs and UDP.
+
+* ``--with-portals4-libdir=DIR``:
+ Location of libraries to link with for Portals4 support.
+
+* ``--with-portals4-max-md-size=SIZE`` and
+ ``--with-portals4-max-va-size=SIZE``:
+ Set configuration values for Portals 4
+
+* ``--with-psm=DIR``:
+ Specify the directory where the QLogic InfiniPath / Intel True Scale
+ PSM library and header files are located. This option is generally
+ only necessary if the PSM headers and libraries are not in default
+ compiler/linker search paths.
+
+ PSM is the support library for QLogic InfiniPath and Intel TrueScale
+ network adapters.
+
+* ``--with-psm-libdir=DIR``:
+ Look in directory for the PSM libraries. By default, Open MPI will
+ look in ``DIR/lib`` and ``DIR/lib64``, which covers most cases. This
+ option is only needed for special configurations.
+
+* ``--with-psm2=DIR``:
+ Specify the directory where the Intel Omni-Path PSM2 library and
+ header files are located. 
This option is generally only necessary
+ if the PSM2 headers and libraries are not in default compiler/linker
+ search paths.
+
+ PSM2 is the support library for Intel Omni-Path network adapters.
+
+* ``--with-psm2-libdir=DIR``:
+ Look in directory for the PSM2 libraries. By default, Open MPI will
+ look in ``DIR/lib`` and ``DIR/lib64``, which covers most cases. This
+ option is only needed for special configurations.
+
+* ``--with-ucx=DIR``:
+ Specify the directory where the UCX libraries and header files are
+ located. This option is generally only necessary if the UCX headers
+ and libraries are not in default compiler/linker search paths.
+
+* ``--with-ucx-libdir=DIR``:
+ Look in directory for the UCX libraries. By default, Open MPI will
+ look in ``DIR/lib`` and ``DIR/lib64``, which covers most cases. This
+ option is only needed for special configurations.
+
+* ``--with-usnic``:
+ Abort configure if Cisco usNIC support cannot be built.
diff --git a/docs/installing-open-mpi/configure-cli-options/oshmem.rst b/docs/installing-open-mpi/configure-cli-options/oshmem.rst
new file mode 100644
index 00000000000..73ca3f37ee1
--- /dev/null
+++ b/docs/installing-open-mpi/configure-cli-options/oshmem.rst
@@ -0,0 +1,16 @@
+.. This file is included by building-open-mpi.rst
+
+OpenSHMEM functionality
+^^^^^^^^^^^^^^^^^^^^^^^
+
+The following are command line options to set the default for various
+OpenSHMEM API behaviors that can be used with ``configure``:
+
+* ``--disable-oshmem``:
+ Disable building the OpenSHMEM implementation (by default, it is
+ enabled).
+
+* ``--disable-oshmem-fortran``:
+ Disable building only the Fortran OpenSHMEM bindings. Please see
+ the "Compiler Notes" section herein which contains further
+ details on known issues with various Fortran compilers. 
diff --git a/docs/installing-open-mpi/configure-cli-options/runtime.rst b/docs/installing-open-mpi/configure-cli-options/runtime.rst
new file mode 100644
index 00000000000..eea1cbc9227
--- /dev/null
+++ b/docs/installing-open-mpi/configure-cli-options/runtime.rst
@@ -0,0 +1,57 @@
+.. This file is included by building-open-mpi.rst
+
+Run-time system support
+^^^^^^^^^^^^^^^^^^^^^^^
+
+The following are command line options for various runtime systems that
+can be used with ``configure``:
+
+* ``--enable-mpirun-prefix-by-default``:
+ This option forces the ``mpirun`` command to always behave as if
+ ``--prefix $prefix`` was present on the command line (where ``$prefix``
+ is the value given to the ``--prefix`` option to configure). This
+ prevents most ``rsh``/``ssh``-based users from needing to modify their
+ shell startup files to set the ``PATH`` and/or ``LD_LIBRARY_PATH`` for
+ Open MPI on remote nodes. Note, however, that such users may still
+ desire to set ``PATH`` -- perhaps even in their shell startup files --
+ so that executables such as ``mpicc`` and ``mpirun`` can be found
+ without needing to type long path names.
+
+* ``--with-alps``:
+ Force the building of support for the Cray Alps run-time environment. If
+ Alps support cannot be found, configure will abort.
+
+* ``--with-lsf=DIR``:
+ Specify the directory where the LSF libraries and header files are
+ located. This option is generally only necessary if the LSF headers
+ and libraries are not in default compiler/linker search paths.
+
+ LSF is a resource manager system, frequently used as a batch
+ scheduler in HPC systems.
+
+* ``--with-lsf-libdir=DIR``:
+ Look in directory for the LSF libraries. By default, Open MPI will
+ look in ``DIR/lib`` and ``DIR/lib64``, which covers most cases. This
+ option is only needed for special configurations.
+
+* ``--with-slurm``:
+ Force the building of Slurm scheduler support. 
+ +* ``--with-sge``: + Specify to build support for the Oracle Grid Engine (OGE) resource + manager and/or the Open Grid Engine. OGE support is disabled by + default; this option must be specified to build OMPI's OGE support. + + The Oracle Grid Engine (OGE) and open Grid Engine packages are + resource manager systems, frequently used as a batch scheduler in + HPC systems. It used to be called the "Sun Grid Engine", which is + why the option is still named ``--with-sge``. + +* ``--with-tm=DIR``: + Specify the directory where the TM libraries and header files are + located. This option is generally only necessary if the TM headers + and libraries are not in default compiler/linker search paths. + + TM is the support library for the Torque and PBS Pro resource + manager systems, both of which are frequently used as a batch + scheduler in HPC systems. diff --git a/docs/installing-open-mpi/configure-cli-options/support-libraries.rst b/docs/installing-open-mpi/configure-cli-options/support-libraries.rst new file mode 100644 index 00000000000..e6faee4508d --- /dev/null +++ b/docs/installing-open-mpi/configure-cli-options/support-libraries.rst @@ -0,0 +1,46 @@ +.. _label-building-ompi-cli-options-support-libraries: + +Support libraries +^^^^^^^^^^^^^^^^^ + +:ref:`See this section ` for +information about the CLI options for the support libraries described +in this section. + +The following are command line options for the support libraries that +are used by Open MPI that can be used with ``configure``: + +* ``--with-hwloc[=VALUE]``: +* ``--with-libevent[=VALUE]``: +* ``--with-pmix[=VALUE]``: +* ``--with-prrte[=VALUE]``: These four options specify where to find + the headers and libraries for the Hwloc, Libevent, PMIx, and PRRTE + libraries, respectively. The following ``VALUE``\s are permitted: + + * ``external``: Use an external installation (rely on default + compiler and linker paths to find it). 
``configure`` will abort + if it cannot find suitable header files and libraries. + * ``internal``: Use Open MPI's internal/bundled copy.. + * No value specified: Try the ``external`` behavior. If that fails, + fall back to ``internal`` behavior. *This is the default behavior.* + * ``DIR``: Specify the location of a specific installation to use. + ``configure`` will abort if it cannot find suitable header files + and libraries under ``DIR``. + +* ``--with-hwloc-libdir=LIBDIR``: +* ``--with-libevent-libdir=LIBDIR``: +* ``--with-prrte-libdir=LIBDIR``: +* ``--with-pmix-libdir=LIBDIR``: + :ref:`See the configure CLI + options conventions ` + for a description of these four options. + +* ``--with-valgrind[=DIR]``: + Directory where the valgrind software is installed. If Open MPI + finds Valgrind's header files, it will include additional support + for Valgrind's memory-checking debugger. + + Valgrind support is disabled by default in Open MPI. Enabling + Valgrind support will eliminate a lot of false positives when + running Valgrind on MPI applications. There is a minor performance + penalty for enabling this option. diff --git a/docs/installing-open-mpi/configure-output-summary.rst b/docs/installing-open-mpi/configure-output-summary.rst new file mode 100644 index 00000000000..d93ae899893 --- /dev/null +++ b/docs/installing-open-mpi/configure-output-summary.rst @@ -0,0 +1,13 @@ +``configure`` output summary +============================ + +At the end of ``configure``, a summary of many configuration options +are shown. + +The Open MPI Team *highly* recommends examining the ``configure`` +summary to validate that Open MPI plans to build all the features that +you expect it to build. + +If the summary does not agree with what you think should be done, you +can look earlier in ``configure``'s output to see why particular +options and behaviors were selected. 
diff --git a/docs/installing-open-mpi/definitions.rst b/docs/installing-open-mpi/definitions.rst
new file mode 100644
index 00000000000..a05edb11010
--- /dev/null
+++ b/docs/installing-open-mpi/definitions.rst
@@ -0,0 +1,16 @@
+Definitions
+===========
+
+* **Source tree:** The tree where the Open MPI source code is located.
+ It is typically the result of expanding an Open MPI distribution
+ source code bundle, such as a tarball.
+* **Build tree:** The tree where Open MPI was built. It is always
+ related to a specific source tree, but may actually be a different
+ tree (:ref:`since Open MPI supports VPATH builds
+ `). Specifically, this is the tree
+ where you invoked ``configure``, ``make``, etc. to build and install
+ Open MPI.
+* **Installation tree:** The tree where Open MPI was installed. It is
+ typically the "prefix" argument given to Open MPI's ``configure``
+ script; it is the directory from which you run installed Open MPI
+ executables.
diff --git a/docs/installing-open-mpi/filesystem-requirements.rst b/docs/installing-open-mpi/filesystem-requirements.rst
new file mode 100644
index 00000000000..d6dbd0690c1
--- /dev/null
+++ b/docs/installing-open-mpi/filesystem-requirements.rst
@@ -0,0 +1,24 @@
+Filesystem requirements
+=======================
+
+.. _install-filesystem-timestamp-warning-label:
+
+.. warning:: If you are building Open MPI on a network filesystem, the
+ machine on which you are building *must* be time-synchronized
+ with the file server.
+
+Specifically: Open MPI's build system *requires* accurate filesystem
+timestamps. If your ``make`` output shows that it ran GNU Automake,
+Autoconf, and/or Libtool, or includes warnings about timestamps in the
+future, perhaps looking something like this::
+
+ Warning: File `Makefile.am' has modification time 3.6e+04 s in the future
+
+**Know that this is not normal**, and you likely have an invalid
+build. 
+ +In this case, you should remove the Open MPI source directory and +start over (e.g., by re-extracting the Open MPI tarball): either +switch to build on a local filesystem, or ensure that the time on your +build machine is synchronized with the time on your file server before +building again. diff --git a/docs/installing-open-mpi/index.rst b/docs/installing-open-mpi/index.rst new file mode 100644 index 00000000000..e7249d6daad --- /dev/null +++ b/docs/installing-open-mpi/index.rst @@ -0,0 +1,18 @@ +.. _building-and-installing-section-label: + +Building and installing Open MPI +================================ + +.. toctree:: + :maxdepth: 1 + + quickstart + definitions + filesystem-requirements + vpath-builds + compilers-and-flags + required-support-libraries + configure-cli-options/index + configure-output-summary + make-targets + installation-location diff --git a/docs/installing-open-mpi/installation-location.rst b/docs/installing-open-mpi/installation-location.rst new file mode 100644 index 00000000000..6c16dc446d2 --- /dev/null +++ b/docs/installing-open-mpi/installation-location.rst @@ -0,0 +1,241 @@ + +.. _building-open-mpi-installation-location-label: + +Installation location +===================== + +A common environment to run Open MPI is in a "Beowulf"-class or +similar cluster (e.g., a bunch of 1U servers in a bunch of racks). +Simply stated, Open MPI can run on a group of servers or workstations +connected by a network. + +This raises the question for Open MPI system administrators: where to +install the Open MPI binaries, header files, etc.? This discussion +mainly addresses this question for homogeneous clusters (i.e., where +all nodes and operating systems are the same), although elements of +this discussion apply to heterogeneous clusters as well. + +.. important:: For simplicity, the Open MPI team *strongly* recommends + that you install Open MPI at the same path location on all nodes in + your cluster. 
This *greatly* simplifies the user experience of + running MPI jobs across multiple nodes in your cluster. + + It is *possible* to install Open MPI in unique path locations in + the different nodes in your cluster, but it is not *advisable*. + +Filesystem types +---------------- + +There are two common approaches. + +Network filesystem +^^^^^^^^^^^^^^^^^^ + +Have a common filesystem, such as NFS, between all the machines to be +used. Install Open MPI such that the installation directory is the +*same value* on each node. This will *greatly* simplify user's shell +startup scripts (e.g., ``.bashrc``, ``.cshrc``, ``.profile`` etc.) +|mdash| the ``PATH`` can be set without checking which machine the +user is on. It also simplifies the system administrator's job; when +the time comes to patch or otherwise upgrade Open MPI, only one copy +needs to be modified. + +For example, consider a cluster of four machines: ``inky``, +``blinky``, ``pinky``, and ``clyde``. + +* Install Open MPI on ``inky``'s local hard drive in the directory + ``/opt/openmpi-VERSION``. The system administrator then mounts + ``inky:/opt/openmpi-VERSION`` on the remaining three machines, such + that ``/opt/openmpi-VERSION`` on all machines is effectively "the + same". That is, the following directories all contain the Open MPI + installation: + + .. code-block:: + + inky:/opt/openmpi-VERSION + blinky:/opt/openmpi-VERSION + pinky:/opt/openmpi-VERSION + clyde:/opt/openmpi-VERSION + +* Install Open MPI on ``inky``'s local hard drive in the directory + ``/usr/local/openmpi-VERSION``. The system administrator then + mounts ``inky:/usr/local/openmpi-VERSION`` on *all four* machines in + some other common location, such as ``/opt/openmpi-VERSION`` (a + symbolic link can be installed on ``inky`` instead of a mount point + for efficiency). This strategy is typically used for environments + where one tree is NFS exported, but another tree is typically used + for the location of actual installation. 
For example, the following + directories all contain the Open MPI installation: + + .. code-block:: + + inky:/opt/openmpi-VERSION + blinky:/opt/openmpi-VERSION + pinky:/opt/openmpi-VERSION + clyde:/opt/openmpi-VERSION + + Notice that there are the same four directories as the previous + example, but on ``inky``, the directory is *actually* located in + ``/usr/local/openmpi-VERSION``. + +There is a bit of a disadvantage in this approach; each of the remote +nodes have to incur NFS (or whatever filesystem is used) delays to +access the Open MPI directory tree. However, both the administration +ease and low cost (relatively speaking) of using a networked file +system usually greatly outweighs the cost. Indeed, once an MPI +application is past MPI initialization, it doesn't use the Open MPI +binaries very much. + +Local filesystem +^^^^^^^^^^^^^^^^ + +If you are concerned with networked filesystem costs of accessing the +Open MPI binaries, you can install Open MPI on the local hard drive of +each node in your system. Again, it is *highly* advisable to install +Open MPI in the *same* directory on each node so that each user's +``PATH`` can be set to the same value, regardless of the node that a +user has logged on to. + +This approach will save some network latency of accessing the Open MPI +binaries, but is typically only used where users are very concerned +about squeezing every single cycle out of their machines, or are +running at extreme scale where a networked filesystem may get +overwhelmed by filesystem requests for Open MPI binaries when running +very large parallel jobs. + +.. _building-open-mpi-install-overwrite-label: + +Installing over a prior Open MPI installation +--------------------------------------------- + +.. warning:: The Open MPI team does not recommend installing a new + version of Open MPI over an existing / older installation of Open + MPI. 
+ +In its default configuration, an Open MPI installation consists of +several shared libraries, header files, executables, and plugins +(dynamic shared objects |mdash| DSOs). These installation files act +together as a single entity. The specific filenames and +contents of these files are subject to change between different +versions of Open MPI. + +.. important:: Installing one version of Open MPI does *not* uninstall + another version. + +If you install a new version of Open MPI over an older version, this +may not overwrite all the files from the older version. Hence, you +may end up with an incompatible muddle of files from two different +installations |mdash| which can cause problems. + +The Open MPI team recommends one of the following methods for +upgrading your Open MPI installation: + +* Install newer versions of Open MPI into a different directory. For + example, install into ``/opt/openmpi-a.b.c`` and + ``/opt/openmpi-x.y.z`` for versions a.b.c and x.y.z, respectively. +* Completely uninstall the old version of Open MPI before installing + the new version. The ``make uninstall`` process from Open MPI a.b.c + build tree should completely uninstall that version from the + installation tree, making it safe to install a new version (e.g., + version x.y.z) into the same installation tree. +* Remove the old installation directory entirely and then install the + new version. For example ``rm -rf /opt/openmpi`` *(assuming that + there is nothing else of value in this tree!)* The installation of + Open MPI x.y.z will safely re-create the ``/opt/openmpi`` tree. + This method is preferable if you no longer have the source and build + trees to Open MPI a.b.c available from which to ``make + uninstall``. +* Go into the Open MPI a.b.c installation directory and manually + remove all old Open MPI files. Then install Open MPI x.y.z into the + same installation directory. This can be a somewhat painful, + annoying, and error-prone process. 
It can be desirable to initially install Open MPI to one location
(e.g., ``/path/to/openmpi``) and then later move it to another
location (e.g., ``/opt/myproduct/bundled-openmpi-a.b.c``).
The package system +then bundles up everything under the temporary location into a package +that can be installed into its real location later. For example, when +*creating* an RPM that will be installed to ``/opt/openmpi``, the RPM +system will transparently prepend a "destination directory" (or +"destdir") to the installation directory. As such, Open MPI will +think that it is installed in ``/opt/openmpi``, but it is actually +temporarily installed in (for example) +``/var/rpm/build.1234/opt/openmpi``. If it is necessary to *use* Open +MPI while it is installed in this staging area, the ``OPAL_DESTDIR`` +environment variable can be used; setting ``OPAL_DESTDIR`` to +``/var/rpm/build.1234`` will automatically prefix every directory such +that Open MPI can function properly. + +Overriding individual directories +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Open MPI uses the GNU-specified directories (per Autoconf/Automake), +and can be overridden by setting environment variables directly +related to their common names. The list of environment variables that +can be used is: + +* ``OPAL_PREFIX`` +* ``OPAL_EXEC_PREFIX`` +* ``OPAL_BINDIR`` +* ``OPAL_SBINDIR`` +* ``OPAL_LIBEXECDIR`` +* ``OPAL_DATAROOTDIR`` +* ``OPAL_DATADIR`` +* ``OPAL_SYSCONFDIR`` +* ``OPAL_SHAREDSTATEDIR`` +* ``OPAL_LOCALSTATEDIR`` +* ``OPAL_LIBDIR`` +* ``OPAL_INCLUDEDIR`` +* ``OPAL_INFODIR`` +* ``OPAL_MANDIR`` +* ``OPAL_PKGDATADIR`` +* ``OPAL_PKGLIBDIR`` +* ``OPAL_PKGINCLUDEDIR`` + +Note that not all of the directories listed above are used by Open +MPI; they are listed here in entirety for completeness. + +Also note that several directories listed above are defined in terms +of other directories. For example, the ``$bindir`` is defined by +default as ``$prefix/bin``. Hence, overriding the ``$prefix`` (via +``OPAL_PREFIX``) will automatically change the first part of the +``$bindir`` (which is how method 1 described above works). 
+Alternatively, ``OPAL_BINDIR`` can be set to an absolute value that +ignores ``$prefix`` altogether. diff --git a/docs/installing-open-mpi/make-targets.rst b/docs/installing-open-mpi/make-targets.rst new file mode 100644 index 00000000000..44716be0a87 --- /dev/null +++ b/docs/installing-open-mpi/make-targets.rst @@ -0,0 +1,20 @@ +``make`` targets +================ + +Open MPI supports all the ``make`` targets that are provided by GNU +Automake, such as: + +* ``all``: build the entire Open MPI package +* ``install``: install Open MPI +* ``uninstall``: remove all traces of Open MPI from the installation tree +* ``clean``: clean out the build tree + +Once Open MPI has been built and installed, it is safe to run ``make +clean`` and/or remove the entire build tree. + +Generally speaking, the only thing that users need to do to use Open +MPI is ensure that ``PREFIX/bin`` is in their ``PATH`` and +``PREFIX/lib`` is in their ``LD_LIBRARY_PATH``. Users may need to +ensure to set the ``PATH`` and ``LD_LIBRARY_PATH`` in their shell +setup files (e.g., ``.bashrc``, ``.cshrc``) so that non-interactive +``ssh``-based logins will be able to find the Open MPI executables. diff --git a/docs/installing-open-mpi/quickstart.rst b/docs/installing-open-mpi/quickstart.rst new file mode 100644 index 00000000000..f448f5b9380 --- /dev/null +++ b/docs/installing-open-mpi/quickstart.rst @@ -0,0 +1,59 @@ +.. _label-quickstart-building-open-mpi: + +Quick start: Installing Open MPI +================================ + +Although this section skips many details, it offers examples that will +probably work in many environments. + +.. caution:: Note that this section is a "Quick start" |mdash| it does + not attempt to be comprehensive or describe how to build Open MPI + in all supported environments. The examples below may therefore + not work exactly as shown in your environment. + + Please consult the other sections in this chapter for more details, + if necessary. + +.. 
Note that there are many, many configuration options available in the
``./configure`` step.  Some of them may be needed for your particular
HPC network interconnect type and/or computing environment; see the
rest of this chapter for descriptions of the available options.
diff --git a/docs/installing-open-mpi/required-support-libraries-dependency-graph.png b/docs/installing-open-mpi/required-support-libraries-dependency-graph.png new file mode 100644 index 00000000000..c32e3b9eed1 Binary files /dev/null and b/docs/installing-open-mpi/required-support-libraries-dependency-graph.png differ diff --git a/docs/installing-open-mpi/required-support-libraries-dependency-graph.pptx b/docs/installing-open-mpi/required-support-libraries-dependency-graph.pptx new file mode 100644 index 00000000000..61a2bb6a484 Binary files /dev/null and b/docs/installing-open-mpi/required-support-libraries-dependency-graph.pptx differ diff --git a/docs/installing-open-mpi/required-support-libraries.rst b/docs/installing-open-mpi/required-support-libraries.rst new file mode 100644 index 00000000000..e044f5fb03a --- /dev/null +++ b/docs/installing-open-mpi/required-support-libraries.rst @@ -0,0 +1,259 @@ +.. _label-install-required-support-libraries: + +Required support libraries +========================== + +Open MPI uses the following support libraries: + +#. `Hardware Locality (hwloc) + `_: This library is + required; Open MPI will not build without it. + +#. `Libevent `_: This library is required; Open + MPI will not build without it. + +#. `PMIx `_: This library is required; Open MPI + will not build without it. + +#. `PRRTE `_: This library is + optional in some environments. PRRTE provides Open MPI's + full-featured ``mpirun`` / ``mpiexec`` MPI application launchers + (the two are identical; they are symbolic links to the same executable). + + * If your environment uses another MPI application launcher + (e.g., Slurm users can use the ``srun`` launcher to "direct + launch" Open MPI applications), then the use of PRRTE is + optional. + * If your environment has no other MPI application launcher, then + you need to install PRRTE and build Open MPI with PRRTE + support. 
.. note:: The required support libraries can have other dependencies,
          but for simplicity and relevance to building Open MPI,
          those other dependencies are not discussed here.
+All of them are bad, and can be extremely difficult to diagnose. + +Avoiding the problems +--------------------- + +A simple way to avoid these problems is to configure your system such +that it has exactly one copy of each of the required support libraries. + +.. important:: If possible, use your OS / environment's package + manager to install as many of these support libraries |mdash| + including their development headers |mdash| as possible before + invoking Open MPI's ``configure`` script. + +Not all package managers provide all of the required support +libraries. But even if your package manager installs |mdash| for +example |mdash| only Libevent and Hwloc, that somewhat simplifies the +final Open MPI configuration, and therefore avoids some potentially +erroneous configurations. + +How ``configure`` finds the required libraries +---------------------------------------------- + +In an attempt to strike a balance between end-user convenience and +flexibility, Open MPI bundles these four required support libraries in +its official distribution tarball. + +Generally, if Open MPI cannot find a required support library, it will +automatically configure, build, install, and use its bundled version +as part of the main Open MPI configure, build, and installation +process. + +Put differently: Open MPI's ``configure`` script will examine the +build machine and see if it can find each of the required support +header files and libraries. If it cannot find them, it will attempt +to fall back and use the corresponding bundled support library +instead. + +.. important:: Note, however, that ``configure`` is smart enough to + understand the dependencies between the required support libraries. 
   * If ``configure`` is unable to find header files and libraries for
     PMIx, Hwloc, and Libevent elsewhere on the build machine (i.e.,
     presumably the same PMIx, Hwloc, and Libevent that the PRRTE in
     ``/usr/local`` is using), this is an error: ``configure`` will
     abort, and therefore refuse to build Open MPI.
+ +Build example 3 +^^^^^^^^^^^^^^^ + +``configure`` only finds the development headers and libraries for +Libevent on the build machine. This will cause the following to +occur: + +#. ``configure`` will set up to build the PRRTE, PMIx, and Hwloc + libraries that are bundled in the Open MPI source tree. +#. ``configure`` will ignore the Libevent library that is bundled in + the Open MPI source tree and will use the Libevent that is already + installed. + + +Overriding ``configure`` behavior +--------------------------------- + +If ``configure``'s default searching behavior is not sufficient for +your environment, you can use :ref:`command line options to override +its default behavior +`. + +For example, if PMIx and/or PRRTE are installed such that the default +header file and linker search paths will not find them, you can +provide command line options telling Open MPI's ``configure`` where to +search. Here's an example ``configure`` invocation where PMIx and +PRRTE have both been installed to ``/opt/open-mpi-stuff``: + +.. code-block:: sh + + ./configure --prefix=$HOME/openmpi-install \ + --with-pmix=/opt/open-mpi-stuff \ + --with-prrte=/opt/open-mpi-stuff ... + +As another example, if you do not have root-level privileges to use +the OS / environment package manager, and if you have a simple MPI +application (e.g., that has no external library dependencies), you may +wish to configure Open MPI something like this: + +.. code-block:: sh + + ./configure --prefix=$HOME/openmpi-install \ + --with-libevent=internal --with-hwloc=internal \ + --with-pmix=internal --with-prrte=internal ... + +The ``internal`` keywords force ``configure`` to use all four bundled +versions of the required libraries. + +.. danger:: Be very, very careful when overriding ``configure``'s + default search behavior for these libraries. Remember the critical + requirement: that Open MPI infrastructure and applications load + *exactly one copy* of each support library. 
For simplicity, it may
   be desirable to use exactly the support libraries that
   Open MPI was compiled and built against.
VPATH builds are fully supported.  For example:
Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. +3. The name of the author may not be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/docs/license/index.rst b/docs/license/index.rst new file mode 100644 index 00000000000..b12aad8910d --- /dev/null +++ b/docs/license/index.rst @@ -0,0 +1,55 @@ +License +========= + +Open MPI license +---------------- + +The Open MPI license is as follows: + +.. literalinclude:: ompi.txt + :language: text + +Additional licenses +------------------- + +Open MPI includes code from the MPICH project. + +.. literalinclude:: mpich.txt + :language: text + +Redistributed code +------------------ + +The Open MPI distribution tarball includes the following other open +source code bases in their entirety. + +Libevent +^^^^^^^^ + +.. literalinclude:: libevent.txt + :language: text + +Hwloc +^^^^^ + +.. literalinclude:: hwloc.txt + :language: text + +PMIx +^^^^ + +.. literalinclude:: pmix.txt + :language: text + +PRRTE +^^^^^ + +.. literalinclude:: prrte.txt + :language: text + +Treematch +^^^^^^^^^ + +.. 
literalinclude:: treematch.txt + :language: text + :encoding: latin-1 diff --git a/docs/license/libevent.txt b/docs/license/libevent.txt new file mode 100644 index 00000000000..402ca50896c --- /dev/null +++ b/docs/license/libevent.txt @@ -0,0 +1,99 @@ +Libevent is available for use under the following license, commonly known +as the 3-clause (or "modified") BSD license: + +============================== +Copyright (c) 2000-2007 Niels Provos +Copyright (c) 2007-2012 Niels Provos and Nick Mathewson + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. +3. The name of the author may not be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+============================== + +Portions of Libevent are based on works by others, also made available by +them under the three-clause BSD license above. The copyright notices are +available in the corresponding source files; the license is as above. Here's +a list: + +log.c: + Copyright (c) 2000 Dug Song + Copyright (c) 1993 The Regents of the University of California. + +strlcpy.c: + Copyright (c) 1998 Todd C. Miller + +win32select.c: + Copyright (c) 2003 Michael A. Davis + +evport.c: + Copyright (c) 2007 Sun Microsystems + +ht-internal.h: + Copyright (c) 2002 Christopher Clark + +minheap-internal.h: + Copyright (c) 2006 Maxim Yegorushkin + +============================== + +The arc4module is available under the following, sometimes called the +"OpenBSD" license: + + Copyright (c) 1996, David Mazieres + Copyright (c) 2008, Damien Miller + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +============================== + +The Windows timer code is based on code from libutp, which is +distributed under this license, sometimes called the "MIT" license. + + +Copyright (c) 2010 BitTorrent, Inc. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/docs/license/mpich.txt b/docs/license/mpich.txt new file mode 100644 index 00000000000..13bdd8a38d3 --- /dev/null +++ b/docs/license/mpich.txt @@ -0,0 +1,36 @@ +The following is a notice of limited availability of the code, and disclaimer +which must be included in the prologue of the code and in all source listings +of the code. + +Copyright Notice + + 2002 University of Chicago + +Permission is hereby granted to use, reproduce, prepare derivative works, and +to redistribute to others. This software was authored by: + +Mathematics and Computer Science Division +Argonne National Laboratory, Argonne IL 60439 + +(and) + +Department of Computer Science +University of Illinois at Urbana-Champaign + + + GOVERNMENT LICENSE + +Portions of this material resulted from work developed under a U.S. 
+Government Contract and are subject to the following license: the Government +is granted for itself and others acting on its behalf a paid-up, nonexclusive, +irrevocable worldwide license in this computer software to reproduce, prepare +derivative works, and perform publicly and display publicly. + + DISCLAIMER + +This computer code material was prepared, in part, as an account of work +sponsored by an agency of the United States Government. Neither the United +States, nor the University of Chicago, nor any of their employees, makes any +warranty express or implied, or assumes any legal liability or responsibility +for the accuracy, completeness, or usefulness of any information, apparatus, +product, or process disclosed, or represents that its use would not infringe +privately owned rights. diff --git a/docs/license/ompi.txt b/docs/license/ompi.txt new file mode 100644 index 00000000000..0bd083daaeb --- /dev/null +++ b/docs/license/ompi.txt @@ -0,0 +1,94 @@ +Copyright (c) 2004-2012 The Trustees of Indiana University and Indiana + University Research and Technology + Corporation. All rights reserved. +Copyright (c) 2004-2021 The University of Tennessee and The University + of Tennessee Research Foundation. All rights + reserved. +Copyright (c) 2004-2018 High Performance Computing Center Stuttgart, + University of Stuttgart. All rights reserved. +Copyright (c) 2004-2008 The Regents of the University of California. + All rights reserved. +Copyright (c) 2006-2018 Los Alamos National Security, LLC. All rights + reserved. +Copyright (c) 2006-2021 Cisco Systems, Inc. All rights reserved. +Copyright (c) 2006-2010 Voltaire, Inc. All rights reserved. +Copyright (c) 2006-2021 Sandia National Laboratories. All rights reserved. +Copyright (c) 2006-2010 Sun Microsystems, Inc. All rights reserved. + Use is subject to license terms. +Copyright (c) 2006-2021 The University of Houston. All rights reserved. +Copyright (c) 2006-2009 Myricom, Inc. All rights reserved. 
+Copyright (c) 2007-2017 UT-Battelle, LLC. All rights reserved. +Copyright (c) 2007-2021 IBM Corporation. All rights reserved. +Copyright (c) 1998-2005 Forschungszentrum Juelich, Juelich Supercomputing + Centre, Federal Republic of Germany +Copyright (c) 2005-2008 ZIH, TU Dresden, Federal Republic of Germany +Copyright (c) 2007 Evergrid, Inc. All rights reserved. +Copyright (c) 2008-2016 Chelsio, Inc. All rights reserved. +Copyright (c) 2008-2009 Institut National de Recherche en + Informatique. All rights reserved. +Copyright (c) 2007 Lawrence Livermore National Security, LLC. + All rights reserved. +Copyright (c) 2007-2019 Mellanox Technologies. All rights reserved. +Copyright (c) 2006-2010 QLogic Corporation. All rights reserved. +Copyright (c) 2008-2017 Oak Ridge National Labs. All rights reserved. +Copyright (c) 2006-2012 Oracle and/or its affiliates. All rights reserved. +Copyright (c) 2009-2015 Bull SAS. All rights reserved. +Copyright (c) 2010 ARM ltd. All rights reserved. +Copyright (c) 2016 ARM, Inc. All rights reserved. +Copyright (c) 2010-2011 Alex Brick . All rights reserved. +Copyright (c) 2012 The University of Wisconsin-La Crosse. All rights reserved. +Copyright (c) 2013-2020 Intel, Inc. All rights reserved. +Copyright (c) 2011-2021 NVIDIA Corporation. All rights reserved. +Copyright (c) 2016-2018 Broadcom Limited. All rights reserved. +Copyright (c) 2011-2021 Fujitsu Limited. All rights reserved. +Copyright (c) 2014-2015 Hewlett-Packard Development Company, LP. All + rights reserved. +Copyright (c) 2013-2021 Research Organization for Information Science (RIST). + All rights reserved. +Copyright (c) 2017-2021 Amazon.com, Inc. or its affiliates. All Rights + reserved. +Copyright (c) 2018 DataDirect Networks. All rights reserved. +Copyright (c) 2018-2021 Triad National Security, LLC. All rights reserved. +Copyright (c) 2019-2021 Hewlett Packard Enterprise Development, LP. +Copyright (c) 2020-2021 Google, LLC. All rights reserved. 
+Copyright (c) 2002 University of Chicago +Copyright (c) 2001 Argonne National Laboratory +Copyright (c) 2020-2021 Cornelis Networks, Inc. All rights reserved. +Copyright (c) 2021 Nanook Consulting +Copyright (c) 2017-2019 Iowa State University Research Foundation, Inc. + All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +- Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +- Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer listed + in this license in the documentation and/or other materials + provided with the distribution. + +- Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +The copyright holders provide no reassurances that the source code +provided does not infringe any patent, copyright, or any other +intellectual property rights of third parties. The copyright holders +disclaim any liability to any recipient for claims brought against +recipient by any third party for infringement of that parties +intellectual property rights. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/docs/license/pmix.txt b/docs/license/pmix.txt new file mode 100644 index 00000000000..eb5504e3236 --- /dev/null +++ b/docs/license/pmix.txt @@ -0,0 +1,87 @@ +Copyright (c) 2004-2010 The Trustees of Indiana University and Indiana + University Research and Technology + Corporation. All rights reserved. +Copyright (c) 2004-2010 The University of Tennessee and The University + of Tennessee Research Foundation. All rights + reserved. +Copyright (c) 2004-2010 High Performance Computing Center Stuttgart, + University of Stuttgart. All rights reserved. +Copyright (c) 2004-2008 The Regents of the University of California. + All rights reserved. +Copyright (c) 2006-2010 Los Alamos National Security, LLC. All rights + reserved. +Copyright (c) 2006-2010 Cisco Systems, Inc. All rights reserved. +Copyright (c) 2006-2010 Voltaire, Inc. All rights reserved. +Copyright (c) 2006-2011 Sandia National Laboratories. All rights reserved. +Copyright (c) 2006-2010 Sun Microsystems, Inc. All rights reserved. + Use is subject to license terms. +Copyright (c) 2006-2010 The University of Houston. All rights reserved. +Copyright (c) 2006-2009 Myricom, Inc. All rights reserved. +Copyright (c) 2007-2008 UT-Battelle, LLC. All rights reserved. +Copyright (c) 2007-2019 IBM Corporation. All rights reserved. 
+Copyright (c) 1998-2005 Forschungszentrum Juelich, Juelich Supercomputing + Centre, Federal Republic of Germany +Copyright (c) 2005-2008 ZIH, TU Dresden, Federal Republic of Germany +Copyright (c) 2007 Evergrid, Inc. All rights reserved. +Copyright (c) 2008 Chelsio, Inc. All rights reserved. +Copyright (c) 2008-2009 Institut National de Recherche en + Informatique. All rights reserved. +Copyright (c) 2007 Lawrence Livermore National Security, LLC. + All rights reserved. +Copyright (c) 2007-2019 Mellanox Technologies. All rights reserved. +Copyright (c) 2006-2010 QLogic Corporation. All rights reserved. +Copyright (c) 2008-2010 Oak Ridge National Labs. All rights reserved. +Copyright (c) 2006-2010 Oracle and/or its affiliates. All rights reserved. +Copyright (c) 2009 Bull SAS. All rights reserved. +Copyright (c) 2010 ARM ltd. All rights reserved. +Copyright (c) 2010-2011 Alex Brick . All rights reserved. +Copyright (c) 2012 The University of Wisconsin-La Crosse. All rights + reserved. +Copyright (c) 2013-2019 Intel, Inc. All rights reserved. +Copyright (c) 2011-2014 NVIDIA Corporation. All rights reserved. +Copyright (c) 2019 Amazon.com, Inc. or its affiliates. All Rights + reserved. + +$COPYRIGHT$ + +Additional copyrights may follow + +$HEADER$ + +The following LICENSE pertains to both PMIx and any code ported +from Open MPI. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +- Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +- Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer listed + in this license in the documentation and/or other materials + provided with the distribution. 
+ +- Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +The copyright holders provide no reassurances that the source code +provided does not infringe any patent, copyright, or any other +intellectual property rights of third parties. The copyright holders +disclaim any liability to any recipient for claims brought against +recipient by any third party for infringement of that parties +intellectual property rights. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/docs/license/prrte.txt b/docs/license/prrte.txt new file mode 100644 index 00000000000..e33930a4733 --- /dev/null +++ b/docs/license/prrte.txt @@ -0,0 +1,92 @@ +Copyright (c) 2004-2010 The Trustees of Indiana University and Indiana + University Research and Technology + Corporation. All rights reserved. +Copyright (c) 2004-2017 The University of Tennessee and The University + of Tennessee Research Foundation. All rights + reserved. +Copyright (c) 2004-2010 High Performance Computing Center Stuttgart, + University of Stuttgart. All rights reserved. +Copyright (c) 2004-2008 The Regents of the University of California. + All rights reserved. 
+Copyright (c) 2006-2017 Los Alamos National Security, LLC. All rights + reserved. +Copyright (c) 2006-2017 Cisco Systems, Inc. All rights reserved. +Copyright (c) 2006-2010 Voltaire, Inc. All rights reserved. +Copyright (c) 2006-2017 Sandia National Laboratories. All rights reserved. +Copyright (c) 2006-2010 Sun Microsystems, Inc. All rights reserved. + Use is subject to license terms. +Copyright (c) 2006-2017 The University of Houston. All rights reserved. +Copyright (c) 2006-2009 Myricom, Inc. All rights reserved. +Copyright (c) 2007-2017 UT-Battelle, LLC. All rights reserved. +Copyright (c) 2007-2017 IBM Corporation. All rights reserved. +Copyright (c) 1998-2005 Forschungszentrum Juelich, Juelich Supercomputing + Centre, Federal Republic of Germany +Copyright (c) 2005-2008 ZIH, TU Dresden, Federal Republic of Germany +Copyright (c) 2007 Evergrid, Inc. All rights reserved. +Copyright (c) 2008 Chelsio, Inc. All rights reserved. +Copyright (c) 2008-2009 Institut National de Recherche en + Informatique. All rights reserved. +Copyright (c) 2007 Lawrence Livermore National Security, LLC. + All rights reserved. +Copyright (c) 2007-2017 Mellanox Technologies. All rights reserved. +Copyright (c) 2006-2010 QLogic Corporation. All rights reserved. +Copyright (c) 2008-2017 Oak Ridge National Labs. All rights reserved. +Copyright (c) 2006-2012 Oracle and/or its affiliates. All rights reserved. +Copyright (c) 2009-2015 Bull SAS. All rights reserved. +Copyright (c) 2010 ARM ltd. All rights reserved. +Copyright (c) 2016 ARM, Inc. All rights reserved. +Copyright (c) 2010-2011 Alex Brick . All rights reserved. +Copyright (c) 2012 The University of Wisconsin-La Crosse. All rights + reserved. +Copyright (c) 2013-2018 Intel, Inc. All rights reserved. +Copyright (c) 2011-2017 NVIDIA Corporation. All rights reserved. +Copyright (c) 2016 Broadcom Limited. All rights reserved. +Copyright (c) 2011-2017 Fujitsu Limited. All rights reserved. 
+Copyright (c) 2014-2015 Hewlett-Packard Development Company, LP. All + rights reserved. +Copyright (c) 2013-2017 Research Organization for Information Science (RIST). + All rights reserved. +Copyright (c) 2017-2018 Amazon.com, Inc. or its affiliates. All Rights + reserved. +Copyright (c) 2018 DataDirect Networks. All rights reserved. + +$COPYRIGHT$ + +Additional copyrights may follow + +$HEADER$ + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +- Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +- Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer listed + in this license in the documentation and/or other materials + provided with the distribution. + +- Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +The copyright holders provide no reassurances that the source code +provided does not infringe any patent, copyright, or any other +intellectual property rights of third parties. The copyright holders +disclaim any liability to any recipient for claims brought against +recipient by any third party for infringement of that parties +intellectual property rights. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/docs/license/treematch.txt b/docs/license/treematch.txt
new file mode 100644
index 00000000000..186bd9c7d2f
--- /dev/null
+++ b/docs/license/treematch.txt
@@ -0,0 +1,514 @@
+CeCILL-B FREE SOFTWARE LICENSE AGREEMENT
+
+
+    Notice
+
+This Agreement is a Free Software license agreement that is the result
+of discussions between its authors in order to ensure compliance with
+the two main principles guiding its drafting:
+
+  * firstly, compliance with the principles governing the distribution
+    of Free Software: access to source code, broad rights granted to
+    users,
+  * secondly, the election of a governing law, French law, with which
+    it is conformant, both as regards the law of torts and
+    intellectual property law, and the protection that it offers to
+    both authors and holders of the economic rights over software.
+
+The authors of the CeCILL-B (for Ce[a] C[nrs] I[nria] L[ogiciel] L[ibre])
+license are:
+
+Commissariat à l'Energie Atomique - CEA, a public scientific, technical
+and industrial research establishment, having its principal place of
+business at 25 rue Leblanc, immeuble Le Ponant D, 75015 Paris, France.
+
+Centre National de la Recherche Scientifique - CNRS, a public scientific
+and technological establishment, having its principal place of business
+at 3 rue Michel-Ange, 75794 Paris cedex 16, France.
+ +Institut National de Recherche en Informatique et en Automatique - +INRIA, a public scientific and technological establishment, having its +principal place of business at Domaine de Voluceau, Rocquencourt, BP +105, 78153 Le Chesnay cedex, France. + + + Preamble + +This Agreement is an open source software license intended to give users +significant freedom to modify and redistribute the software licensed +hereunder. + +The exercising of this freedom is conditional upon a strong obligation +of giving credits for everybody that distributes a software +incorporating a software ruled by the current license so as all +contributions to be properly identified and acknowledged. + +In consideration of access to the source code and the rights to copy, +modify and redistribute granted by the license, users are provided only +with a limited warranty and the software's author, the holder of the +economic rights, and the successive licensors only have limited liability. + +In this respect, the risks associated with loading, using, modifying +and/or developing or reproducing the software by the user are brought to +the user's attention, given its Free Software status, which may make it +complicated to use, with the result that its use is reserved for +developers and experienced professionals having in-depth computer +knowledge. Users are therefore encouraged to load and test the +suitability of the software as regards their requirements in conditions +enabling the security of their systems and/or data to be ensured and, +more generally, to use and operate it in the same conditions of +security. This Agreement may be freely reproduced and published, +provided it is not altered, and that no provisions are either added or +removed herefrom. + +This Agreement may apply to any or all software for which the holder of +the economic rights decides to submit the use thereof to its provisions. 
+ + + Article 1 - DEFINITIONS + +For the purpose of this Agreement, when the following expressions +commence with a capital letter, they shall have the following meaning: + +Agreement: means this license agreement, and its possible subsequent +versions and annexes. + +Software: means the software in its Object Code and/or Source Code form +and, where applicable, its documentation, "as is" when the Licensee +accepts the Agreement. + +Initial Software: means the Software in its Source Code and possibly its +Object Code form and, where applicable, its documentation, "as is" when +it is first distributed under the terms and conditions of the Agreement. + +Modified Software: means the Software modified by at least one +Contribution. + +Source Code: means all the Software's instructions and program lines to +which access is required so as to modify the Software. + +Object Code: means the binary files originating from the compilation of +the Source Code. + +Holder: means the holder(s) of the economic rights over the Initial +Software. + +Licensee: means the Software user(s) having accepted the Agreement. + +Contributor: means a Licensee having made at least one Contribution. + +Licensor: means the Holder, or any other individual or legal entity, who +distributes the Software under the Agreement. + +Contribution: means any or all modifications, corrections, translations, +adaptations and/or new functions integrated into the Software by any or +all Contributors, as well as any or all Internal Modules. + +Module: means a set of sources files including their documentation that +enables supplementary functions or services in addition to those offered +by the Software. + +External Module: means any or all Modules, not derived from the +Software, so that this Module and the Software run in separate address +spaces, with one calling the other when they are run. 
+ +Internal Module: means any or all Module, connected to the Software so +that they both execute in the same address space. + +Parties: mean both the Licensee and the Licensor. + +These expressions may be used both in singular and plural form. + + + Article 2 - PURPOSE + +The purpose of the Agreement is the grant by the Licensor to the +Licensee of a non-exclusive, transferable and worldwide license for the +Software as set forth in Article 5 hereinafter for the whole term of the +protection granted by the rights over said Software. + + + Article 3 - ACCEPTANCE + +3.1 The Licensee shall be deemed as having accepted the terms and +conditions of this Agreement upon the occurrence of the first of the +following events: + + * (i) loading the Software by any or all means, notably, by + downloading from a remote server, or by loading from a physical + medium; + * (ii) the first time the Licensee exercises any of the rights + granted hereunder. + +3.2 One copy of the Agreement, containing a notice relating to the +characteristics of the Software, to the limited warranty, and to the +fact that its use is restricted to experienced users has been provided +to the Licensee prior to its acceptance as set forth in Article 3.1 +hereinabove, and the Licensee hereby acknowledges that it has read and +understood it. + + + Article 4 - EFFECTIVE DATE AND TERM + + + 4.1 EFFECTIVE DATE + +The Agreement shall become effective on the date when it is accepted by +the Licensee as set forth in Article 3.1. + + + 4.2 TERM + +The Agreement shall remain in force for the entire legal term of +protection of the economic rights over the Software. + + + Article 5 - SCOPE OF RIGHTS GRANTED + +The Licensor hereby grants to the Licensee, who accepts, the following +rights over the Software for any or all use, and for the term of the +Agreement, on the basis of the terms and conditions set forth hereinafter. 
+ +Besides, if the Licensor owns or comes to own one or more patents +protecting all or part of the functions of the Software or of its +components, the Licensor undertakes not to enforce the rights granted by +these patents against successive Licensees using, exploiting or +modifying the Software. If these patents are transferred, the Licensor +undertakes to have the transferees subscribe to the obligations set +forth in this paragraph. + + + 5.1 RIGHT OF USE + +The Licensee is authorized to use the Software, without any limitation +as to its fields of application, with it being hereinafter specified +that this comprises: + + 1. permanent or temporary reproduction of all or part of the Software + by any or all means and in any or all form. + + 2. loading, displaying, running, or storing the Software on any or + all medium. + + 3. entitlement to observe, study or test its operation so as to + determine the ideas and principles behind any or all constituent + elements of said Software. This shall apply when the Licensee + carries out any or all loading, displaying, running, transmission + or storage operation as regards the Software, that it is entitled + to carry out hereunder. + + + 5.2 ENTITLEMENT TO MAKE CONTRIBUTIONS + +The right to make Contributions includes the right to translate, adapt, +arrange, or make any or all modifications to the Software, and the right +to reproduce the resulting software. + +The Licensee is authorized to make any or all Contributions to the +Software provided that it includes an explicit notice that it is the +author of said Contribution and indicates the date of the creation thereof. + + + 5.3 RIGHT OF DISTRIBUTION + +In particular, the right of distribution includes the right to publish, +transmit and communicate the Software to the general public on any or +all medium, and by any or all means, and the right to market, either in +consideration of a fee, or free of charge, one or more copies of the +Software by any means. 
+ +The Licensee is further authorized to distribute copies of the modified +or unmodified Software to third parties according to the terms and +conditions set forth hereinafter. + + + 5.3.1 DISTRIBUTION OF SOFTWARE WITHOUT MODIFICATION + +The Licensee is authorized to distribute true copies of the Software in +Source Code or Object Code form, provided that said distribution +complies with all the provisions of the Agreement and is accompanied by: + + 1. a copy of the Agreement, + + 2. a notice relating to the limitation of both the Licensor's + warranty and liability as set forth in Articles 8 and 9, + +and that, in the event that only the Object Code of the Software is +redistributed, the Licensee allows effective access to the full Source +Code of the Software at a minimum during the entire period of its +distribution of the Software, it being understood that the additional +cost of acquiring the Source Code shall not exceed the cost of +transferring the data. + + + 5.3.2 DISTRIBUTION OF MODIFIED SOFTWARE + +If the Licensee makes any Contribution to the Software, the resulting +Modified Software may be distributed under a license agreement other +than this Agreement subject to compliance with the provisions of Article +5.3.4. + + + 5.3.3 DISTRIBUTION OF EXTERNAL MODULES + +When the Licensee has developed an External Module, the terms and +conditions of this Agreement do not apply to said External Module, that +may be distributed under a separate license agreement. + + + 5.3.4 CREDITS + +Any Licensee who may distribute a Modified Software hereby expressly +agrees to: + + 1. indicate in the related documentation that it is based on the + Software licensed hereunder, and reproduce the intellectual + property notice for the Software, + + 2. ensure that written indications of the Software intended use, + intellectual property notice and license hereunder are included in + easily accessible format from the Modified Software interface, + + 3. 
mention, on a freely accessible website describing the Modified + Software, at least throughout the distribution term thereof, that + it is based on the Software licensed hereunder, and reproduce the + Software intellectual property notice, + + 4. where it is distributed to a third party that may distribute a + Modified Software without having to make its source code + available, make its best efforts to ensure that said third party + agrees to comply with the obligations set forth in this Article . + +If the Software, whether or not modified, is distributed with an +External Module designed for use in connection with the Software, the +Licensee shall submit said External Module to the foregoing obligations. + + + 5.3.5 COMPATIBILITY WITH THE CeCILL AND CeCILL-C LICENSES + +Where a Modified Software contains a Contribution subject to the CeCILL +license, the provisions set forth in Article 5.3.4 shall be optional. + +A Modified Software may be distributed under the CeCILL-C license. In +such a case the provisions set forth in Article 5.3.4 shall be optional. + + + Article 6 - INTELLECTUAL PROPERTY + + + 6.1 OVER THE INITIAL SOFTWARE + +The Holder owns the economic rights over the Initial Software. Any or +all use of the Initial Software is subject to compliance with the terms +and conditions under which the Holder has elected to distribute its work +and no one shall be entitled to modify the terms and conditions for the +distribution of said Initial Software. + +The Holder undertakes that the Initial Software will remain ruled at +least by this Agreement, for the duration set forth in Article 4.2. + + + 6.2 OVER THE CONTRIBUTIONS + +The Licensee who develops a Contribution is the owner of the +intellectual property rights over this Contribution as defined by +applicable law. 
+
+
+    6.3 OVER THE EXTERNAL MODULES
+
+The Licensee who develops an External Module is the owner of the
+intellectual property rights over this External Module as defined by
+applicable law and is free to choose the type of agreement that shall
+govern its distribution.
+
+
+    6.4 JOINT PROVISIONS
+
+The Licensee expressly undertakes:
+
+   1. not to remove, or modify, in any manner, the intellectual property
+      notices attached to the Software;
+
+   2. to reproduce said notices, in an identical manner, in the copies
+      of the Software modified or not.
+
+The Licensee undertakes not to directly or indirectly infringe the
+intellectual property rights of the Holder and/or Contributors on the
+Software and to take, where applicable, vis-à-vis its staff, any and all
+measures required to ensure respect of said intellectual property rights
+of the Holder and/or Contributors.
+
+
+    Article 7 - RELATED SERVICES
+
+7.1 Under no circumstances shall the Agreement oblige the Licensor to
+provide technical assistance or maintenance services for the Software.
+
+However, the Licensor is entitled to offer this type of services. The
+terms and conditions of such technical assistance, and/or such
+maintenance, shall be set forth in a separate instrument. Only the
+Licensor offering said maintenance and/or technical assistance services
+shall incur liability therefor.
+
+7.2 Similarly, any Licensor is entitled to offer to its licensees, under
+its sole responsibility, a warranty, that shall only be binding upon
+itself, for the redistribution of the Software and/or the Modified
+Software, under terms and conditions that it is free to decide. Said
+warranty, and the financial terms and conditions of its application,
+shall be subject of a separate instrument executed between the Licensor
+and the Licensee.
+ + + Article 8 - LIABILITY + +8.1 Subject to the provisions of Article 8.2, the Licensee shall be +entitled to claim compensation for any direct loss it may have suffered +from the Software as a result of a fault on the part of the relevant +Licensor, subject to providing evidence thereof. + +8.2 The Licensor's liability is limited to the commitments made under +this Agreement and shall not be incurred as a result of in particular: +(i) loss due the Licensee's total or partial failure to fulfill its +obligations, (ii) direct or consequential loss that is suffered by the +Licensee due to the use or performance of the Software, and (iii) more +generally, any consequential loss. In particular the Parties expressly +agree that any or all pecuniary or business loss (i.e. loss of data, +loss of profits, operating loss, loss of customers or orders, +opportunity cost, any disturbance to business activities) or any or all +legal proceedings instituted against the Licensee by a third party, +shall constitute consequential loss and shall not provide entitlement to +any or all compensation from the Licensor. + + + Article 9 - WARRANTY + +9.1 The Licensee acknowledges that the scientific and technical +state-of-the-art when the Software was distributed did not enable all +possible uses to be tested and verified, nor for the presence of +possible defects to be detected. In this respect, the Licensee's +attention has been drawn to the risks associated with loading, using, +modifying and/or developing and reproducing the Software which are +reserved for experienced users. + +The Licensee shall be responsible for verifying, by any or all means, +the suitability of the product for its requirements, its good working +order, and for ensuring that it shall not cause damage to either persons +or properties. + +9.2 The Licensor hereby represents, in good faith, that it is entitled +to grant all the rights over the Software (including in particular the +rights set forth in Article 5). 
+ +9.3 The Licensee acknowledges that the Software is supplied "as is" by +the Licensor without any other express or tacit warranty, other than +that provided for in Article 9.2 and, in particular, without any warranty +as to its commercial value, its secured, safe, innovative or relevant +nature. + +Specifically, the Licensor does not warrant that the Software is free +from any error, that it will operate without interruption, that it will +be compatible with the Licensee's own equipment and software +configuration, nor that it will meet the Licensee's requirements. + +9.4 The Licensor does not either expressly or tacitly warrant that the +Software does not infringe any third party intellectual property right +relating to a patent, software or any other property right. Therefore, +the Licensor disclaims any and all liability towards the Licensee +arising out of any or all proceedings for infringement that may be +instituted in respect of the use, modification and redistribution of the +Software. Nevertheless, should such proceedings be instituted against +the Licensee, the Licensor shall provide it with technical and legal +assistance for its defense. Such technical and legal assistance shall be +decided on a case-by-case basis between the relevant Licensor and the +Licensee pursuant to a memorandum of understanding. The Licensor +disclaims any and all liability as regards the Licensee's use of the +name of the Software. No warranty is given as regards the existence of +prior rights over the name of the Software or as regards the existence +of a trademark. + + + Article 10 - TERMINATION + +10.1 In the event of a breach by the Licensee of its obligations +hereunder, the Licensor may automatically terminate this Agreement +thirty (30) days after notice has been sent to the Licensee and has +remained ineffective. + +10.2 A Licensee whose Agreement is terminated shall no longer be +authorized to use, modify or distribute the Software. 
However, any +licenses that it may have granted prior to termination of the Agreement +shall remain valid subject to their having been granted in compliance +with the terms and conditions hereof. + + + Article 11 - MISCELLANEOUS + + + 11.1 EXCUSABLE EVENTS + +Neither Party shall be liable for any or all delay, or failure to +perform the Agreement, that may be attributable to an event of force +majeure, an act of God or an outside cause, such as defective +functioning or interruptions of the electricity or telecommunications +networks, network paralysis following a virus attack, intervention by +government authorities, natural disasters, water damage, earthquakes, +fire, explosions, strikes and labor unrest, war, etc. + +11.2 Any failure by either Party, on one or more occasions, to invoke +one or more of the provisions hereof, shall under no circumstances be +interpreted as being a waiver by the interested Party of its right to +invoke said provision(s) subsequently. + +11.3 The Agreement cancels and replaces any or all previous agreements, +whether written or oral, between the Parties and having the same +purpose, and constitutes the entirety of the agreement between said +Parties concerning said purpose. No supplement or modification to the +terms and conditions hereof shall be effective as between the Parties +unless it is made in writing and signed by their duly authorized +representatives. + +11.4 In the event that one or more of the provisions hereof were to +conflict with a current or future applicable act or legislative text, +said act or legislative text shall prevail, and the Parties shall make +the necessary amendments so as to comply with said act or legislative +text. All other provisions shall remain effective. Similarly, invalidity +of a provision of the Agreement, for any reason whatsoever, shall not +cause the Agreement as a whole to be invalid. 
+ + + 11.5 LANGUAGE + +The Agreement is drafted in both French and English and both versions +are deemed authentic. + + + Article 12 - NEW VERSIONS OF THE AGREEMENT + +12.1 Any person is authorized to duplicate and distribute copies of this +Agreement. + +12.2 So as to ensure coherence, the wording of this Agreement is +protected and may only be modified by the authors of the License, who +reserve the right to periodically publish updates or new versions of the +Agreement, each with a separate number. These subsequent versions may +address new issues encountered by Free Software. + +12.3 Any Software distributed under a given version of the Agreement may +only be subsequently distributed under the same version of the Agreement +or a subsequent version. + + + Article 13 - GOVERNING LAW AND JURISDICTION + +13.1 The Agreement is governed by French law. The Parties agree to +endeavor to seek an amicable solution to any disagreements or disputes +that may arise during the performance of the Agreement. + +13.2 Failing an amicable solution within two (2) months as from their +occurrence, and unless emergency proceedings are necessary, the +disagreements or disputes shall be referred to the Paris Courts having +jurisdiction, by the more diligent Party. + + +Version 1.0 dated 2006-09-05. diff --git a/docs/man-openmpi/index.rst b/docs/man-openmpi/index.rst new file mode 100644 index 00000000000..2291fea87b8 --- /dev/null +++ b/docs/man-openmpi/index.rst @@ -0,0 +1,9 @@ +Open MPI manual pages +===================== + +.. toctree:: + :maxdepth: 1 + + man1/index + man3/index + man5/index diff --git a/docs/man-openmpi/man1/index.rst b/docs/man-openmpi/man1/index.rst new file mode 100644 index 00000000000..f9afaa10d5c --- /dev/null +++ b/docs/man-openmpi/man1/index.rst @@ -0,0 +1,11 @@ +Commands (section 1) +==================== + +.. 
toctree:: + :maxdepth: 1 + + ompi-wrapper-compiler.1.rst + mpirun.1.rst + mpisync.1.rst + ompi_info.1.rst + opal_wrapper.1.rst diff --git a/docs/man-openmpi/man1/mpic++.1 b/docs/man-openmpi/man1/mpic++.1 new file mode 100644 index 00000000000..7b464ffa102 --- /dev/null +++ b/docs/man-openmpi/man1/mpic++.1 @@ -0,0 +1 @@ +.so man1/ompi-wrapper-compiler.1 diff --git a/docs/man-openmpi/man1/mpicc.1 b/docs/man-openmpi/man1/mpicc.1 new file mode 100644 index 00000000000..7b464ffa102 --- /dev/null +++ b/docs/man-openmpi/man1/mpicc.1 @@ -0,0 +1 @@ +.so man1/ompi-wrapper-compiler.1 diff --git a/docs/man-openmpi/man1/mpicxx.1 b/docs/man-openmpi/man1/mpicxx.1 new file mode 100644 index 00000000000..7b464ffa102 --- /dev/null +++ b/docs/man-openmpi/man1/mpicxx.1 @@ -0,0 +1 @@ +.so man1/ompi-wrapper-compiler.1 diff --git a/docs/man-openmpi/man1/mpif77.1 b/docs/man-openmpi/man1/mpif77.1 new file mode 100644 index 00000000000..7b464ffa102 --- /dev/null +++ b/docs/man-openmpi/man1/mpif77.1 @@ -0,0 +1 @@ +.so man1/ompi-wrapper-compiler.1 diff --git a/docs/man-openmpi/man1/mpif90.1 b/docs/man-openmpi/man1/mpif90.1 new file mode 100644 index 00000000000..7b464ffa102 --- /dev/null +++ b/docs/man-openmpi/man1/mpif90.1 @@ -0,0 +1 @@ +.so man1/ompi-wrapper-compiler.1 diff --git a/docs/man-openmpi/man1/mpifort.1 b/docs/man-openmpi/man1/mpifort.1 new file mode 100644 index 00000000000..7b464ffa102 --- /dev/null +++ b/docs/man-openmpi/man1/mpifort.1 @@ -0,0 +1 @@ +.so man1/ompi-wrapper-compiler.1 diff --git a/docs/man-openmpi/man1/mpijavac.1 b/docs/man-openmpi/man1/mpijavac.1 new file mode 100644 index 00000000000..7b464ffa102 --- /dev/null +++ b/docs/man-openmpi/man1/mpijavac.1 @@ -0,0 +1 @@ +.so man1/ompi-wrapper-compiler.1 diff --git a/docs/man-openmpi/man1/mpirun.1.rst b/docs/man-openmpi/man1/mpirun.1.rst new file mode 100644 index 00000000000..f68ef72d52d --- /dev/null +++ b/docs/man-openmpi/man1/mpirun.1.rst @@ -0,0 +1,1578 @@ +.. _man1-mpirun: +.. 
_man1-mpiexec: + + +mpirun / mpiexec +================ + +.. include_body + +mpirun, mpiexec |mdash| Execute serial and parallel jobs in Open MPI. + +.. note:: ``mpirun`` and ``mpiexec`` are all synonyms for each other. + Using either of the names will produce the same behavior. + +SYNOPSIS +-------- + +Single Process Multiple Data (SPMD) Model: + +.. code:: + + mpirun [ options ] [ ] + +Multiple Instruction Multiple Data (MIMD) Model: + +.. code:: + + mpirun [ global_options ] + [ local_options1 ] [ ] : + [ local_options2 ] [ ] : + ... : + [ local_optionsN ] [ ] + +Note that in both models, invoking ``mpirun`` via an absolute path +name is equivalent to specifying the ``--prefix`` option with a +```` value equivalent to the directory where ``mpirun`` resides, +minus its last subdirectory. For example: + +.. code:: sh + + shell$ /usr/local/bin/mpirun ... + +is equivalent to + +.. code:: sh + + shell$ mpirun --prefix /usr/local + +QUICK SUMMARY +------------- + +If you are simply looking for how to run an MPI application, you +probably want to use a command line of the following form: + +.. code:: sh + + shell$ mpirun [ -np X ] [ --hostfile ] + +This will run X copies of in your current run-time +environment (if running under a supported resource manager, Open MPI's +mpirun will usually automatically use the corresponding resource +manager process starter, as opposed to, for example, rsh or ssh, which +require the use of a hostfile, or will default to running all X copies +on the localhost), scheduling (by default) in a round-robin fashion by +CPU slot. See the rest of this page for more details. + +Please note that mpirun automatically binds processes as of the start +of the v1.8 series. 
Three binding patterns are used in the absence of
+any further directives:
+
+* Bind to core: when the number of processes is <= 2
+* Bind to socket: when the number of processes is > 2
+* Bind to none: when oversubscribed
+
+If your application uses threads, then you probably want to ensure
+that you are either not bound at all (by specifying --bind-to none),
+or bound to multiple cores using an appropriate binding level or
+specific number of processing elements per application process.
+
+.. _man1-mpirun-definition-of-slot:
+
+DEFINITION OF 'SLOT'
+--------------------
+
+The term "slot" is used extensively in the rest of this manual page.
+A slot is an allocation unit for a process. The number of slots on a
+node indicates how many processes can potentially execute on that node.
+By default, Open MPI will allow one process per slot.
+
+If Open MPI is not explicitly told how many slots are available on a
+node (e.g., if a hostfile is used and the number of slots is not
+specified for a given node), it will determine a maximum number of
+slots for that node in one of two ways:
+
+#. Default behavior: By default, Open MPI will attempt to discover the
+   number of processor cores on the node, and use that as the number
+   of slots available.
+
+#. When ``--use-hwthread-cpus`` is used: If ``--use-hwthread-cpus`` is
+   specified on the ``mpirun`` command line, then Open MPI will attempt to
+   discover the number of hardware threads on the node, and use that
+   as the number of slots available.
+
+This default behavior also occurs when specifying the ``--host``
+option with a single host. Thus, the command:
+
+.. code:: sh
+
+   shell$ mpirun --host node1 ./a.out
+
+launches a number of processes equal to the number of cores on node
+``node1``, whereas:
+
+.. code:: sh
+
+   shell$ mpirun --host node1 --use-hwthread-cpus ./a.out
+
+launches a number of processes equal to the number of hardware
+threads on ``node1``.
+ +When Open MPI applications are invoked in an environment managed by a +resource manager (e.g., inside of a SLURM job), and Open MPI was built +with appropriate support for that resource manager, then Open MPI will +be informed of the number of slots for each node by the resource +manager. For example: + +.. code:: sh + + shell$ mpirun ./a.out + +launches one process for every slot (on every node) as dictated by +the resource manager job specification. + +Also note that the one-process-per-slot restriction can be overridden +in unmanaged environments (e.g., when using hostfiles without a +resource manager) if oversubscription is enabled (by default, it is +disabled). Most MPI applications and HPC environments do not +oversubscribe; for simplicity, the majority of this documentation +assumes that oversubscription is not enabled. + +Slots are not hardware resources +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Slots are frequently incorrectly conflated with hardware resources. +It is important to realize that slots are an entirely different metric +than the number (and type) of hardware resources available. + +Here are some examples that may help illustrate the difference: + +#. More processor cores than slots: Consider a resource manager job + environment that tells Open MPI that there is a single node with 20 + processor cores and 2 slots available. By default, Open MPI will + only let you run up to 2 processes. + + Meaning: you run out of slots long before you run out of processor + cores. + +#. More slots than processor cores: Consider a hostfile with a single + node listed with a ``slots=50`` qualification. The node has 20 + processor cores. By default, Open MPI will let you run up to 50 + processes. + + Meaning: you can run many more processes than you have processor + cores. + +.. 
_man1-mpirun-definition-of-processor-element: + +DEFINITION OF 'PROCESSOR ELEMENT' +--------------------------------- + +By default, Open MPI defines that a "processing element" is a +processor core. However, if ``--use-hwthread-cpus`` is specified on the +mpirun command line, then a "processing element" is a hardware thread. + +OPTIONS +------- + +mpirun will send the name of the directory where it was invoked on the +local node to each of the remote nodes, and attempt to change to that +directory. See the "Current Working Directory" section below for +further details. + +* ````: The program executable. This is identified as the + first non-recognized argument to mpirun. + +* ````: Pass these run-time arguments to every new process. + These must always be the last arguments to mpirun. If an app context + file is used, ```` will be ignored. + +* ``-h``, ``--help``: Display help for this command + +* ``-q``, ``--quiet``: Suppress informative messages from orterun + during application execution. + +* ``-v``, ``--verbose``:` Be verbose + +* ``-V``, ``--version``: Print version number. If no other arguments + are given, this will also cause orterun to exit. + +* ``-N ``: Launch num processes per node on all allocated nodes + (synonym for ``--npernode``). + +* ``--display-map``: Display a table showing the mapped location of + each process prior to launch. + +* ``--display-allocation``: Display the detected resource allocation. + +* ``--output-proctable``: Output the debugger proctable after launch. + +* ``--dvm``: Create a persistent distributed virtual machine (DVM). + +* ``--max-vm-size ``: Number of processes to run. + +* ``--novm``: Execute without creating an allocation-spanning virtual + machine (only start daemons on nodes hosting application procs). + +* ``--hnp ``: Specify the URI of the Head Node Process (HNP), or + the name of the file (specified as ``file:filename``) that contains + that info. 
+ +Use one of the following options to specify which hosts (nodes) of the +cluster to run on. Note that as of the start of the v1.8 release, +mpirun will launch a daemon onto each host in the allocation (as +modified by the following options) at the very beginning of execution, +regardless of whether or not application processes will eventually be +mapped to execute there. This is done to allow collection of hardware +topology information from the remote nodes, thus allowing us to map +processes against known topology. However, it is a change from the +behavior in prior releases where daemons were only launched after +mapping was complete, and thus only occurred on nodes where +application processes would actually be executing. + +* ``-H``, ``--host ``: ist of hosts on which to + invoke processes. + +* ``--hostfile ``: Provide a hostfile to use. + +* ``--default-hostfile ``: Provide a default hostfile. + +* ``--machinefile ``: Synonym for ``--hostfile``. + +* ``--cpu-set ``: Restrict launched processes to the specified + logical CPUs on each node (comma-separated list). Note that the + binding options will still apply within the specified envelope + |mdash| e.g., you can elect to bind each process to only one CPU + within the specified CPU set. + +The following options specify the number of processes to launch. Note +that none of the options imply a particular binding policy |mdash| e.g., +requesting N processes for each socket does not imply that the +processes will be bound to the socket. + +* ``-c``, ``-n``, ``--n``, ``-np <#>``: Run this many copies of the + program on the given nodes. This option indicates that the + specified file is an executable program and not an application + context. If no value is provided for the number of copies to execute + (i.e., neither the ``-np`` nor its synonyms are provided on the + command line), Open MPI will automatically execute a copy of the + program on each process slot (see below for description of a + "process slot"). 
This feature, however, can only be used in the SPMD + model and will return an error (without beginning execution of the + application) otherwise. + +* ``--map-by ppr:N:``: Launch N times the number of objects of + the specified type on each node. + +* ``--npersocket <#persocket>``: On each node, launch this many + processes times the number of processor sockets on the node. + The -npersocket option also turns on the ``--bind-to-socket`` + option. (deprecated in favor of ``--map-by ppr:n:socket``) + +* ``--npernode <#pernode>``: On each node, launch this many processes. + (deprecated in favor of ``--map-by ppr:n:node``). + +* ``--pernode``: On each node, launch one process |mdash| equivalent to + ``--npernode 1``. (deprecated in favor of ``--map-by ppr:1:node``) + +To map processes: + +* ``--map-by ``: Map to the specified object, defaults to + socket. Supported options include ``slot``, ``hwthread``, ``core``, + ``L1cache``, ``L2cache``, ``L3cache``, ``socket``, ``numa``, + ``board``, ``node``, ``sequential``, ``distance``, and ``ppr``. Any + object can include modifiers by adding a ``:`` and any combination + of ``PE=n`` (bind n processing elements to each proc), ``SPAN`` + (load balance the processes across the allocation), + ``OVERSUBSCRIBE`` (allow more processes on a node than processing + elements), and ``NOOVERSUBSCRIBE``. This includes ``PPR``, where + the pattern would be terminated by another colon to separate it from + the modifiers. + +* ``--bycore``: Map processes by core (deprecated in favor of + ``--map-by core``) + +* ``--byslot``: Map and rank processes round-robin by slot. + +* ``--nolocal``: Do not run any copies of the launched application on + the same node as orterun is running. This option will override + listing the localhost with ``--host`` or any other host-specifying + mechanism. 
+ +* ``--nooversubscribe``: Do not oversubscribe any nodes; error + (without starting any processes) if the requested number of + processes would cause oversubscription. This option implicitly sets + "max_slots" equal to the "slots" value for each node. (Enabled by + default). + +* ``--oversubscribe``: Nodes are allowed to be oversubscribed, even on + a managed system, and overloading of processing elements. + +* ``--bynode``: Launch processes one per node, cycling by node in a + round-robin fashion. This spreads processes evenly among nodes and + assigns MPI_COMM_WORLD ranks in a round-robin, "by node" manner. + +* ``--cpu-list ``: Comma-delimited list of processor IDs to + which to bind processes [default=NULL]. Processor IDs are + interpreted as hwloc logical core IDs. + + .. note:: You can run Run the hwloc ``lstopo(1)`` command to see a + list of available cores and their logical IDs. + +To order processes' ranks in MPI_COMM_WORLD: + +* ``--rank-by ``: Rank in round-robin fashion according to the + specified object, defaults to slot. Supported options include + ``slot``, ``hwthread``, ``core``, ``L1cache``, ``L2cache``, + ``L3cache``, ``socket``, ``numa``, ``board``, and ``node``. + +For process binding: + +* ``--bind-to ``: Bind processes to the specified object, + defaults to ``core``. Supported options include ``slot``, + ``hwthread``, ``core``, ``l1cache``, ``l2cache``, ``l3cache``, + ``socket``, ``numa``, ``board``, ``cpu-list``, and ``none``. + +* ``--cpus-per-proc <#perproc>``: Bind each process to the specified + number of cpus. (deprecated in favor of ``--map-by :PE=n``) + +* ``--cpus-per-rank <#perrank>``: Alias for ``--cpus-per-proc``. 
+ (deprecated in favor of ``--map-by :PE=n``) + +* ``--bind-to-core`` Bind processes to cores (deprecated in favor of + ``--bind-to core``) + +* ``--bind-to-socket``: Bind processes to processor sockets + (deprecated in favor of ``--bind-to socket``) + +* ``--report-bindings``: Report any bindings for launched processes. + +For rankfiles: + +* ``--rankfile ``: Provide a rankfile file. + +To manage standard I/O: + +* ``--output-filename ``: Redirect the stdout, stderr, and + stddiag of all processes to a process-unique version of the + specified filename. Any directories in the filename will + automatically be created. Each output file will consist of + ``filename.id``, where the ``id`` will be the processes' rank in + MPI_COMM_WORLD, left-filled with zero's for correct ordering in + listings. A relative path value will be converted to an absolute + path based on the cwd where mpirun is executed. Note that this will + not work on environments where the file system on compute nodes + differs from that where :ref:`mpirun(1) ` is + executed. + +* ``--stdin ``: The MPI_COMM_WORLD rank of the process that is + to receive stdin. The default is to forward stdin to MPI_COMM_WORLD + rank 0, but this option can be used to forward stdin to any + process. It is also acceptable to specify none, indicating that no + processes are to receive stdin. + +* ``--merge-stderr-to-stdout``: Merge stderr to stdout for each + process. + +* ``--tag-output``: Tag each line of output to stdout, stderr, and + stddiag with ``[jobid, MCW_rank]`` indicating the process + jobid and MPI_COMM_WORLD rank of the process that generated the + output, and the channel which generated it. + +* ``--timestamp-output``: Timestamp each line of output to stdout, + stderr, and stddiag. + +* ``--xml``: Provide all output to stdout, stderr, and stddiag in an + XML format. + +* ``--xml-file `` Provide all output in XML format to the + specified file. 
+ +* ``--xterm ``: Display the output from the processes + identified by their MPI_COMM_WORLD ranks in separate xterm + windows. The ranks are specified as a comma-separated list of + ranges, with a -1 indicating all. A separate window will be created + for each specified process. + + .. note:: xterm will normally terminate the window upon termination + of the process running within it. However, by adding a + ``!`` to the end of the list of specified ranks, the + proper options will be provided to ensure that xterm keeps + the window open after the process terminates, thus + allowing you to see the process' output. Each xterm + window will subsequently need to be manually closed. + Note: In some environments, xterm may require that the + executable be in the user's path, or be specified in + absolute or relative terms. Thus, it may be necessary to + specify a local executable as ``./my_mpi_app`` instead of just + ``my_mpi_app``. If xterm fails to find the executable, ``mpirun`` + will hang, but still respond correctly to a ctrl-C. If + this happens, please check that the executable is being + specified correctly and try again. + +To manage files and runtime environment: + +* ``--path ``: ```` that will be used when attempting to + locate the requested executables. This is used prior to using the + local ``PATH`` environment variable setting. + +* ``--prefix ``: Prefix directory that will be used to set the + ``PATH`` and ``LD_LIBRARY_PATH`` on the remote node before invoking + Open MPI or the target process. See the :ref:`Remote Execution + ` section, below. + +* ``--noprefix``: Disable the automatic ``--prefix`` behavior + +* ``--preload-binary``: Copy the specified executable(s) to remote + machines prior to starting remote processes. The executables will be + copied to the Open MPI session directory and will be deleted upon + completion of the job. 
+ +* ``--preload-files ``: Preload the comma-separated list of + files to the current working directory of the remote machines where + processes will be launched prior to starting those processes. + +* ``--set-cwd-to-session-dir``: Set the working directory of the + started processes to their session directory. + +* ``--wd ``: Synonym for ``-wdir``. + +* ``--wdir ``: Change to the directory ```` before the + user's program executes. See the :ref:`Current Working Directory + ` section for notes on + relative paths. Note: If the ``--wdir`` option appears both on the + command line and in an application context, the context will take + precedence over the command line. Thus, if the path to the desired + wdir is different on the backend nodes, then it must be specified as + an absolute path that is correct for the backend node. + +* ``-x ``: Export the specified environment variables to the + remote nodes before executing the program. Only one environment + variable can be specified per ``-x`` option. Existing environment + variables can be specified or new variable names specified with + corresponding values. For example: + + .. code:: sh + + shell$ mpirun -x DISPLAY -x OFILE=/tmp/out ... + + The parser for the ``-x`` option is not very sophisticated; it does + not even understand quoted values. Users are advised to set + variables in the environment, and then use ``-x`` to export (not + define) them. + +Setting MCA parameters: + +* ``--gmca ``: Pass global MCA parameters that are + applicable to all contexts. ```` is the parameter name; + ```` is the parameter value. + +* ``--mca ``: Send arguments to various MCA modules. See + the :ref:`Setting MCA Parameters + ` section for mode details. + +* ``--am ``: Aggregate MCA parameter set file list. + +* ``--tune ``: Specify a tune file to set arguments for + various MCA modules and environment variables. 
See the :ref:` + Setting MCA parameters and environment variables from file + ` + +For debugging: + +* ``--debug``: Invoke the user-level debugger indicated by the + ``orte_base_user_debugger`` MCA parameter. + +* ``--get-stack-traces``: When paired with the ``--timeout`` option, + ``mpirun`` will obtain and print out stack traces from all launched + processes that are still alive when the timeout expires. Note that + obtaining stack traces can take a little time and produce a lot of + output, especially for large process-count jobs. + +* ``--debugger ``: Sequence of debuggers to search for when + ``--debug`` is used (i.e., a synonym for the + ``orte_base_user_debugger`` MCA parameter). + +* ``--timeout ``: The maximum number of seconds that + ``mpirun`` will run. After this many seconds, ``mpirun`` will abort + the launched job and exit with a non-zero exit status. Using + ``--timeout`` can be also useful when combined with the + ``--get-stack-traces`` option. + +* ``--tv``: Launch processes under a debugger. Deprecated backwards + compatibility flag. Synonym for ``--debug``. + +There are also other options: + +* ``--allow-run-as-root``: Allow ``mpirun`` to run when executed by + the root user (``mpirun`` defaults to aborting when launched as the + root user). Be sure to see the :ref:`Running as root + ` section for more detail. + +* ``--app ``: Provide an appfile, ignoring all other command + line options. + +* ``--cartofile ``: Provide a cartography file. + +* ``--continuous``: Job is to run until explicitly terminated. + +* ``--disable-recovery``: Disable recovery (resets all recovery + options to off). + +* ``--do-not-launch``: Perform all necessary operations to prepare to + launch the application, but do not actually launch it. + +* ``--do-not-resolve``: Do not attempt to resolve interfaces. 
+ +* ``--enable-recovery``: Enable recovery from process failure (default: + disabled) + +* ``--index-argv-by-rank``: Uniquely index argv[0] for each process + using its rank. + +* ``--leave-session-attached``: Do not detach back-end daemons used by + this application. This allows error messages from the daemons as + well as the underlying environment (e.g., when failing to launch a + daemon) to be output. + +* ``--max-restarts ``: Max number of times to restart a failed + process. + +* ``--ompi-server ``: Specify the URI of the Open MPI + server (or the mpirun to be used as the server), the name of the + file (specified as ``file:filename``) that contains that info, or + the PID (specified as ``pid:#``) of the mpirun to be used as the + server. The Open MPI server is used to support multi-application + data exchange via the :ref:`MPI_Publish_name(3) ` + and :ref:`MPI_Lookup_name(3) ` functions. + +* ``--personality ``: Comma-separated list of programming model, + languages, and containers being used (default=``ompi``). + +* ``--ppr ``: Comma-separated list of number of processes on a + given resource type (default: none). + +* ``--report-child-jobs-separately``: Return the exit status of the + primary job only. + +* ``--report-events ``: Report events to a tool listening at the + specified URI. + +* ``--report-pid ``: Print out ``mpirun``'s PID during + startup. The channel must be either a ``-`` to indicate that the PID + is to be output to stdout, a ``+`` to indicate that the PID is to be + output to stderr, or a filename to which the PID is to be written. + +* ``--report-uri ``: Print out ``mpirun``'s URI during + startup. The channel must be either a ``-`` to indicate that the URI + is to be output to stdout, a ``+`` to indicate that the URI is to be + output to stderr, or a filename to which the URI is to be written. + +* ``--show-progress``: Output a brief periodic report on launch + progress. + +* ``--terminate``: Terminate the DVM. 
+ +* ``--use-hwthread-cpus``: Use hardware threads as independent CPUs. + + Note that if a number of slots is not provided to Open MPI (e.g., + via the ``slots`` keyword in a hostfile or from a resource manager + such as SLURM), the use of this option changes the default + calculation of number of slots on a node. See the :ref:`DEFINITION + OF 'SLOT' ` section. + + Also note that the use of this option changes the Open MPI's + definition of a "processor element" from a processor core to a + hardware thread. See the :ref:`DEFINITION OF 'PROCESSOR ELEMENT' + ` section. + +* ``--use-regexp``: Use regular expressions for launch. + +The following options are useful for developers; they are not +generally useful to most Open MPI users: + +* ``-d``, ``--debug-devel``: Enable debugging of the back-end run-time + system. This is not generally useful for most users. + +* ``--debug-daemons``: Enable debugging of the run-time daemons used + by this application. + +* ``--debug-daemons-file``: Enable debugging of the run-time daemons + used by this application, storing output in files. + +* ``--display-devel-allocation``: + Display a detailed list of the allocation being used by this job. + +* ``--display-devel-map``: Display a more detailed table showing the + mapped location of each process prior to launch. + +* ``--display-diffable-map``: Display a diffable process map just + before launch. + +* ``--display-topo``: Display the topology as part of the process map + just before launch. + +* ``--launch-agent``: Name of the executable that is to be used to + start processes on the remote nodes. The default is ``prted``. This + option can be used to test new daemon concepts, or to pass options + back to the daemons without having mpirun itself see them. For + example, specifying a launch agent of ``prted -mca odls_base_verbose + 5`` allows the developer to ask the ``prted`` for debugging output + without clutter from ``mpirun`` itself. 
+ +* ``--report-state-on-timeout``: When paired with the ``--timeout`` + command line option, report the run-time subsystem state of each + process when the timeout expires. + +There may be other options listed with ``mpirun --help``. + +Environment Variables +^^^^^^^^^^^^^^^^^^^^^ + +* ``MPIEXEC_TIMEOUT``: Synonym for the ``--timeout`` command line option. + +DESCRIPTION +----------- + +One invocation of ``mpirun`` starts an MPI application running under Open +MPI. If the application is single process multiple data (SPMD), the +application can be specified on the ``mpirun`` command line. + +If the application is multiple instruction multiple data (MIMD), +comprising of multiple programs, the set of programs and argument can +be specified in one of two ways: Extended Command Line Arguments, and +Application Context. + +An application context describes the MIMD program set including all +arguments in a separate file. This file essentially contains multiple +mpirun command lines, less the command name itself. The ability to +specify different options for different instantiations of a program is +another reason to use an application context. + +Extended command line arguments allow for the description of the +application layout on the command line using colons (``:``) to +separate the specification of programs and arguments. Some options are +globally set across all specified programs (e.g., ``--hostfile``), +while others are specific to a single program (e.g., ``-np``). + +Specifying Host Nodes +^^^^^^^^^^^^^^^^^^^^^ + +Host nodes can be identified on the ``mpirun`` command line with the +``--host`` option or in a hostfile. + +For example: + +.. code:: sh + + shell$ mpirun -H aa,aa,bb ./a.out + +Launches two processes on node ``aa`` and one on ``bb``. + +Or, consider the hostfile: + +.. 
code:: sh + + shell$ cat myhostfile + aa slots=2 + bb slots=2 + cc slots=2 + +Here, we list both the host names (``aa``, ``bb``, and ``cc``) but +also how many slots there are for each. + +.. code:: sh + + shell$ mpirun --hostfile myhostfile ./a.out + +will launch two processes on each of the three nodes. + +.. code:: sh + + shell$ mpirun --hostfile myhostfile --host aa ./a.out + +will launch two processes, both on node ``aa``. + +.. code:: sh + + shell$ mpirun --hostfile myhostfile --host dd ./a.out + +will find no hosts to run on and will abort with an error. That is, +the specified host ``dd`` is not in the specified hostfile. + +When running under resource managers (e.g., SLURM, Torque, etc.), Open +MPI will obtain both the hostnames and the number of slots directly +from the resource manger. + +Specifying Number of Processes +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +As we have just seen, the number of processes to run can be set using the +hostfile. Other mechanisms exist. + +The number of processes launched can be specified as a multiple of the +number of nodes or processor sockets available. For example, + +.. code:: sh + + shell$ mpirun -H aa,bb --npersocket 2 ./a.out + +launches processes 0-3 on node ``aa`` and process 4-7 on node ``bb`` +(assuming ``aa`` and ``bb`` both contain 4 slots each). The +``--npersocket`` option also turns on the ``--bind-to-socket`` option, +which is discussed in a later section. + +.. code:: sh + + shell$ mpirun -H aa,bb --npernode 2 ./a.out + +launches processes 0-1 on node ``aa`` and processes 2-3 on node ``bb``. + +.. code:: sh + + shell$ mpirun -H aa,bb --npernode 1 ./a.out + +launches one process per host node. + +.. code:: sh + + mpirun -H aa,bb --pernode ./a.out + +is the same as ``--npernode 1``. + +Another alternative is to specify the number of processes with the ``-np`` +option. Consider now the hostfile: + +.. code:: sh + + shell$ cat myhostfile + aa slots=4 + bb slots=4 + cc slots=4 + +Now run with ``myhostfile``: + +.. 
code:: sh + + shell$ mpirun --hostfile myhostfile -np 6 ./a.out + +will launch processes 0-3 on node ``aa`` and processes 4-5 on node +``bb``. The remaining slots in the hostfile will not be used since +the ``-np`` option indicated that only 6 processes should be launched. + +Mapping Processes to Nodes: Using Policies +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The examples above illustrate the default mapping of process processes +to nodes. This mapping can also be controlled with various ``mpirun`` +options that describe mapping policies. + +Consider the same hostfile as above, again with ``-np 6``. The table +below lists a few ``mpirun`` variations, and shows which +MPI_COMM_WORLD ranks end up on which node: + +.. list-table:: + :header-rows: 1 + + * - Command + - Node ``aa`` + - Node ``bb`` + - Node ``cc`` + + * - ``mpirun`` + - 0 1 2 3 + - 4 5 + - + + * - ``mpirun --map-by node`` + - 0 3 + - 1 4 + - 2 5 + + * - ``mpirun --nolocal`` + - + - 0 1 2 3 + - 4 5 + +The ``--map-by node`` option will load balance the processes across the +available nodes, numbering each process in a round-robin fashion. + +The ``--nolocal`` option prevents any processes from being mapped onto +the local host (in this case node ``aa``). While ``mpirun`` typically +consumes few system resources, ``--nolocal`` can be helpful for +launching very large jobs where mpirun may actually need to use +noticeable amounts of memory and/or processing time. + +Just as ``-np`` can specify fewer processes than there are slots, it +can also oversubscribe the slots. For example, with the same +hostfile: + +.. code:: sh + + shell$ mpirun --hostfile myhostfile -np 14 ./a.out + +will launch processes 0-3 on node ``aa``, 4-7 on ``bb``, and 8-11 on +``cc``. It will then add the remaining two processes to whichever +nodes it chooses. + +One can also specify limits to oversubscription. For example, with the +same hostfile: + +.. 
code:: sh + + shell$ mpirun --hostfile myhostfile -np 14 --nooversubscribe ./a.out + +will produce an error since ``--nooversubscribe`` prevents +oversubscription. + +Limits to oversubscription can also be specified in the hostfile +itself: + +.. code:: sh + + shell$ cat myhostfile + aa slots=4 max_slots=4 + bb max_slots=4 + cc slots=4 + +The ``max_slots`` field specifies such a limit. When it does, the slots +value defaults to the limit. Now: + +.. code:: sh + + shell$ mpirun --hostfile myhostfile -np 14 ./a.out + +causes the first 12 processes to be launched as before, but the +remaining two processes will be forced onto node ``cc``. The other +two nodes are protected by the hostfile against oversubscription by +this job. + +Using the ``--nooversubscribe`` option can be helpful since Open MPI +currently does not get ``max_slots`` values from the resource manager. + +Of course, ``-np`` can also be used with the ``-H`` or ``-host`` +option. For example: + +.. code:: sh + + shell$ mpirun -H aa,bb -np 8 ./a.out + +launches 8 processes. Since only two hosts are specified, after the +first two processes are mapped, one to ``aa`` and one to ``bb``, the +remaining processes oversubscribe the specified hosts. + +And here is a MIMD example: + +.. code:: sh + + shell$ mpirun -H aa -np 1 hostname : -H bb,cc -np 2 uptime + +will launch process 0 running hostname on node ``aa`` and processes 1 +and 2 each running uptime on nodes ``bb`` and ``cc``, respectively. + +Mapping, Ranking, and Binding: Oh My! +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Open MPI employs a three-phase procedure for assigning process locations +and ranks: + +#. Mapping: Assigns a default location to each process +#. Ranking: Assigns an MPI_COMM_WORLD rank value to each process +#. Binding: Constrains each process to run on specific processors + +The mapping step is used to assign a default location to each process +based on the mapper being employed. 
Mapping by slot, node, and
+sequentially results in the assignment of the processes to the node
+level. In contrast, mapping by object allows the mapper to assign the
+process to an actual object on each node.
+
+Note that the location assigned to the process is independent of where
+it will be bound |mdash| the assignment is used solely as input to the
+binding algorithm.
+
+The mapping of processes to nodes can be defined not just with
+general policies but also, if necessary, using arbitrary mappings that
+cannot be described by a simple policy. One can use the "sequential
+mapper," which reads the hostfile line by line, assigning processes to
+nodes in whatever order the hostfile specifies. Use the ``--mca rmaps
+seq`` option. For example, using the same hostfile as before:
+
+.. code:: sh
+
+   shell$ mpirun -hostfile myhostfile -mca rmaps seq ./a.out
+
+will launch three processes, one on each of nodes aa, bb, and cc,
+respectively. The slot counts don't matter; one process is launched
+per line on whatever node is listed on the line.
+
+Another way to specify arbitrary mappings is with a rankfile, which
+gives you detailed control over process binding as well. Rankfiles
+are discussed below.
+
+The second phase focuses on the ranking of the process within the
+job's MPI_COMM_WORLD. Open MPI separates this from the mapping
+procedure to allow more flexibility in the relative placement of MPI
+processes. This is best illustrated by considering the following two
+cases where we used the ``--map-by ppr:2:socket`` option:
+
+.. list-table::
+   :header-rows: 1
+
+   * - Option
+     - Node ``aa``
+     - Node ``bb``
+
+   * - ``--rank-by core``
+     - 0 1 | 2 3
+     - 4 5 | 6 7
+
+   * - ``--rank-by socket``
+     - 0 2 | 1 3
+     - 4 6 | 5 7
+
+   * - ``--rank-by socket:span``
+     - 0 4 | 1 5
+     - 2 6 | 3 7
+
+Ranking by core and by slot provide the identical result |mdash| a
+simple progression of MPI_COMM_WORLD ranks across each node. 
Ranking +by socket does a round-robin ranking within each node until all +processes have been assigned an MCW rank, and then progresses to the +next node. Adding the ``span`` modifier to the ranking directive causes +the ranking algorithm to treat the entire allocation as a single +entity |mdash| thus, the MCW ranks are assigned across all sockets +before circling back around to the beginning. + +The binding phase actually binds each process to a given set of +processors. This can improve performance if the operating system is +placing processes suboptimally. For example, it might oversubscribe +some multi-core processor sockets, leaving other sockets idle; this +can lead processes to contend unnecessarily for common resources. Or, +it might spread processes out too widely; this can be suboptimal if +application performance is sensitive to interprocess communication +costs. Binding can also keep the operating system from migrating +processes excessively, regardless of how optimally those processes +were placed to begin with. + +The processors to be used for binding can be identified in terms of +topological groupings |mdash| e.g., binding to an ``l3cache`` will +bind each process to all processors within the scope of a single L3 +cache within their assigned location. Thus, if a process is assigned +by the mapper to a certain socket, then a ``--bind-to l3cache`` +directive will cause the process to be bound to the processors that +share a single L3 cache within that socket. + +Alternatively, processes can be assigned to processors based on their +local rank on a node using the ``--bind-to cpu-list:ordered`` option with +an associated ``--cpu-list 0,2,5``. In this example, the first process +on a node will be bound to CPU 0, the second process on the node will +be bound to CPU 2, and the third process on the node will be bound to +CPU 5. + +``--bind-to`` will also accept ``cpulist:ordered`` as a synonym to +``cpu-list:ordered``. 
Note that an error will result if more +processes are assigned to a node than CPUs are provided. + +To help balance loads, the binding directive uses a round-robin method +when binding to levels lower than used in the mapper. For example, +consider the case where a job is mapped to the socket level, and then +bound to core. Each socket will have multiple cores, so if multiple +processes are mapped to a given socket, the binding algorithm will +assign each process located to a socket to a unique core in a +round-robin manner. + +Alternatively, processes mapped by ``l2cache`` and then bound to socket +will simply be bound to all the processors in the socket where they +are located. In this manner, users can exert detailed control over +relative MCW rank location and binding. + +Finally, ``--report-bindings`` can be used to report bindings. + +As an example, consider a node with two processor sockets, each +comprised of four cores, and each of those cores contains one hardware +thread. We run mpirun with ``-np 4 --report-bindings`` and the +following additional options: + +.. code:: + + shell$ mpirun ... --map-by core --bind-to core + [...] ... binding child [...,0] to cpus 0001 + [...] ... binding child [...,1] to cpus 0002 + [...] ... binding child [...,2] to cpus 0004 + [...] ... binding child [...,3] to cpus 0008 + + shell$ mpirun ... --map-by socket --bind-to socket + [...] ... binding child [...,0] to socket 0 cpus 000f + [...] ... binding child [...,1] to socket 1 cpus 00f0 + [...] ... binding child [...,2] to socket 0 cpus 000f + [...] ... binding child [...,3] to socket 1 cpus 00f0 + + shell$ mpirun ... --map-by slot:PE=2 --bind-to core + [...] ... binding child [...,0] to cpus 0003 + [...] ... binding child [...,1] to cpus 000c + [...] ... binding child [...,2] to cpus 0030 + [...] ... binding child [...,3] to cpus 00c0 + + shell$ mpirun ... --bind-to none + +.. error:: TODO Is this still right? Don't we show something more + user-friendly these days? 
+ +Here, ``--report-bindings`` shows the binding of each process as a +mask. In the first case, the processes bind to successive cores as +indicated by the masks 0001, 0002, 0004, and 0008. In the second +case, processes bind to all cores on successive sockets as indicated +by the masks 000f and 00f0. The processes cycle through the processor +sockets in a round-robin fashion as many times as are needed. + +In the third case, the masks show us that 2 cores have been bound per +process. Specifically, the mapping by slot with the PE=2 qualifier +indicated that each slot (i.e., process) should consume two processor +elements. Since ``--use-hwthread-cpus`` was not specified, Open MPI +defined "processor element" as "core", and therefore the ``--bind-to +core`` caused each process to be bound to both of the cores to which +it was mapped. + +In the fourth case, binding is turned off and no bindings are reported. + +Open MPI's support for process binding depends on the underlying +operating system. Therefore, certain process binding options may not +be available on every system. + +Process binding can also be set with MCA parameters. Their usage is +less convenient than that of ``mpirun`` options. On the other hand, +MCA parameters can be set not only on the mpirun command line, but +alternatively in a system or user ``mca-params.conf`` file or as +environment variables, as described in the :ref:`Setting MCA +Parameters `. Some examples +include: + +.. 
list-table:: + :header-rows: 1 + + * - Option + - MCA parameter key + - Value + + * - ``--map-by core`` + - ``rmaps_base_mapping_policy`` + - ``core`` + + * - ``--map-by socket`` + - ``rmaps_base_mapping_policy`` + - ``socket`` + + * - ``--rank-by core`` + - ``rmaps_base_ranking_policy`` + - ``core`` + + * - ``--bind-to core`` + - ``hwloc_base_binding_policy`` + - ``core`` + + * - ``--bind-to socket`` + - ``hwloc_base_binding_policy`` + - ``socket`` + + * - ``--bind-to none`` + - ``hwloc_base_binding_policy`` + - ``none`` + +Rankfiles +^^^^^^^^^ + +Rankfiles are text files that specify detailed information about how +individual processes should be mapped to nodes, and to which +processor(s) they should be bound. Each line of a rankfile specifies +the location of one process (for MPI jobs, the process' "rank" refers +to its rank in MPI_COMM_WORLD). The general form of each line in the +rankfile is: + +.. code:: + + rank = slot= + +For example: + +.. code:: + + shell$ cat myrankfile + rank 0=aa slot=1:0-2 + rank 1=bb slot=0:0,1 + rank 2=cc slot=1-2 + shell$ mpirun -H aa,bb,cc,dd -rf myrankfile ./a.out + +Means that: + +* Rank 0 runs on node aa, bound to logical socket 1, cores 0-2. +* Rank 1 runs on node bb, bound to logical socket 0, cores 0 and 1. +* Rank 2 runs on node cc, bound to logical cores 1 and 2. + +Rankfiles can alternatively be used to specify physical processor +locations. In this case, the syntax is somewhat different. Sockets are +no longer recognized, and the slot number given must be the number of +the physical PU as most OS's do not assign a unique physical +identifier to each core in the node. Thus, a proper physical rankfile +looks something like the following: + +.. 
code::
+
+   shell$ cat myphysicalrankfile
+   rank 0=aa slot=1
+   rank 1=bb slot=8
+   rank 2=cc slot=6
+
+This means that:
+
+* Rank 0 will run on node aa, bound to the core that contains physical
+  PU 1
+* Rank 1 will run on node bb, bound to the core that contains physical
+  PU 8
+* Rank 2 will run on node cc, bound to the core that contains physical
+  PU 6
+
+Rankfiles are treated as logical by default, and the MCA parameter
+``rmaps_rank_file_physical`` must be set to 1 to indicate that the
+rankfile is to be considered as physical.
+
+The hostnames listed above are "absolute," meaning that actual
+resolvable hostnames are specified. However, hostnames can also be
+specified as "relative," meaning that they are specified in relation
+to an externally-specified list of hostnames (e.g., by ``mpirun``'s
+``--host`` argument, a hostfile, or a job scheduler).
+
+The "relative" specification is of the form ``+n<X>``, where X is an
+integer specifying the Xth hostname in the set of all available
+hostnames, indexed from 0. For example:
+
+.. code::
+
+   shell$ cat myrankfile
+   rank 0=+n0 slot=1:0-2
+   rank 1=+n1 slot=0:0,1
+   rank 2=+n2 slot=1-2
+   shell$ mpirun -H aa,bb,cc,dd -rf myrankfile ./a.out
+
+All socket/core slot locations are specified as logical indexes.
+
+.. note:: The Open MPI v1.6 series used physical indexes.
+
+You can use tools such as Hwloc's `lstopo(1)` to find the logical
+indexes of sockets and cores.
+
+Application Context or Executable Program?
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+To distinguish the two different forms, mpirun looks on the command
+line for the ``--app`` option. If it is specified, then the file named on
+the command line is assumed to be an application context. If it is
+not specified, then the file is assumed to be an executable program.
+
+Locating Files
+^^^^^^^^^^^^^^
+
+If no relative or absolute path is specified for a file, Open MPI will
+first look for files by searching the directories specified by the
+``--path`` option. 
If there is no ``--path`` option set or if the +file is not found at the ``--path`` location, then Open MPI will +search the user's ``PATH`` environment variable as defined on the +source node(s). + +If a relative directory is specified, it must be relative to the +initial working directory determined by the specific starter used. For +example when using the ssh starter, the initial directory is ``$HOME`` +by default. Other starters may set the initial directory to the +current working directory from the invocation of ``mpirun``. + +.. _man1-mpirun-current-working-directory: + +Current Working Directory +^^^^^^^^^^^^^^^^^^^^^^^^^ + +The ``--wdir`` ``mpirun`` option (and its synonym, ``--wd``) allows +the user to change to an arbitrary directory before the program is +invoked. It can also be used in application context files to specify +working directories on specific nodes and/or for specific +applications. + +If the ``--wdir`` option appears both in a context file and on the +command line, the context file directory will override the command +line value. + +If the ``-wdir`` option is specified, Open MPI will attempt to change +to the specified directory on all of the remote nodes. If this fails, +``mpirun`` will abort. + +If the ``-wdir`` option is not specified, Open MPI will send the +directory name where ``mpirun`` was invoked to each of the remote +nodes. The remote nodes will try to change to that directory. If +they are unable (e.g., if the directory does not exist on that node), +then Open MPI will use the default directory determined by the +starter. + +All directory changing occurs before the user's program is invoked; it +does not wait until :ref:`MPI_INIT(3) ` is called. + +Standard I/O +^^^^^^^^^^^^ + +Open MPI directs UNIX standard input to ``/dev/null`` on all processes +except the MPI_COMM_WORLD rank 0 process. The MPI_COMM_WORLD rank 0 +process inherits standard input from ``mpirun``. + +.. 
note:: The node that invoked ``mpirun`` need not be the same as the + node where the MPI_COMM_WORLD rank 0 process resides. Open + MPI handles the redirection of ``mpirun``'s standard input + to the rank 0 process. + +Open MPI directs UNIX standard output and error from remote nodes to +the node that invoked ``mpirun`` and prints it on the standard +output/error of ``mpirun``. Local processes inherit the standard +output/error of ``mpirun`` and transfer to it directly. + +Thus it is possible to redirect standard I/O for Open MPI applications +by using the typical shell redirection procedure on ``mpirun``. For +example: + +.. code:: sh + + shell$ mpirun -np 2 my_app < my_input > my_output + +Note that in this example only the MPI_COMM_WORLD rank 0 process will +receive the stream from ``my_input`` on stdin. The stdin on all the other +nodes will be tied to ``/dev/null``. However, the stdout from all nodes +will be collected into the ``my_output`` file. + +Signal Propagation +^^^^^^^^^^^^^^^^^^ + +When ``mpirun`` receives a SIGTERM and SIGINT, it will attempt to kill +the entire job by sending all processes in the job a SIGTERM, waiting +a small number of seconds, then sending all processes in the job a +SIGKILL. + +SIGUSR1 and SIGUSR2 signals received by ``mpirun`` are propagated to all +processes in the job. + +A SIGTSTOP signal to ``mpirun`` will cause a SIGSTOP signal to be sent +to all of the programs started by ``mpirun`` and likewise a SIGCONT +signal to ``mpirun`` will cause a SIGCONT sent. + +Other signals are not currently propagated by ``mpirun``. + +Process Termination / Signal Handling +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +During the run of an MPI application, if any process dies abnormally +(either exiting before invoking :ref:`MPI_FINALIZE(3) `, +or dying as the result of a signal), ``mpirun`` will print out an +error message and kill the rest of the MPI application. 
+
+User signal handlers should probably avoid trying to clean up MPI state
+(Open MPI is currently not async-signal-safe; see
+:ref:`MPI_INIT_THREAD(3) <mpi_init_thread>` for details about
+MPI_THREAD_MULTIPLE and thread safety). For example, if a
+segmentation fault occurs in :ref:`MPI_SEND(3) <mpi_send>` (perhaps
+because a bad buffer was passed in) and a user signal handler is
+invoked, if this user handler attempts to invoke :ref:`MPI_FINALIZE(3)
+<mpi_finalize>`, Bad Things could happen since Open MPI was already
+"in" MPI when the error occurred. Since ``mpirun`` will notice that the
+process died due to a signal, it is probably best (and
+safest) for the user to clean up only non-MPI state.
+
+Process Environment
+^^^^^^^^^^^^^^^^^^^
+
+Processes in the MPI application inherit their environment from the
+Open RTE daemon upon the node on which they are running. The
+environment is typically inherited from the user's shell. On remote
+nodes, the exact environment is determined by the boot MCA module
+used. The rsh launch module, for example, uses either rsh or ssh to
+launch the Open RTE daemon on remote nodes, and typically executes one
+or more of the user's shell-setup files before launching the Open RTE
+daemon. When running dynamically linked applications which require
+the ``LD_LIBRARY_PATH`` environment variable to be set, care must be
+taken to ensure that it is correctly set when booting Open MPI.
+
+See the :ref:`Remote Execution <man1-mpirun-remote-execution>` section
+for more details.
+
+.. _man1-mpirun-remote-execution:
+
+Remote Execution
+^^^^^^^^^^^^^^^^
+
+Open MPI requires that the ``PATH`` environment variable be set to
+find executables on remote nodes (this is typically only necessary in
+rsh- or ssh-based environments |mdash| batch/scheduled environments
+typically copy the current environment to the execution of remote
+jobs, so if the current environment has ``PATH`` and/or
+``LD_LIBRARY_PATH`` set properly, the remote nodes will also have it
+set properly). 
If Open MPI was compiled with shared library support, +it may also be necessary to have the ``LD_LIBRARY_PATH`` environment +variable set on remote nodes as well (especially to find the shared +libraries required to run user MPI applications). + +However, it is not always desirable or possible to edit shell startup +files to set ``PATH`` and/or ``LD_LIBRARY_PATH``. The ``--prefix`` +option is provided for some simple configurations where this is not +possible. + +The ``--prefix`` option takes a single argument: the base directory on +the remote node where Open MPI is installed. Open MPI will use this +directory to set the remote ``PATH`` and ``LD_LIBRARY_PATH`` before +executing any Open MPI or user applications. This allows running Open +MPI jobs without having pre-configured the ``PATH`` and +``LD_LIBRARY_PATH`` on the remote nodes. + +Open MPI adds the basename of the current node's ``$bindir`` (the +directory where Open MPI's executables were installed) to the prefix +and uses that to set the ``PATH`` on the remote node. Similarly, Open +MPI adds the basename of the current node's ``$libdir`` (the directory +where Open MPI's libraries were installed) to the prefix and uses that +to set the ``LD_LIBRARY_PATH`` on the remote node. For example: + +* Local bindir: ``/local/node/directory/bin`` +* Local libdir: ``/local/node/directory/lib64`` + +If the following command line is used: + +.. code:: sh + + shell$ mpirun --prefix /remote/node/directory + +Open MPI will add ``/remote/node/directory/bin`` to the ``PATH`` and +``/remote/node/directory/lib64`` to the ``LD_LIBRARY_PATH`` on the +remote node before attempting to execute anything. + +The ``--prefix`` option is not sufficient if the installation paths on +the remote node are different than the local node (e.g., if ``/lib`` +is used on the local node, but ``/lib64`` is used on the remote node), +or if the installation paths are something other than a subdirectory +under a common prefix. 
+ +Note that executing ``mpirun`` via an absolute pathname is equivalent +to specifying ``--prefix`` without the last subdirectory in the +absolute pathname to ``mpirun``. For example: + +.. code:: sh + + shell$ /usr/local/bin/mpirun ... + +is equivalent to + +.. code:: sh + + shell$ mpirun --prefix /usr/local + +Exported Environment Variables +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +All environment variables that are named in the form ``OMPI_*`` will +automatically be exported to new processes on the local and remote +nodes. Environmental parameters can also be set/forwarded to the new +processes using the MCA parameter ``mca_base_env_list``. The ``-x`` +option to mpirun has been deprecated, but the syntax of the MCA param +follows that prior example. While the syntax of the ``-x`` option and +MCA param allows the definition of new variables, note that the parser +for these options are currently not very sophisticated |mdash| it does +not even understand quoted values. Users are advised to set variables +in the environment and use the option to export them; not to define +them. + +.. _man1-mpirun-setting-mca-parameters: + +Setting MCA Parameters +^^^^^^^^^^^^^^^^^^^^^^ + +The ``--mca`` switch allows the passing of parameters to various MCA +(Modular Component Architecture) modules. MCA modules have direct +impact on MPI programs because they allow tunable parameters to be set +at run time (such as which BTL communication device driver to use, +what parameters to pass to that BTL, etc.). + +The ``--mca`` switch takes two arguments: ```` and ````. +The ```` argument generally specifies which MCA module will +receive the value. For example, the ```` ``btl`` is used to +select which BTL to be used for transporting MPI messages. The +```` argument is the value that is passed. For example: + +.. 
code:: sh
+
+   shell$ mpirun --mca btl tcp,self -np 1 my_mpi_app
+
+This tells Open MPI to use the ``tcp`` and ``self`` BTLs, and to run a
+single copy of ``my_mpi_app`` on an allocated node.
+
+.. code:: sh
+
+   shell$ mpirun --mca btl self -np 1 my_mpi_app
+
+Tells Open MPI to use the ``self`` BTL, and to run a single copy of
+``my_mpi_app`` on an allocated node.
+
+The ``--mca`` switch can be used multiple times to specify different
+``<key>`` and/or ``<value>`` arguments. If the same ``<key>`` is
+specified more than once, the ``<value>``\ s are concatenated with a
+comma (``,``) separating them.
+
+Note that the ``--mca`` switch is simply a shortcut for setting
+environment variables. The same effect may be accomplished by setting
+corresponding environment variables before running ``mpirun``. The form
+of the environment variables that Open MPI sets is:
+
+.. code:: sh
+
+   OMPI_MCA_<key>=<value>
+
+Thus, the ``--mca`` switch overrides any previously set environment
+variables. The ``--mca`` settings similarly override MCA parameters
+set in the ``$OPAL_PREFIX/etc/openmpi-mca-params.conf`` or
+``$HOME/.openmpi/mca-params.conf`` file.
+
+Unknown ``<key>`` arguments are still set as environment variables --
+they are not checked (by mpirun) for correctness. Illegal or
+incorrect ``<value>`` arguments may or may not be reported |mdash| it
+depends on the specific MCA module.
+
+To find the available component types under the MCA architecture, or
+to find the available parameters for a specific component, use the
+ompi_info command. See the :ref:`ompi_info(1) <man1-ompi_info>` man
+page for detailed information on this command.
+
+.. _man1-mpirun-setting-mca-params-from-file:
+
+Setting MCA parameters and environment variables from file
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``--tune`` command line option and its synonym ``--mca``
+``mca_base_envar_file_prefix`` allow a user to set MCA parameters and
+environment variables with the syntax described below. 
This option +requires a single file or list of files separated by "," to follow. + +A valid line in the file may contain zero or more ``-x`` or +``--mca``. The following patterns are supported: + +* ``--mca var val`` +* ``--mca var "val"`` +* ``-x var=val`` +* ``-x var`` + +If any argument is duplicated in the file, the last value read will be +used. + +MCA parameters and environment specified on the command line +have higher precedence than variables specified in the file. + +.. _man1-mpirun-running-as-root: + +Running as root +^^^^^^^^^^^^^^^ + +.. warning:: The Open MPI team **strongly** advises against executing + ``mpirun`` as the root user. MPI applications should be + run as regular (non-root) users. + +``mpirun`` will refuse to run as root by default. + +To override this default, you can add the ``--allow-run-as-root`` +option to the mpirun command line, or you can set the environmental +parameters ``OMPI_ALLOW_RUN_AS_ROOT=1`` and +``OMPI_ALLOW_RUN_AS_ROOT_CONFIRM=1``. Note that it takes setting two +environment variables to effect the same behavior as +``--allow-run-as-root`` in order to stress the Open MPI team's strong +advice against running as the root user. + +After extended discussions with communities who use containers (where +running as the root user is the default), there was a persistent +desire to be able to enable root execution of ``mpirun`` via an +environmental control (vs. the existing ``--allow-run-as-root`` +command line parameter). The compromise of using two environment +variables was reached: it allows root execution via an environmental +control, but it conveys the Open MPI team's strong recomendation +against this behavior. + +Exit status +^^^^^^^^^^^ + +There is no standard definition for what ``mpirun`` should return as +an exit status. 
After considerable discussion, we settled on the +following method for assigning the ``mpirun`` exit status (note: in +the following description, the "primary" job is the initial +application started by mpirun |mdash| all jobs that are spawned by +that job are designated "secondary" jobs): + +* If all processes in the primary job normally terminate with exit + status 0, ``mpirun`` returns 0. + +* If one or more processes in the primary job normally terminate with + non-zero exit status, ``mpirun`` returns the exit status of the + process with the lowest MPI_COMM_WORLD rank to have a non-zero + status. + +* If all processes in the primary job normally terminate with exit + status 0, and one or more processes in a secondary job normally + terminate with non-zero exit status, ``mpirun``: + + #. Returns the exit status of the process with the lowest + MPI_COMM_WORLD rank in the lowest jobid to have a non-zero + status, and + #. Outputs a message summarizing the exit status of the primary and + all secondary jobs. + +* If the command line option ``--report-child-jobs-separately`` is + set, we will return *only* the exit status of the primary job. Any + non-zero exit status in secondary jobs will be reported solely in a + summary print statement. + +By default, the job will abort when any process terminates with +non-zero status. The MCA parameter ``orte_abort_on_non_zero_status`` +can be set to "false" (or "0") to cause Open MPI to not abort a job if +one or more processes return a non-zero status. In that situation, +Open MPI records and notes that processes exited with non-zero +termination status to report the appropriate exit status of ``mpirun`` (per +bullet points above). + +.. error:: TODO The ``orte_abort...`` name above is definitely wrong for + Open MPI 5.0.0. + +EXAMPLES +-------- + +Be sure also to see the examples throughout the sections above. + +..
code:: sh + + shell$ mpirun -np 4 --mca btl tcp,sm,self prog1 + +Run 4 copies of ``prog1`` using the ``tcp``, ``sm`` (shared memory), +and ``self`` (process loopback) BTL's for the transport of MPI +messages. + + +RETURN VALUE +------------ + +``mpirun`` returns 0 if all processes started by mpirun exit after +calling :ref:`MPI_FINALIZE(3) `. A non-zero value is +returned if an internal error occurred in mpirun, or one or more +processes exited before calling :ref:`MPI_FINALIZE(3) `. +If an internal error occurred in mpirun, the corresponding error code +is returned. In the event that one or more processes exit before +calling :ref:`MPI_FINALIZE(3) `, the return value of +the MPI_COMM_WORLD rank of the process that mpirun first notices died +before calling :ref:`MPI_FINALIZE(3) ` will be +returned. Note that, in general, this will be the first process that +died but is not guaranteed to be so. + +If the ``--timeout`` command line option is used and the timeout +expires before the job completes (thereby forcing mpirun to kill the +job) mpirun will return an exit status equivalent to the value of +ETIMEDOUT (which is typically 110 on Linux and OS X systems). + + +.. seealso:: + :ref:`MPI_INIT(3) `, + :ref:`MPI_INIT_THREAD(3) `, + :ref:`MPI_FINALIZE(3) `, + :ref:`ompi_info(1) ` diff --git a/docs/man-openmpi/man1/mpisync.1.rst b/docs/man-openmpi/man1/mpisync.1.rst new file mode 100644 index 00000000000..65f283148a8 --- /dev/null +++ b/docs/man-openmpi/man1/mpisync.1.rst @@ -0,0 +1,55 @@ +.. _mpisync: + + +mpisync +======= + +.. include_body + +Open MPI timing tools + + +SYNTAX +------ + +| **mpisync** [*options*] +| **mpirun_prof** [*options*] +| **ompi_timing_post** [**] [**] + + +DESCRIPTION +----------- + +``mpisync``: determines clock offsets relative to Head Node Process +(HNP). 
It accepts the following options: + +* ``-o``, ``--output``: The name of output file where offsets related + to HNP will be written + +* ``-h``, ``--help``: Print help information + +``ompi_timing_post`` takes the timing output file as input parameter. +The events are sorted by the timestamps. Next, the timestamps are +replaced with time offsets relative to the ``first`` : ``previous`` +event. + +``mpirun_prof`` is a wrapper around :ref:`mpirun ` that +performs clock synchronisation and post-processing of the timing +output file. + + +NOTES +----- + +The mpisync code was derived from MPIPerf project: +http://mpiperf.cpct.sibsutis.ru/index.php/Main/Documentation + + +FILES +----- + +The output file has following format: + +.. code:: + + diff --git a/docs/man-openmpi/man1/ompi-wrapper-compiler.1.rst b/docs/man-openmpi/man1/ompi-wrapper-compiler.1.rst new file mode 100644 index 00000000000..cf0c96dd631 --- /dev/null +++ b/docs/man-openmpi/man1/ompi-wrapper-compiler.1.rst @@ -0,0 +1,258 @@ +.. _man1-mpicc: +.. _man1-mpic++: +.. _man1-mpicxx: +.. _man1-mpifort: +.. _man1-mpijavac: + +Open MPI Wrapper Compilers +========================== + +.. 
include_body + +mpicc, mpic++, mpicxx, mpifort, mpijavac -- Open MPI wrapper compilers + +SYNTAX +------ + +``mpicc [-showme|-showme:compile|-showme:link] ...`` + +``mpic++ [-showme|-showme:compile|-showme:link] ...`` + +``mpicxx [-showme|-showme:compile|-showme:link] ...`` + +``mpifort [-showme|-showme:compile|-showme:link] ...`` + +``mpijavac [-showme|-showme:compile|-showme:link] ...`` + +The following deprecated commands are also available |mdash| but +``mpifort`` should be used instead: + +``mpif77 [-showme|-showme:compile|-showme:link] ...`` + +``mpif90 [-showme|-showme:compile|-showme:link] ...`` + +On case-sensitive filesystems, the following command will also be +available: + +``mpiCC [-showme|-showme:compile|-showme:link] ...`` + + +OPTIONS +------- + +The options below apply to all of the wrapper compilers: + +* ``--showme``: This option comes in several different variants (see + below). None of the variants invokes the underlying compiler; they + all provide information on how the underlying compiler would have + been invoked had ``--showme`` not been used. The basic ``--showme`` + option outputs the command line that would be executed to compile + the program. + + .. note:: If a non-filename argument is passed on the command line, + the *-showme* option will *not* display any additional + flags. For example, both ``mpicc --showme`` and + ``mpicc --showme my_source.c`` will show all the + wrapper-supplied flags. But ``mpicc + --showme -v`` will only show the underlying compiler name + and ``-v``. + +* ``--showme:compile``: Output the compiler flags that would have been + supplied to the underlying compiler. + +* ``--showme:link``: Output the linker flags that would have been + supplied to the underlying compiler. + +* ``--showme:command``: Outputs the underlying compiler + command (which may be one or more tokens).
+ +* ``--showme:incdirs``: Outputs a space-delimited (but otherwise + undecorated) list of directories that the wrapper compiler would + have provided to the underlying compiler to indicate + where relevant header files are located. + +* ``--showme:libdirs``: Outputs a space-delimited (but otherwise + undecorated) list of directories that the wrapper compiler would + have provided to the underlying linker to indicate where relevant + libraries are located. + +* ``--showme:libs``: Outputs a space-delimited (but otherwise + undecorated) list of library names that the wrapper compiler would + have used to link an application. For example: ``mpi open-pal + util``. + +* ``--showme:version``: Outputs the version number of Open MPI. + +* ``--showme:help``: Output a brief usage help message. + +See the man page for your underlying compiler for other options that +can be passed through mpicc. + + +DESCRIPTION +----------- + +Conceptually, the role of these commands is quite simple: +transparently add relevant compiler and linker flags to the user's +command line that are necessary to compile / link Open MPI programs, +and then invoke the underlying compiler to actually perform the +command. + +As such, these commands are frequently referred to as "wrapper" +compilers because they do not actually compile or link applications +themselves; they only add in command line flags and invoke the +back-end compiler. + +Background +---------- + +Open MPI provides wrapper compilers for several languages: + +* ``mpicc``: C + +* ``mpic++``, ``mpicxx`` (and on systems with case-sensitive file + systems, ``mpiCC``): C++ + + .. note:: ``mpic++``, ``mpicxx``, and ``mpiCC`` all invoke the same + underlying C++ compiler with the same options. All are + provided for compatibility with other MPI implementations.
+ +* ``mpifort`` (and its legacy/deprecated aliases ``mpif77`` and + ``mpif90``): Fortran + +* ``mpijavac``: Java + +The wrapper compilers for each of the languages are identical; they +can be used interchangeably. The different names are provided solely +for backwards compatibility. + + +Fortran Notes +------------- + +The Fortran wrapper compiler for MPI (``mpifort``, and its +legacy/deprecated names ``mpif77`` and ``mpif90``) can compile and +link MPI applications that use any/all of the MPI Fortran bindings: +``mpif.h``, the ``mpi`` module, and the ``mpi_f08`` module (assuming +Open MPI was installed with support for each of these Fortran +bindings). Specifically: it is no longer necessary to use different +wrapper compilers for applications that use ``mpif.h`` +vs. applications that use the ``mpi`` module -- just use ``mpifort`` +for all Fortran MPI applications. + +Note, however, that the Fortran compiler may require additional +command-line options to enforce a specific Fortran dialect. For +example, in some versions of the IBM XLF compiler, if ``xlf90`` is the +underlying Fortran compiler, ``-qfixed`` may be necessary to compile +fixed-format Fortran source files. + +Finally, note that ``mpifort`` will be inoperative and will return an +error on use if Fortran support was not built into the MPI layer. + +Overview +-------- + +``mpicc`` is a convenience wrapper for the underlying C compiler. +Translation of an Open MPI program requires the linkage of the Open +MPI-specific libraries which may not reside in one of the standard +search directories of ``ld(1)``. It also often requires the inclusion +of header files that may also not be found in a standard location. + +``mpicc`` passes its arguments to the underlying C compiler along with +the ``-I``, ``-L`` and ``-l`` options required by Open MPI programs. + +The same is true for all the other language wrapper compilers.
+ +The Open MPI Team *strongly* encourages using the wrapper compilers +instead of attempting to link to the Open MPI libraries manually. This +allows the specific implementation of Open MPI to change without +forcing changes to linker directives in users' Makefiles. Indeed, the +specific set of flags and libraries used by the wrapper compilers +depends on how Open MPI was configured and built; the values can change +between different installations of the same version of Open MPI. + +Indeed, since the wrappers are simply thin shells on top of an +underlying compiler, there are very, very few compelling reasons *not* +to use Open MPI's wrapper compilers. When it is not possible to use +the wrappers directly, the ``-showme:compile`` and ``-showme:link`` +options should be used to determine what flags the wrappers would have +used. For example: + +.. code:: sh + + shell$ cc -c file1.c `mpicc -showme:compile` + + shell$ cc -c file2.c `mpicc -showme:compile` + + shell$ cc file1.o file2.o `mpicc -showme:link` -o my_mpi_program + + +NOTES +----- + +It is possible to make the wrapper compilers multi-lib aware. That is, +the libraries and includes specified may differ based on the compiler +flags specified (for example, with the GNU compilers on Linux, a +different library path may be used if ``-m32`` is seen versus ``-m64`` +being seen). This is not the default behavior in a standard build, but +can be activated (for example, in a binary package providing both 32 +and 64 bit support). `More information can be found here +`_. + + +.. 
_man1-ompi-wrapper-compiler-files: + +FILES +----- + +The strings that the wrapper compilers insert into the command line +before invoking the underlying compiler are stored in a text file +created by Open MPI and installed to +``$pkgdata/NAME-wrapper-data.txt``, where: + +* ``$pkgdata`` is typically ``$prefix/share/openmpi`` +* ``$prefix`` is the top installation directory of Open MPI +* ``NAME`` is the name of the wrapper compiler (e.g., + ``$pkgdata/mpicc-wrapper-data.txt``) + +It is rarely necessary to edit these files, but they can be examined to +gain insight into what flags the wrappers are placing on the command +line. + + +ENVIRONMENT VARIABLES +--------------------- + +By default, the wrappers use the compilers that were selected when +Open MPI was configured. These compilers were either found +automatically by Open MPI's "configure" script, or were selected by +the user in the ``CC``, ``CXX``, and/or ``FC`` environment variables +before ``configure`` was invoked. Additionally, other arguments specific +to the compiler may have been selected by configure. + +These values can be selectively overridden by either editing the text +files containing this configuration information (see the :ref:`FILES +` section), or by setting selected +environment variables of the form ``ompi_value``. + +Valid value names are: + +* ``CPPFLAGS``: Flags added when invoking the preprocessor (C or C++) + +* ``LDFLAGS``: Flags added when invoking the linker (C, C++, or + Fortran) + +* ``LIBS``: Libraries added when invoking the linker (C, C++, or + Fortran) + +* ``CC``: C compiler + +* ``CFLAGS``: C compiler flags + +* ``CXX``: C++ compiler + +* ``CXXFLAGS``: C++ compiler flags + +* ``FC``: Fortran compiler + +* ``FCFLAGS``: Fortran compiler flags diff --git a/docs/man-openmpi/man1/ompi_info.1.rst b/docs/man-openmpi/man1/ompi_info.1.rst new file mode 100644 index 00000000000..fee6dc0bc7a --- /dev/null +++ b/docs/man-openmpi/man1/ompi_info.1.rst @@ -0,0 +1,227 @@ +.. 
_man1-ompi_info: + + +ompi_info +========= + +.. include_body + +ompi_info - Display information about the Open MPI installation + + +SYNOPSIS +-------- + +``ompi_info [options]`` + + +DESCRIPTION +----------- + +``ompi_info`` provides detailed information about the Open MPI +installation. It can be useful for at least three common scenarios: + +#. Checking local configuration and seeing how Open MPI was installed. + +#. Submitting bug reports / help requests to the Open MPI community + (see :doc:`Getting help </getting-help>`). + +#. Seeing a list of installed Open MPI plugins and querying what MCA + parameters they support. + +.. note:: ``ompi_info`` defaults to only showing a few MCA parameters + (i.e., level 1 parameters). Use the ``--level`` + option to enable showing more options (see the :ref:`LEVELS + <man1-ompi_info-levels>` section for more information). + + +OPTIONS +------- + +``ompi_info`` accepts the following options: + +* ``-a``, ``--all``: Show all configuration options and MCA + parameters. Also changes the default MCA parameter level to 9, + unless ``--level`` is also specified. + +* ``--arch``: Show architecture on which Open MPI was compiled. + +* ``-c``, ``--config``: Show configuration options + +* ``-gmca``, ``--gmca <param> <value>``: Pass global MCA parameters + that are applicable to all contexts. + +* ``-h``, ``--help``: Shows help / usage message. + +* ``--hostname``: Show the hostname on which Open MPI was configured + and built. + +* ``--internal``: Show internal MCA parameters (not meant to be + modified by users). + +* ``--level <level>``: Show only variables with at most this level + (1-9). The default is 1 unless ``--all`` is specified without + ``--level``, in which case the default is 9. See the :ref:`LEVELS + <man1-ompi_info-levels>` section for more information. + +* ``-mca``, ``--mca <param> <value>``: Pass context-specific MCA + parameters; they are considered global if ``--gmca`` is not used and + only one context is specified. + +* ``--param <type> <component>``: Show MCA parameters.
The first + parameter is the type of the component to display; the second + parameter is the specific component to display (or the keyword + ``all``, meaning "display all components of this type"). + +* ``-t``, ``--type``: Show MCA parameters of the type specified in the + parameter. Accepts the following parameters: ``unsigned_int``, + ``unsigned_long``, ``unsigned_long_long``, ``size_t``, ``string``, + ``version_string``, ``bool``, ``double``. By default level is 1 + unless it is specified with ``--level``. + +* ``--parsable``: When used in conjunction with other parameters, the + output is displayed in a machine-parsable format. + +* ``--parseable``: Synonym for ``--parsable``. + +* ``--path <type>``: Show paths that Open MPI was configured + with. Accepts the following parameters: ``prefix``, ``bindir``, + ``libdir``, ``incdir``, ``pkglibdir``, ``sysconfdir``. + +* ``--pretty``: When used in conjunction with other parameters, the output is + displayed in "prettyprint" format (default) + +* ``--selected-only``: Show only variables from selected components. + +* ``-V``, ``--version``: Show version of Open MPI. + +.. _man1-ompi_info-levels: + +LEVELS +------ + +Open MPI has many, many run-time tunable parameters (called "MCA +parameters"), and usually only a handful of them are useful to a given +user. + +As such, Open MPI has divided these parameters up into nine distinct +levels, broken down into three categories, each with three +sub-categories. + +Note that since each MCA parameter is accessible through the MPI_T +control variable API (introduced in MPI-3.0), these levels exactly +correspond to the nine MPI_T cvar levels. + +The three categories are: + +#. **End user**: Generally, these are parameters that are required for + correctness, meaning that a user may need to set these just to get + their MPI application to run correctly. For example, BTL + ``if_include`` and ``if_exclude`` parameters fit into this + category. + +#.
**Application tuner**: Generally, these are parameters that can be + used to tweak MPI application performance. This even includes + parameters that control resource exhaustion levels (e.g., number of + free list entries, size of buffers, etc.), and could be considered + "correctness" parameters if they're set too low. But, really -- + they're tuning parameters. + +#. **Open MPI developer**: Parameters in this category either don't + fit in the other two, or are specifically intended for debugging / + development of Open MPI itself. + +And within each category, there are three sub-categories: + +#. **Basic**: This sub-category is for parameters that everyone in + this category will want to see -- even less-advanced end users, + application tuners, and new OMPI developers. + +#. **Detailed**: This sub-category is for parameters that are + generally useful, but users probably won't need to change them + often. + +#. **All**: This sub-category is for all other parameters. Such + parameters are likely fairly esoteric. + +Combining the categories and sub-categories, here's how Open MPI +defines all nine levels: + +#. Basic information of interest to end users. +#. Detailed information of interest to end users. +#. All remaining information of interest to end users. +#. Basic information required for application tuners. +#. Detailed information required for application tuners. +#. All remaining information required for application tuners. +#. Basic information for Open MPI implementors. +#. Detailed information for Open MPI implementors. +#. All remaining information for Open MPI implementors. + +By default, ``ompi_info`` only shows level 1 MCA parameters. To see +more MCA parameters, use the ``--level`` command line option. + + +EXAMPLES +-------- + +.. code-block:: + + ompi_info + +Show the default output of options and listing of installed +components in a human-readable / prettyprint format. + +.. 
code-block:: + + ompi_info --parsable + +Show the default output of options and listing of installed components +in a machine-parsable format. + +.. code-block:: + + ompi_info --param btl tcp + +Show the level 1 MCA parameters of the "tcp" BTL component in a +human-readable / prettyprint format. + +.. code-block:: + + ompi_info --param btl tcp --level 6 + +Show the level 1 through level 6 MCA parameters of the "tcp" BTL +component in a human-readable / prettyprint format. + +.. code-block:: + + ompi_info --param btl tcp --parsable + +Show the level 1 MCA parameters of the "tcp" BTL component in a +machine-parsable format. + +.. code-block:: + + ompi_info --type string --pretty-print --level 3 + +Show the level 3 MCA parameters of string type in a human-readable / +prettyprint format. + +.. code-block:: + + ompi_info --path bindir + +Show the "bindir" that Open MPI was configured with. + +.. code-block:: + + ompi_info --version + +Show the version of Open MPI version numbers in a prettyprint format. + +.. code-block:: + + ompi_info --all + +Show *all* information about the Open MPI installation, including all +components that can be found, all the MCA parameters that they support +(i.e., levels 1 through 9), versions of Open MPI and the components, +etc. diff --git a/docs/man-openmpi/man1/opal_wrapper.1.rst b/docs/man-openmpi/man1/opal_wrapper.1.rst new file mode 100644 index 00000000000..80002dbd015 --- /dev/null +++ b/docs/man-openmpi/man1/opal_wrapper.1.rst @@ -0,0 +1,33 @@ +.. _man1-opal_wrapper: + + +opal_wrapper +============ + +.. include_body + +opal_wrapper - Back-end Open MPI wrapper command + + +DESCRIPTION +----------- + +``opal_wrapper`` is not meant to be called directly by end users. It +is automatically invoked as the back-end by the Open MPI wrapper +commands such as: ``mpicc``, ``mpicxx``, and ``mpifort`` +(and its legacy/deprecated aliases ``mpif77`` and ``mpif90``). 
+ +Some Open MPI installations may have additional wrapper commands, +and/or have renamed the wrapper compilers listed above to avoid +executable name conflicts with other MPI implementations. Hence, you +may also have wrapper compilers installed including the following +names: ``mpifort.openmpi`` (and the legacy/deprecated aliases +``mpif90.openmpi`` and ``mpif77.openmpi``), ``mpicxx.openmpi``, +``mpicc.openmpi``. + +.. seealso:: + The following may exist depending on your particular Open MPI + installation: + :ref:`mpicc(1) <man1-mpicc>`, + :ref:`mpicxx(1) <man1-mpicxx>`, + :ref:`mpifort(1) <man1-mpifort>` diff --git a/docs/man-openmpi/man3/MPIX_Query_cuda_support.3.rst b/docs/man-openmpi/man3/MPIX_Query_cuda_support.3.rst new file mode 100644 index 00000000000..de3ab156cd8 --- /dev/null +++ b/docs/man-openmpi/man3/MPIX_Query_cuda_support.3.rst @@ -0,0 +1,72 @@ +.. _mpix_query_cuda_support: + + +MPIX_Query_cuda_support +======================= + +.. include_body + +**MPIX_Query_cuda_support** - Returns 1 if there is CUDA aware support +and 0 if there is not. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include <mpi.h> + #include <mpi-ext.h> + + int MPIX_Query_cuda_support(void) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +There is no Fortran binding for this function. + + +C++ Syntax +^^^^^^^^^^ + +There is no C++ binding for this function. + + +DESCRIPTION +----------- + +This routine returns 1 if the MPI library is built with CUDA and the runtime +supports CUDA buffers. This routine must be called after MPI is +initialized by a call to :ref:`MPI_Init` or :ref:`MPI_Init_thread`.
+ + +Examples +^^^^^^^^ + +:: + + + #include <stdio.h> + #include "mpi.h" + + #include "mpi-ext.h" /* Needed for CUDA-aware check */ + + int main(int argc, char *argv[]) + { + + MPI_Init(&argc, &argv); + + if (MPIX_Query_cuda_support()) { + printf("This MPI library has CUDA-aware support.\n"); + } else { + printf("This MPI library does not have CUDA-aware support.\n"); + } + MPI_Finalize(); + + return 0; + } diff --git a/docs/man-openmpi/man3/MPI_Abort.3.rst b/docs/man-openmpi/man3/MPI_Abort.3.rst new file mode 100644 index 00000000000..24d166f443f --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Abort.3.rst @@ -0,0 +1,86 @@ +.. _mpi_abort: + + +MPI_Abort +========= + +.. include_body + +:ref:`MPI_Abort` - Terminates MPI execution environment. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include <mpi.h> + + int MPI_Abort(MPI_Comm comm, int errorcode) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_ABORT(COMM, ERRORCODE, IERROR) + INTEGER COMM, ERRORCODE, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Abort(comm, errorcode, ierror) + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, INTENT(IN) :: errorcode + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``comm``: Communicator of tasks to abort. +* ``errorcode``: Error code to return to invoking environment. + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +This routine makes a "best attempt" to abort all tasks in the group of +comm. This function does not require that the invoking environment take +any action with the error code. However, a UNIX or POSIX environment +should handle this as a return errorcode from the main program or an +abort (errorcode).
+ +The long-term goal of the Open MPI implementation is to terminate all +processes in all tasks that contain a process in *comm, and the error +code is not returned to the invoking environment. At the moment, this +isn't fully implemented and :ref:`MPI_Abort` will terminate the entire job.* + +Note: All associated processes are sent a SIGTERM. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_Accumulate.3.rst b/docs/man-openmpi/man3/MPI_Accumulate.3.rst new file mode 100644 index 00000000000..7bf3697b491 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Accumulate.3.rst @@ -0,0 +1,184 @@ +.. _mpi_accumulate: + + +MPI_Accumulate +============== + +.. include_body + +:ref:`MPI_Accumulate`, :ref:`MPI_Raccumulate` - Combines the contents of the +origin buffer with that of a target buffer. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Accumulate(const void *origin_addr, int origin_count, + MPI_Datatype origin_datatype, int target_rank, + MPI_Aint target_disp, int target_count, + MPI_Datatype target_datatype, MPI_Op op, MPI_Win win) + + int MPI_Raccumulate(const void *origin_addr, int origin_count, + MPI_Datatype origin_datatype, int target_rank, + MPI_Aint target_disp, int target_count, + MPI_Datatype target_datatype, MPI_Op op, MPI_Win win, + MPI_Request *request) + + +Fortran Syntax (see FORTRAN 77 NOTES) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_ACCUMULATE(ORIGIN_ADDR, ORIGIN_COUNT, ORIGIN_DATATYPE, TARGET_RANK, + TARGET_DISP, TARGET_COUNT, TARGET_DATATYPE, OP, WIN, IERROR) + ORIGIN_ADDR(*) + INTEGER(KIND=MPI_ADDRESS_KIND) TARGET_DISP + INTEGER ORIGIN_COUNT, ORIGIN_DATATYPE, TARGET_RANK, TARGET_COUNT, + TARGET_DATATYPE, OP, WIN, IERROR + + MPI_RACCUMULATE(ORIGIN_ADDR, ORIGIN_COUNT, ORIGIN_DATATYPE, TARGET_RANK, + TARGET_DISP, TARGET_COUNT, TARGET_DATATYPE, OP, WIN, REQUEST, IERROR) + ORIGIN_ADDR(*) + INTEGER(KIND=MPI_ADDRESS_KIND) TARGET_DISP + INTEGER ORIGIN_COUNT, ORIGIN_DATATYPE, TARGET_RANK, TARGET_COUNT, + TARGET_DATATYPE, OP, WIN, REQUEST, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Accumulate(origin_addr, origin_count, origin_datatype, target_rank, + target_disp, target_count, target_datatype, op, win, ierror) + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: origin_addr + INTEGER, INTENT(IN) :: origin_count, target_rank, target_count + TYPE(MPI_Datatype), INTENT(IN) :: origin_datatype, target_datatype + INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: target_disp + TYPE(MPI_Op), INTENT(IN) :: op + TYPE(MPI_Win), INTENT(IN) :: win + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Raccumulate(origin_addr, origin_count, origin_datatype, target_rank, + target_disp, target_count, target_datatype, op, win, request, + ierror) + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: origin_addr + INTEGER, INTENT(IN) :: origin_count, target_rank, target_count + TYPE(MPI_Datatype), INTENT(IN) :: origin_datatype, target_datatype + INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: target_disp + TYPE(MPI_Op), INTENT(IN) :: op + TYPE(MPI_Win), INTENT(IN) :: win + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``origin_addr``: Initial address of buffer (choice). 
+* ``origin_count``: Number of entries in buffer (nonnegative integer). +* ``origin_datatype``: Data type of each buffer entry (handle). +* ``target_rank``: Rank of target (nonnegative integer). +* ``target_disp``: Displacement from start of window to beginning of target buffer (nonnegative integer). +* ``target_count``: Number of entries in target buffer (nonnegative integer). +* ``target_datatype``: Data type of each entry in target buffer (handle). +* ``op``: Reduce operation (handle). +* ``win``: Window object (handle). + +OUTPUT PARAMETER +---------------- +* ``MPI_Raccumulate``: RMA request +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Accumulate` is a function used for one-sided MPI communication +that adds the contents of the origin buffer (as defined by +*origin_addr*, *origin_count*, and *origin_datatype*) to the buffer +specified by the arguments *target_count* and *target_datatype*, at +offset *target_disp*, in the target window specified by *target_rank* +and *win*, using the operation *op*. The target window can only be +accessed by processes within the same node. This is similar to :ref:`MPI_Put`, +except that data is combined into the target area instead of overwriting +it. + +Any of the predefined operations for :ref:`MPI_Reduce` can be used. +User-defined functions cannot be used. For example, if *op* is MPI_SUM, +each element of the origin buffer is added to the corresponding element +in the target, replacing the former value in the target. + +Each datatype argument must be a predefined data type or a derived data +type, where all basic components are of the same predefined data type. +Both datatype arguments must be constructed from the same predefined +data type. The operation *op* applies to elements of that predefined +type. The *target_datatype* argument must not specify overlapping +entries, and the target buffer must fit in the target window. 
+ +A new predefined operation, MPI_REPLACE, is defined. It corresponds to +the associative function f(a, b) =b; that is, the current value in the +target memory is replaced by the value supplied by the origin. + +:ref:`MPI_Raccumulate` is similar to :ref:`MPI_Accumulate`, except that it +allocates a communication request object and associates it with the +request handle (the argument *request*) that can be used to wait or test +for completion. The completion of an :ref:`MPI_Raccumulate` operation +indicates that the *origin_addr* buffer is free to be updated. It does +not indicate that the operation has completed at the target window. + + +FORTRAN 77 NOTES +---------------- + +The MPI standard prescribes portable Fortran syntax for the +*TARGET_DISP* argument only for Fortran 90. FORTRAN 77 users may use the +non-portable syntax + +:: + + INTEGER*MPI_ADDRESS_KIND TARGET_DISP + +where MPI_ADDRESS_KIND is a constant defined in mpif.h and gives the +length of the declared integer in bytes. + + +NOTES +----- + +:ref:`MPI_Put` is a special case of :ref:`MPI_Accumulate`, with the operation +MPI_REPLACE. Note, however, that :ref:`MPI_Put` and :ref:`MPI_Accumulate` have +different constraints on concurrent updates. + +It is the user's responsibility to guarantee that, when using the +accumulate functions, the target displacement argument is such that +accesses to the window are properly aligned according to the data type +arguments in the call to the :ref:`MPI_Accumulate` function. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. 
Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Put` :ref:`MPI_Get_accumulate` :ref:`MPI_Reduce` diff --git a/docs/man-openmpi/man3/MPI_Add_error_class.3.rst b/docs/man-openmpi/man3/MPI_Add_error_class.3.rst new file mode 100644 index 00000000000..0df12e8a9dd --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Add_error_class.3.rst @@ -0,0 +1,95 @@ +.. _mpi_add_error_class: + + +MPI_Add_error_class +=================== + +.. include_body + +:: + + MPI_Add_error_class - Creates a new error class and returns its value + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Add_error_class(int *errorclass) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_ADD_ERROR_CLASS(ERRORCLASS, IERROR) + INTEGER ERRORCLASS, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Add_error_class(errorclass, ierror) + INTEGER, INTENT(OUT) :: errorclass + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +OUTPUT PARAMETERS +----------------- +* ``errorclass``: New error class (integer). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +The function :ref:`MPI_Add_error_class` creates a new, local error class. + + +NOTES +----- + +Because this function is local, the same value of *errorclass* may not +be returned on all processes that make this call, even if they call the +function concurrently. Thus, same error on different processes may not +cause the same value of *errorclass* to be returned. To reduce the +potential for confusion, :ref:`MPI_Add_error_string` may be used on multiple +processes to associate the same error string with the newly created +*errorclass*. Even though *errorclass* may not be consistent across +processes, using :ref:`MPI_Add_error_string` will ensure the error string +associated with it will be the same everywhere. 
+ +No function is provided to free error classes, as it is not expected +that an application will create them in significant numbers. + +The value returned is always greater than or equal to MPI_ERR_LASTCODE. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Add_error_code` :ref:`MPI_Add_error_string` :ref:`MPI_Error_class` :ref:`MPI_Error_string` diff --git a/docs/man-openmpi/man3/MPI_Add_error_code.3.rst b/docs/man-openmpi/man3/MPI_Add_error_code.3.rst new file mode 100644 index 00000000000..f853e7dd859 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Add_error_code.3.rst @@ -0,0 +1,90 @@ +.. _mpi_add_error_code: + + +MPI_Add_error_code +================== + +.. include_body + +:ref:`MPI_Add_error_code` - Creates a new error code associated with +*errorclass* + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Add_error_code(int errorclass, int *errorcode) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_ADD_ERROR_CODE(ERRORCLASS, ERRORCODE, IERROR) + INTEGER ERRORCLASS, ERRORCODE, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE mpi_f08 + MPI_Add_error_code(errorclass, errorcode, ierror) + INTEGER, INTENT(IN) :: errorclass + INTEGER, INTENT(OUT) :: errorcode + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETER +--------------- +* ``errorclass``: MPI error class (integer). + +OUTPUT PARAMETERS +----------------- +* ``errorcode``: Error code returned by an MPI routine or an MPI error class (integer). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +Creates a new error code associated with *errorclass* and returns its +value in *errorcode*. + + +NOTES +----- + +No function is provided to free error codes, as it is not expected that +an application will create them in significant numbers. + +The value returned is always greater than or equal to MPI_ERR_LASTCODE. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Add_error_class` :ref:`MPI_Error_class` diff --git a/docs/man-openmpi/man3/MPI_Add_error_string.3.rst b/docs/man-openmpi/man3/MPI_Add_error_string.3.rst new file mode 100644 index 00000000000..fa3a81b9972 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Add_error_string.3.rst @@ -0,0 +1,87 @@ +.. _mpi_add_error_string: + + +MPI_Add_error_string +==================== + +.. include_body + +:: + + MPI_Add_error_string - Associates a string with an error code or class + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. 
code-block:: c + + #include <mpi.h> + + int MPI_Add_error_string(int errorcode, const char *string) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_ADD_ERROR_STRING(ERRORCODE, STRING, IERROR) + INTEGER ERRORCODE, IERROR + CHARACTER*(*) STRING + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Add_error_string(errorcode, string, ierror) + INTEGER, INTENT(IN) :: errorcode + CHARACTER(LEN=*), INTENT(IN) :: string + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``errorcode``: MPI error class, or an error code returned by an MPI routine (integer). +* ``string``: Text that corresponds to the error code or class (string). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +This routine associates an error string with an error code or class. +Calling :ref:`MPI_Add_error_string` for an error code or class that already has +an associated error string will replace the old string with the new one. +It is erroneous to call :ref:`MPI_Add_error_string` for an error value not +generated via :ref:`MPI_Add_error_class` or :ref:`MPI_Add_error_code` (e.g., an error +code or class with a value not greater than MPI_ERR_LASTCODE). + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. 
seealso:: + :ref:`MPI_Add_error_class` :ref:`MPI_Add_error_code` :ref:`MPI_Error_class` :ref:`MPI_Error_string` diff --git a/docs/man-openmpi/man3/MPI_Address.3.rst b/docs/man-openmpi/man3/MPI_Address.3.rst new file mode 100644 index 00000000000..2bae9a3a153 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Address.3.rst @@ -0,0 +1,104 @@ +.. _mpi_address: + + +MPI_Address +=========== + +.. include_body + +:ref:`MPI_Address` - Gets the address of a location in memory -- use of +this routine is deprecated. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Address(void *location, MPI_Aint *address) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + INCLUDE 'mpif.h' + MPI_ADDRESS(LOCATION, ADDRESS, IERROR) + LOCATION (*) + INTEGER ADDRESS, IERROR + + +INPUT PARAMETER +--------------- +* ``location``: Location in caller memory (choice). + +OUTPUT PARAMETERS +----------------- +* ``address``: Address of location (integer). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +Note that use of this routine is *deprecated* as of MPI-2. Please use +:ref:`MPI_Get_address` instead. + +The address of a location in memory can be found by invoking this +function. Returns the (byte) address of location. + +Example: Using :ref:`MPI_Address` for an array. + +:: + + REAL A(100,100) + +| +| INTEGER I1, I2, DIFF +| CALL MPI_ADDRESS(A(1,1), I1, IERROR) +| CALL MPI_ADDRESS(A(10,10), I2, IERROR) +| DIFF = I2 - I1 +| ! The value of DIFF is 909*sizeofreal; the values of I1 and I2 are +| ! implementation dependent. + + +NOTES +----- + +This routine is provided for both Fortran and C programmers and may be +useful when writing portable code. In the current release, the address +returned by this routine will be the same as that produced by the C & +operator. + +C users may be tempted to avoid using :ref:`MPI_Address` and rely on the +availability of the address operator &. 
Note, however, that & +cast-expression is a pointer, not an address. ANSI C does not require +that the value of a pointer (or the pointer cast to int) be the absolute +address of the object pointed at although this is commonly the case. +Furthermore, referencing may not have a unique definition on machines +with a segmented address space. The use of :ref:`MPI_Address` to "reference" C +variables guarantees portability to such machines as well. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Get_address` diff --git a/docs/man-openmpi/man3/MPI_Aint_add.3.rst b/docs/man-openmpi/man3/MPI_Aint_add.3.rst new file mode 100644 index 00000000000..8ff646eff1f --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Aint_add.3.rst @@ -0,0 +1,94 @@ +.. _mpi_aint_add: + + +MPI_Aint_add +============ + +.. include_body + +:ref:`MPI_Aint_add`, :ref:`MPI_Aint_diff` - Portable functions for arithmetic +on MPI_Aint values. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + MPI_Aint MPI_Aint_add(MPI_Aint base, MPI_Aint disp) + + MPI_Aint MPI_Aint_diff(MPI_Aint addr1, MPI_Aint addr2) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! 
or the older form: INCLUDE 'mpif.h' + INTEGER(KIND=MPI_ADDRESS_KIND) MPI_AINT_ADD(BASE, DISP) + INTEGER(KIND=MPI_ADDRESS_KIND) BASE, DISP + + INTEGER(KIND=MPI_ADDRESS_KIND) MPI_AINT_DIFF(ADDR1, ADDR2) + INTEGER(KIND=MPI_ADDRESS_KIND) ADDR1, ADDR2 + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + INTEGER(KIND=MPI_ADDRESS_KIND) MPI_AINT_ADD(BASE, DISP) + INTEGER(KIND=MPI_ADDRESS_KIND) BASE, DISP + + INTEGER(KIND=MPI_ADDRESS_KIND) MPI_AINT_DIFF(ADDR1, ADDR2) + INTEGER(KIND=MPI_ADDRESS_KIND) ADDR1, ADDR2 + + +INPUT PARAMETERS +---------------- +* ``base``: Base address (integer). +* ``disp``: Displacement (integer). +* ``addr1``: Minuend address (integer). +* ``addr2``: Subtrahend address (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Aint_add` produces a new MPI_Aint value that is equivalent to the +sum of the *base* and *disp* arguments, where *base* represents a base +address returned by a call to :ref:`MPI_Get_address` and *disp* represents +a signed integer displacement. The resulting address is valid only at +the process that generated *base*, and it must correspond to a location +in the same object referenced by *base*, as described in MPI-3.1 section +4.1.12. The addition is performed in a manner that results in the +correct MPI_Aint representation of the output address, as if the process +that originally produced *base* had called: + +:: + + MPI_Get_address ((char *) base + disp, &result); + +**MPI_Aint_diff** produces a new MPI_Aint value that is equivalent to +the difference between *addr1* and *addr2* arguments, where *addr1* +and *addr2* represent addresses returned by calls to +:ref:`MPI_Get_address`. The resulting address is valid only at the +process that generated *addr1* and *addr2*, and *addr1* and *addr2* +must correspond to locations in the same object in the same process, +as described in MPI-3.1 section 4.1.12. 
The difference is calculated +in a manner that results in the signed difference from *addr1* to +*addr2*, as if the process that originally produced the addresses had +called (char \*) *addr1* - (char \*) *addr2* on the addresses +initially passed to :ref:`MPI_Get_address`. + + +.. seealso:: + :ref:`MPI_Get_address` diff --git a/docs/man-openmpi/man3/MPI_Aint_diff.3.rst b/docs/man-openmpi/man3/MPI_Aint_diff.3.rst new file mode 100644 index 00000000000..b431dca032e --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Aint_diff.3.rst @@ -0,0 +1,9 @@ +.. _mpi_aint_diff: + +MPI_Aint_diff +============= + .. include_body + +.. include:: ../man3/MPI_Aint_add.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Allgather.3.rst b/docs/man-openmpi/man3/MPI_Allgather.3.rst new file mode 100644 index 00000000000..7ca3c4e2370 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Allgather.3.rst @@ -0,0 +1,214 @@ +.. _mpi_allgather: + + +MPI_Allgather +============= + +.. include_body + +:ref:`MPI_Allgather`, :ref:`MPI_Iallgather`, :ref:`MPI_Allgather_init` - Gathers data +from all processes and distributes it to all processes + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Allgather(const void *sendbuf, int sendcount, + MPI_Datatype sendtype, void *recvbuf, int recvcount, + MPI_Datatype recvtype, MPI_Comm comm) + + int MPI_Iallgather(const void *sendbuf, int sendcount, + MPI_Datatype sendtype, void *recvbuf, int recvcount, + MPI_Datatype recvtype, MPI_Comm comm, MPI_Request *request) + + int MPI_Allgather_init(const void *sendbuf, int sendcount, + MPI_Datatype sendtype, void *recvbuf, int recvcount, + MPI_Datatype recvtype, MPI_Comm comm, MPI_Info info, MPI_Request *request) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! 
or the older form: INCLUDE 'mpif.h' + MPI_ALLGATHER(SENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNT, + RECVTYPE, COMM, IERROR) + SENDBUF (*), RECVBUF (*) + INTEGER SENDCOUNT, SENDTYPE, RECVCOUNT, RECVTYPE, COMM, + INTEGER IERROR + + MPI_IALLGATHER(SENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNT, + RECVTYPE, COMM, REQUEST, IERROR) + SENDBUF(*), RECVBUF (*) + INTEGER SENDCOUNT, SENDTYPE, RECVCOUNT, RECVTYPE, COMM + INTEGER REQUEST, IERROR + + MPI_ALLGATHER_INIT(SENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNT, + RECVTYPE, COMM, INFO, REQUEST, IERROR) + SENDBUF(*), RECVBUF (*) + INTEGER SENDCOUNT, SENDTYPE, RECVCOUNT, RECVTYPE, COMM + INTEGER INFO, REQUEST, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Allgather(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, + comm, ierror) + TYPE(*), DIMENSION(..), INTENT(IN) :: sendbuf + TYPE(*), DIMENSION(..) :: recvbuf + INTEGER, INTENT(IN) :: sendcount, recvcount + TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Iallgather(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, + comm, request, ierror) + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: sendbuf + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: recvbuf + INTEGER, INTENT(IN) :: sendcount, recvcount + TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Allgather_init(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, + comm, info, request, ierror) + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: sendbuf + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: recvbuf + INTEGER, INTENT(IN) :: sendcount, recvcount + TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Info), INTENT(IN) :: info + TYPE(MPI_Request), INTENT(OUT) :: 
request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``sendbuf``: Starting address of send buffer (choice). +* ``sendcount``: Number of elements in send buffer (integer). +* ``sendtype``: Datatype of send buffer elements (handle). +* ``recvbuf``: Starting address of recv buffer (choice). +* ``recvcount``: Number of elements received from any process (integer). +* ``recvtype``: Datatype of receive buffer elements (handle). +* ``comm``: Communicator (handle). +* ``info``: Info (handle, persistent only). + +OUTPUT PARAMETERS +----------------- +* ``recvbuf``: Address of receive buffer (choice). +* ``request``: Request (handle, non-blocking only). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Allgather` is similar to :ref:`MPI_Gather`, except that all processes +receive the result, instead of just the root. In other words, all +processes contribute to the result, and all processes receive the +result. + +The type signature associated with sendcount, sendtype at a process must +be equal to the type signature associated with recvcount, recvtype at +any other process. + +The outcome of a call to MPI_Allgather(...) is as if all processes +executed n calls to + +:: + + MPI_Gather(sendbuf,sendcount,sendtype,recvbuf,recvcount, + recvtype,root,comm), + +for root = 0 , ..., n-1. The rules for correct usage of :ref:`MPI_Allgather` +are easily found from the corresponding rules for :ref:`MPI_Gather`. + +**Example:** The all-gather version of Example 1 in :ref:`MPI_Gather`. Using +:ref:`MPI_Allgather`, we will gather 100 ints from every process in the group +to every process. + +:: + + MPI_Comm comm; + int gsize,sendarray[100]; + int *rbuf; + ... + MPI_Comm_size( comm, &gsize); + rbuf = (int *)malloc(gsize*100*sizeof(int)); + MPI_Allgather( sendarray, 100, MPI_INT, rbuf, 100, MPI_INT, comm); + +After the call, every process has the group-wide concatenation of the +sets of data. 
+ + +USE OF IN-PLACE OPTION +---------------------- + +When the communicator is an intracommunicator, you can perform an +all-gather operation in-place (the output buffer is used as the input +buffer). Use the variable MPI_IN_PLACE as the value of *sendbuf*. In +this case, *sendcount* and *sendtype* are ignored. The input data of +each process is assumed to be in the area where that process would +receive its own contribution to the receive buffer. Specifically, the +outcome of a call to :ref:`MPI_Allgather` that used the in-place option is +identical to the case in which all processes executed *n* calls to + +:: + + MPI_ALLGATHER ( MPI_IN_PLACE, 0, MPI_DATATYPE_NULL, recvbuf, + recvcount, recvtype, root, comm ) + + for root =0, ... , n-1. + +Note that MPI_IN_PLACE is a special kind of value; it has the same +restrictions on its use as MPI_BOTTOM. + +Because the in-place option converts the receive buffer into a +send-and-receive buffer, a Fortran binding that includes INTENT must +mark these as INOUT, not OUT. + + +WHEN COMMUNICATOR IS AN INTER-COMMUNICATOR +------------------------------------------ + +When the communicator is an inter-communicator, the gather operation +occurs in two phases. The data is gathered from all the members of the +first group and received by all the members of the second group. Then +the data is gathered from all the members of the second group and +received by all the members of the first. The operation, however, need +not be symmetric. The number of items sent by the processes in first +group need not be equal to the number of items sent by the the processes +in the second group. You can move data in only one direction by giving +*sendcount* a value of 0 for communication in the reverse direction. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. 
By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Allgatherv` :ref:`MPI_Gather` diff --git a/docs/man-openmpi/man3/MPI_Allgather_init.3.rst b/docs/man-openmpi/man3/MPI_Allgather_init.3.rst new file mode 100644 index 00000000000..dbfc25c1b85 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Allgather_init.3.rst @@ -0,0 +1,9 @@ +.. _mpi_allgather_init: + +MPI_Allgather_init +================== + .. include_body + +.. include:: ../man3/MPI_Allgather.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Allgatherv.3.rst b/docs/man-openmpi/man3/MPI_Allgatherv.3.rst new file mode 100644 index 00000000000..a5c91932e78 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Allgatherv.3.rst @@ -0,0 +1,201 @@ +.. _mpi_allgatherv: + + +MPI_Allgatherv +============== + +.. include_body + +:ref:`MPI_Allgatherv`, :ref:`MPI_Iallgatherv`, :ref:`MPI_Allgatherv_init` - Gathers data +from all processes and delivers it to all. Each process may contribute a +different amount of data. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. 
code-block:: c + + #include + + int MPI_Allgatherv(const void *sendbuf, int sendcount, + MPI_Datatype sendtype, void *recvbuf, const int recvcounts[], + const int displs[], MPI_Datatype recvtype, MPI_Comm comm) + + int MPI_Iallgatherv(const void *sendbuf, int sendcount, + MPI_Datatype sendtype, void *recvbuf, const int recvcounts[], + const int displs[], MPI_Datatype recvtype, MPI_Comm comm, + MPI_Request *request) + + int MPI_Allgatherv_init(const void *sendbuf, int sendcount, + MPI_Datatype sendtype, void *recvbuf, const int recvcounts[], + const int displs[], MPI_Datatype recvtype, MPI_Comm comm, + MPI_Info info, MPI_Request *request) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_ALLGATHERV(SENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, + RECVCOUNT, DISPLS, RECVTYPE, COMM, IERROR) + SENDBUF(*), RECVBUF(*) + INTEGER SENDCOUNT, SENDTYPE, RECVCOUNT(*) + INTEGER DISPLS(*), RECVTYPE, COMM, IERROR + + MPI_IALLGATHERV(SENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, + RECVCOUNT, DISPLS, RECVTYPE, COMM, REQUEST, IERROR) + SENDBUF(*), RECVBUF(*) + INTEGER SENDCOUNT, SENDTYPE, RECVCOUNT(*), + INTEGER DISPLS(*), RECVTYPE, COMM, REQUEST, IERROR + + MPI_ALLGATHERV_INIT(SENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, + RECVCOUNT, DISPLS, RECVTYPE, COMM, INFO, REQUEST, IERROR) + SENDBUF(*), RECVBUF(*) + INTEGER SENDCOUNT, SENDTYPE, RECVCOUNT(*), + INTEGER DISPLS(*), RECVTYPE, COMM, INFO, REQUEST, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Allgatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, + recvtype, comm, ierror) + TYPE(*), DIMENSION(..), INTENT(IN) :: sendbuf + TYPE(*), DIMENSION(..) 
:: recvbuf + INTEGER, INTENT(IN) :: sendcount, recvcounts(*), displs(*) + TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Iallgatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, + recvtype, comm, request, ierror) + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: sendbuf + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: recvbuf + INTEGER, INTENT(IN) :: sendcount + INTEGER, INTENT(IN), ASYNCHRONOUS :: recvcounts(*), displs(*) + TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Allgatherv_init(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, + recvtype, comm, info, request, ierror) + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: sendbuf + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: recvbuf + INTEGER, INTENT(IN) :: sendcount + INTEGER, INTENT(IN), ASYNCHRONOUS :: recvcounts(*), displs(*) + TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Info), INTENT(IN) :: info + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``sendbuf``: Starting address of send buffer (choice). +* ``sendcount``: Number of elements in send buffer (integer). +* ``sendtype``: Datatype of send buffer elements (handle). +* ``recvcount``: Integer array (of length group size) containing the number of elements that are received from each process. +* ``displs``: Integer array (of length group size). Entry i specifies the displacement (relative to recvbuf) at which to place the incoming data from process i. +* ``recvtype``: Datatype of receive buffer elements (handle). +* ``comm``: Communicator (handle). +* ``info``: Info (handle, persistent only). 
+ +OUTPUT PARAMETERS +----------------- +* ``recvbuf``: Address of receive buffer (choice). +* ``request``: Request (handle, non-blocking only). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Allgatherv` is similar to :ref:`MPI_Allgather` in that all processes gather +data from all other processes, except that each process can send a +different amount of data. The block of data sent from the jth process is +received by every process and placed in the jth block of the buffer +*recvbuf.* + +The type signature associated with sendcount, sendtype, at process j +must be equal to the type signature associated with recvcounts[j], +recvtype at any other process. + +The outcome is as if all processes executed calls to + +:: + + MPI_Allgatherv(sendbuf,sendcount,sendtype,recvbuf,recvcount, + displs,recvtype,root,comm) + +for root = 0 , ..., n-1. The rules for correct usage of :ref:`MPI_Allgatherv` +are easily found from the corresponding rules for :ref:`MPI_Gatherv`. + + +USE OF IN-PLACE OPTION +---------------------- + +When the communicator is an intracommunicator, you can perform an +all-gather operation in-place (the output buffer is used as the input +buffer). Use the variable MPI_IN_PLACE as the value of *sendbuf*. In +this case, *sendcount* and *sendtype* are ignored. The input data of +each process is assumed to be in the area where that process would +receive its own contribution to the receive buffer. Specifically, the +outcome of a call to :ref:`MPI_Allgather` that used the in-place option is +identical to the case in which all processes executed *n* calls to + +:: + + MPI_ALLGATHERV ( MPI_IN_PLACE, 0, MPI_DATATYPE_NULL, recvbuf, + recvcounts, displs, recvtype, root, comm ) + + for root =0, ... , n-1. + +Note that MPI_IN_PLACE is a special kind of value; it has the same +restrictions on its use as MPI_BOTTOM. 
+ +Because the in-place option converts the receive buffer into a +send-and-receive buffer, a Fortran binding that includes INTENT must +mark these as INOUT, not OUT. + + +WHEN COMMUNICATOR IS AN INTER-COMMUNICATOR +------------------------------------------ + +When the communicator is an inter-communicator, the gather operation +occurs in two phases. The data is gathered from all the members of the +first group, concatenated, and received by all the members of the second +group. Then the data is gathered from all the members of the second +group, concatenated, and received by all the members of the first. The +send buffer arguments in the one group must be consistent with the +receive buffer arguments in the other group, and vice versa. The +operation must exhibit symmetric, full-duplex behavior. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Gatherv` :ref:`MPI_Allgather` diff --git a/docs/man-openmpi/man3/MPI_Allgatherv_init.3.rst b/docs/man-openmpi/man3/MPI_Allgatherv_init.3.rst new file mode 100644 index 00000000000..b5fbd6ff40d --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Allgatherv_init.3.rst @@ -0,0 +1,9 @@ +.. _mpi_allgatherv_init: + +MPI_Allgatherv_init +=================== + .. include_body + +.. include:: ../man3/MPI_Allgatherv.3.rst + :start-after: .. 
include_body + diff --git a/docs/man-openmpi/man3/MPI_Alloc_mem.3.rst b/docs/man-openmpi/man3/MPI_Alloc_mem.3.rst new file mode 100644 index 00000000000..46cec80686c --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Alloc_mem.3.rst @@ -0,0 +1,127 @@ +.. _mpi_alloc_mem: + + +MPI_Alloc_mem +============= + +.. include_body + +:ref:`MPI_Alloc_mem` - Allocates a specified memory segment. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Alloc_mem(MPI_Aint size, MPI_Info info, void *baseptr) + + +Fortran Syntax (see FORTRAN NOTES) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_ALLOC_MEM(SIZE, INFO, BASEPTR, IERROR) + INTEGER INFO, IERROR + INTEGER(KIND=MPI_ADDRESS_KIND) SIZE, BASEPTR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Alloc_mem(size, info, baseptr, ierror) + USE, INTRINSIC :: ISO_C_BINDING, ONLY + INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: size + TYPE(MPI_Info), INTENT(IN) :: info + TYPE(C_PTR), INTENT(OUT) :: baseptr + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``size``: Size of memory segment in bytes (nonnegative integer). +* ``info``: Info argument (handle). + +OUTPUT PARAMETERS +----------------- +* ``baseptr``: Pointer to beginning of memory segment allocated. +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Alloc_mem` allocates *size* bytes of memory. The starting address of +this memory is returned in the variable *baseptr*. + + +C NOTES +------- + +The parameter *baseptr* is of type *void \** to allow passing any +pointer object for this parameter. The provided argument should be a +pointer to a pointer of arbitrary type (e.g., *void \*\**). + + +FORTRAN NOTES +------------- + +There is no portable FORTRAN 77 syntax for using :ref:`MPI_Alloc_mem`. 
There is +no portable Fortran syntax for using pointers returned from +:ref:`MPI_Alloc_mem`. However, :ref:`MPI_Alloc_mem` can be used with Sun Fortran +compilers. + +From FORTRAN 77, you can use the following non-standard declarations for +the SIZE and BASEPTR arguments: + +:: + + INCLUDE "mpif.h" + INTEGER*MPI_ADDRESS_KIND SIZE, BASEPTR + +From either FORTRAN 77 or Fortran 90, you can use "Cray pointers" for +the BASEPTR argument. Cray pointers are described further in the Fortran +User's Guide and are supported by many Fortran compilers. For example, + +.. code-block:: fortran + + INCLUDE "mpif.h" + REAL*4 A(100,100) + POINTER (BASEPTR, A) + INTEGER*MPI_ADDRESS_KIND SIZE + + SIZE = 4 * 100 * 100 + CALL MPI_ALLOC_MEM(SIZE,MPI_INFO_NULL,BASEPTR,IERR) + + ! use A + + CALL MPI_FREE_MEM(A, IERR) + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Free_mem` diff --git a/docs/man-openmpi/man3/MPI_Allreduce.3.rst b/docs/man-openmpi/man3/MPI_Allreduce.3.rst new file mode 100644 index 00000000000..bae00545844 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Allreduce.3.rst @@ -0,0 +1,220 @@ +.. _mpi_allreduce: + + +MPI_Allreduce +============= + +.. include_body + +:ref:`MPI_Allreduce`, :ref:`MPI_Iallreduce`, :ref:`MPI_Allreduce_init` - Combines values +from all processes and distributes the result back to all processes. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. 
code-block:: c + + #include + + int MPI_Allreduce(const void *sendbuf, void *recvbuf, int count, + MPI_Datatype datatype, MPI_Op op, MPI_Comm comm) + + int MPI_Iallreduce(const void *sendbuf, void *recvbuf, int count, + MPI_Datatype datatype, MPI_Op op, MPI_Comm comm, + MPI_Request *request) + + int MPI_Allreduce_init(const void *sendbuf, void *recvbuf, int count, + MPI_Datatype datatype, MPI_Op op, MPI_Comm comm, + MPI_Info info, MPI_Request *request) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_ALLREDUCE(SENDBUF, RECVBUF, COUNT, DATATYPE, OP, COMM, IERROR) + SENDBUF(*), RECVBUF(*) + INTEGER COUNT, DATATYPE, OP, COMM, IERROR + + MPI_IALLREDUCE(SENDBUF, RECVBUF, COUNT, DATATYPE, OP, COMM, REQUEST, IERROR) + SENDBUF(*), RECVBUF(*) + INTEGER COUNT, DATATYPE, OP, COMM, REQUEST, IERROR + + MPI_ALLREDUCE_INIT(SENDBUF, RECVBUF, COUNT, DATATYPE, OP, COMM, INFO, REQUEST, IERROR) + SENDBUF(*), RECVBUF(*) + INTEGER COUNT, DATATYPE, OP, COMM, INFO, REQUEST, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Allreduce(sendbuf, recvbuf, count, datatype, op, comm, ierror) + TYPE(*), DIMENSION(..), INTENT(IN) :: sendbuf + TYPE(*), DIMENSION(..) 
:: recvbuf + INTEGER, INTENT(IN) :: count + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Op), INTENT(IN) :: op + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Iallreduce(sendbuf, recvbuf, count, datatype, op, comm, request, + ierror) + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: sendbuf + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: recvbuf + INTEGER, INTENT(IN) :: count + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Op), INTENT(IN) :: op + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Allreduce_init(sendbuf, recvbuf, count, datatype, op, comm, info, request, + ierror) + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: sendbuf + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: recvbuf + INTEGER, INTENT(IN) :: count + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Op), INTENT(IN) :: op + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Info), INTENT(IN) :: info + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``sendbuf``: Starting address of send buffer (choice). +* ``count``: Number of elements in send buffer (integer). +* ``datatype``: Datatype of elements of send buffer (handle). +* ``op``: Operation (handle). +* ``comm``: Communicator (handle). +* ``info``: Info (handle, persistent only). + +OUTPUT PARAMETERS +----------------- +* ``recvbuf``: Starting address of receive buffer (choice). +* ``request``: Request (handle, non-blocking only). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +Same as :ref:`MPI_Reduce` except that the result appears in the receive buffer +of all the group members. 
+ +**Example 1:** A routine that computes the product of a vector and an +array that are distributed across a group of processes and returns the +answer at all nodes (compare with Example 2, with :ref:`MPI_Reduce`, below). + +:: + + SUBROUTINE PAR_BLAS2(m, n, a, b, c, comm) + REAL a(m), b(m,n) ! local slice of array + REAL c(n) ! result + REAL sum(n) + INTEGER n, comm, i, j, ierr + + ! local sum + DO j= 1, n + sum(j) = 0.0 + DO i = 1, m + sum(j) = sum(j) + a(i)*b(i,j) + END DO + END DO + + ! global sum + CALL MPI_ALLREDUCE(sum, c, n, MPI_REAL, MPI_SUM, comm, ierr) + + ! return result at all nodes + RETURN + +**Example 2:** A routine that computes the product of a vector and an +array that are distributed across a group of processes and returns the +answer at node zero. + +:: + + SUBROUTINE PAR_BLAS2(m, n, a, b, c, comm) + REAL a(m), b(m,n) ! local slice of array + REAL c(n) ! result + REAL sum(n) + INTEGER n, comm, i, j, ierr + + ! local sum + DO j= 1, n + sum(j) = 0.0 + DO i = 1, m + sum(j) = sum(j) + a(i)*b(i,j) + END DO + END DO + + ! global sum + CALL MPI_REDUCE(sum, c, n, MPI_REAL, MPI_SUM, 0, comm, ierr) + + ! return result at node zero (and garbage at the other nodes) + RETURN + + +USE OF IN-PLACE OPTION +---------------------- + +When the communicator is an intracommunicator, you can perform an +all-reduce operation in-place (the output buffer is used as the input +buffer). Use the variable MPI_IN_PLACE as the value of *sendbuf* at all +processes. + +Note that MPI_IN_PLACE is a special kind of value; it has the same +restrictions on its use as MPI_BOTTOM. + +Because the in-place option converts the receive buffer into a +send-and-receive buffer, a Fortran binding that includes INTENT must +mark these as INOUT, not OUT. + + +WHEN COMMUNICATOR IS AN INTER-COMMUNICATOR +------------------------------------------ + +When the communicator is an inter-communicator, the reduce operation +occurs in two phases. 
The data is reduced from all the members of the +first group and received by all the members of the second group. Then +the data is reduced from all the members of the second group and +received by all the members of the first. The operation exhibits a +symmetric, full-duplex behavior. + +When the communicator is an intra-communicator, these groups are the +same, and the operation occurs in a single phase. + + +NOTES ON COLLECTIVE OPERATIONS +------------------------------ + +The reduction functions ( MPI_Op ) do not return an error value. As a +result, if the functions detect an error, all they can do is either call +:ref:`MPI_Abort` or silently skip the problem. Thus, if you change the error +handler from MPI_ERRORS_ARE_FATAL to something else, for example, +MPI_ERRORS_RETURN , then no error may be indicated. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_Allreduce_init.3.rst b/docs/man-openmpi/man3/MPI_Allreduce_init.3.rst new file mode 100644 index 00000000000..752100d6c60 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Allreduce_init.3.rst @@ -0,0 +1,9 @@ +.. _mpi_allreduce_init: + +MPI_Allreduce_init +================== + .. include_body + +.. include:: ../man3/MPI_Allreduce.3.rst + :start-after: .. 
include_body + diff --git a/docs/man-openmpi/man3/MPI_Alltoall.3.rst b/docs/man-openmpi/man3/MPI_Alltoall.3.rst new file mode 100644 index 00000000000..bbaa119af26 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Alltoall.3.rst @@ -0,0 +1,208 @@ +.. _mpi_alltoall: + + +MPI_Alltoall +============ + +.. include_body + +:ref:`MPI_Alltoall`, :ref:`MPI_Ialltoall`, :ref:`MPI_Alltoall_init` - All processes send +data to all processes + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Alltoall(const void *sendbuf, int sendcount, + MPI_Datatype sendtype, void *recvbuf, int recvcount, + MPI_Datatype recvtype, MPI_Comm comm) + + int MPI_Ialltoall(const void *sendbuf, int sendcount, + MPI_Datatype sendtype, void *recvbuf, int recvcount, + MPI_Datatype recvtype, MPI_Comm comm, MPI_Request *request) + + int MPI_Alltoall_init(const void *sendbuf, int sendcount, + MPI_Datatype sendtype, void *recvbuf, int recvcount, + MPI_Datatype recvtype, MPI_Comm comm, MPI_Info info, MPI_Request *request) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_ALLTOALL(SENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNT, + RECVTYPE, COMM, IERROR) + + SENDBUF(*), RECVBUF(*) + INTEGER SENDCOUNT, SENDTYPE, RECVCOUNT, RECVTYPE + INTEGER COMM, IERROR + + MPI_IALLTOALL(SENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNT, + RECVTYPE, COMM, REQUEST, IERROR) + + SENDBUF(*), RECVBUF(*) + INTEGER SENDCOUNT, SENDTYPE, RECVCOUNT, RECVTYPE + INTEGER COMM, REQUEST, IERROR + + MPI_ALLTOALL_INIT(SENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNT, + RECVTYPE, COMM, INFO, REQUEST, IERROR) + + SENDBUF(*), RECVBUF(*) + INTEGER SENDCOUNT, SENDTYPE, RECVCOUNT, RECVTYPE + INTEGER COMM, INFO, REQUEST, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE mpi_f08 + MPI_Alltoall(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, + comm, ierror) + + TYPE(*), DIMENSION(..), INTENT(IN) :: sendbuf + TYPE(*), DIMENSION(..) :: recvbuf + INTEGER, INTENT(IN) :: sendcount, recvcount + TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Ialltoall(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, + comm, request, ierror) + + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: sendbuf + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: recvbuf + INTEGER, INTENT(IN) :: sendcount, recvcount + TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Alltoall_init(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, + comm, info, request, ierror) + + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: sendbuf + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: recvbuf + INTEGER, INTENT(IN) :: sendcount, recvcount + TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Info), INTENT(IN) :: info + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``sendbuf``: Starting address of send buffer (choice). +* ``sendcount``: Number of elements to send to each process (integer). +* ``sendtype``: Datatype of send buffer elements (handle). +* ``recvcount``: Number of elements to receive from each process (integer). +* ``recvtype``: Datatype of receive buffer elements (handle). +* ``comm``: Communicator over which data is to be exchanged (handle). +* ``info``: Info (handle, persistent only) + +OUTPUT PARAMETERS +----------------- +* ``recvbuf``: Starting address of receive buffer (choice). +* ``request``: Request (handle, non-blocking only). 
+* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Alltoall` is a collective operation in which all processes send the +same amount of data to each other, and receive the same amount of data +from each other. The operation of this routine can be represented as +follows, where each process performs 2n (n being the number of processes +in communicator *comm*) independent point-to-point communications +(including communication with itself). + +:: + + MPI_Comm_size(comm, &n); + for (i = 0, i < n; i++) + MPI_Send(sendbuf + i * sendcount * extent(sendtype), + sendcount, sendtype, i, ..., comm); + for (i = 0, i < n; i++) + MPI_Recv(recvbuf + i * recvcount * extent(recvtype), + recvcount, recvtype, i, ..., comm); + +Each process breaks up its local *sendbuf* into n blocks - each +containing *sendcount* elements of type *sendtype* - and divides its +*recvbuf* similarly according to *recvcount* and *recvtype*. Process j +sends the k-th block of its local *sendbuf* to process k, which places +the data in the j-th block of its local *recvbuf*. The amount of data +sent must be equal to the amount of data received, pairwise, between +every pair of processes. + +WHEN COMMUNICATOR IS AN INTER-COMMUNICATOR + +When the communicator is an inter-communicator, the gather operation +occurs in two phases. The data is gathered from all the members of the +first group and received by all the members of the second group. Then +the data is gathered from all the members of the second group and +received by all the members of the first. The operation exhibits a +symmetric, full-duplex behavior. + +The first group defines the root process. The root process uses MPI_ROOT +as the value of *root*. All other processes in the first group use +MPI_PROC_NULL as the value of *root*. All processes in the second group +use the rank of the root process in the first group as the value of +*root*. 
+ +When the communicator is an intra-communicator, these groups are the +same, and the operation occurs in a single phase. + + +USE OF IN-PLACE OPTION +---------------------- + +When the communicator is an intracommunicator, you can perform an +all-to-all operation in-place (the output buffer is used as the input +buffer). Use the variable MPI_IN_PLACE as the value of *sendbuf*. In +this case, *sendcount* and *sendtype* are ignored. The input data of +each process is assumed to be in the area where that process would +receive its own contribution to the receive buffer. + + +NOTES +----- + +All arguments on all processes are significant. The *comm* argument, in +particular, must describe the same communicator on all processes. + +There are two MPI library functions that are more general than +:ref:`MPI_Alltoall`. :ref:`MPI_Alltoallv` allows all-to-all communication to and from +buffers that need not be contiguous; different processes may send and +receive different amounts of data. :ref:`MPI_Alltoallw` expands :ref:`MPI_Alltoallv`'s +functionality to allow the exchange of data with different datatypes. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Alltoallv` :ref:`MPI_Alltoallw` diff --git a/docs/man-openmpi/man3/MPI_Alltoall_init.3.rst b/docs/man-openmpi/man3/MPI_Alltoall_init.3.rst new file mode 100644 index 00000000000..7feb6da0e21 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Alltoall_init.3.rst @@ -0,0 +1,9 @@ +.. 
_mpi_alltoall_init: + +MPI_Alltoall_init +================= + .. include_body + +.. include:: ../man3/MPI_Alltoall.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Alltoallv.3.rst b/docs/man-openmpi/man3/MPI_Alltoallv.3.rst new file mode 100644 index 00000000000..e2d83720428 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Alltoallv.3.rst @@ -0,0 +1,228 @@ +.. _mpi_alltoallv: + + +MPI_Alltoallv +============= + +.. include_body + +:ref:`MPI_Alltoallv`, :ref:`MPI_Ialltoallv`, :ref:`MPI_Alltoallv_init` - All processes +send different amount of data to, and receive different amount of data +from, all processes + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Alltoallv(const void *sendbuf, const int sendcounts[], + const int sdispls[], MPI_Datatype sendtype, + void *recvbuf, const int recvcounts[], + const int rdispls[], MPI_Datatype recvtype, MPI_Comm comm) + + int MPI_Ialltoallv(const void *sendbuf, const int sendcounts[], + const int sdispls[], MPI_Datatype sendtype, + void *recvbuf, const int recvcounts[], + const int rdispls[], MPI_Datatype recvtype, MPI_Comm comm, + MPI_Request *request) + + int MPI_Alltoallv_init(const void *sendbuf, const int sendcounts[], + const int sdispls[], MPI_Datatype sendtype, + void *recvbuf, const int recvcounts[], + const int rdispls[], MPI_Datatype recvtype, MPI_Comm comm, + MPI_Info info, MPI_Request *request) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! 
or the older form: INCLUDE 'mpif.h' + MPI_ALLTOALLV(SENDBUF, SENDCOUNTS, SDISPLS, SENDTYPE, + RECVBUF, RECVCOUNTS, RDISPLS, RECVTYPE, COMM, IERROR) + + SENDBUF(*), RECVBUF(*) + INTEGER SENDCOUNTS(*), SDISPLS(*), SENDTYPE + INTEGER RECVCOUNTS(*), RDISPLS(*), RECVTYPE + INTEGER COMM, IERROR + + MPI_IALLTOALLV(SENDBUF, SENDCOUNTS, SDISPLS, SENDTYPE, + RECVBUF, RECVCOUNTS, RDISPLS, RECVTYPE, COMM, REQUEST, IERROR) + + SENDBUF(*), RECVBUF(*) + INTEGER SENDCOUNTS(*), SDISPLS(*), SENDTYPE + INTEGER RECVCOUNTS(*), RDISPLS(*), RECVTYPE + INTEGER COMM, REQUEST, IERROR + + MPI_ALLTOALLV_INIT(SENDBUF, SENDCOUNTS, SDISPLS, SENDTYPE, + RECVBUF, RECVCOUNTS, RDISPLS, RECVTYPE, COMM, INFO, REQUEST, IERROR) + + SENDBUF(*), RECVBUF(*) + INTEGER SENDCOUNTS(*), SDISPLS(*), SENDTYPE + INTEGER RECVCOUNTS(*), RDISPLS(*), RECVTYPE + INTEGER COMM, INFO, REQUEST, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Alltoallv(sendbuf, sendcounts, sdispls, sendtype, recvbuf, recvcounts, + rdispls, recvtype, comm, ierror) + + TYPE(*), DIMENSION(..), INTENT(IN) :: sendbuf + TYPE(*), DIMENSION(..) 
:: recvbuf + INTEGER, INTENT(IN) :: sendcounts(*), sdispls(*), recvcounts(*), + rdispls(*) + TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Ialltoallv(sendbuf, sendcounts, sdispls, sendtype, recvbuf, recvcounts, + rdispls, recvtype, comm, request, ierror) + + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: sendbuf + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: recvbuf + INTEGER, INTENT(IN), ASYNCHRONOUS :: sendcounts(*), sdispls(*), + recvcounts(*), rdispls(*) + TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Alltoallv_init(sendbuf, sendcounts, sdispls, sendtype, recvbuf, recvcounts, + rdispls, recvtype, comm, info, request, ierror) + + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: sendbuf + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: recvbuf + INTEGER, INTENT(IN), ASYNCHRONOUS :: sendcounts(*), sdispls(*), + recvcounts(*), rdispls(*) + TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Info), INTENT(IN) :: info + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``sendbuf``: Starting address of send buffer. +* ``sendcounts``: Integer array, where entry i specifies the number of elements to send to rank i. +* ``sdispls``: Integer array, where entry i specifies the displacement (offset from *sendbuf*, in units of *sendtype*) from which to send data to rank i. +* ``sendtype``: Datatype of send buffer elements. +* ``recvcounts``: Integer array, where entry j specifies the number of elements to receive from rank j. +* ``rdispls``: Integer array, where entry j specifies the displacement (offset from *recvbuf*, in units of *recvtype*) to which data from rank j should be written. 
+* ``recvtype``: Datatype of receive buffer elements. +* ``comm``: Communicator over which data is to be exchanged. +* ``info``: Info (handle, persistent only) + +OUTPUT PARAMETERS +----------------- +* ``recvbuf``: Address of receive buffer. +* ``request``: Request (handle, non-blocking only). +* ``IERROR``: Fortran only: Error status. + +DESCRIPTION +----------- + +:ref:`MPI_Alltoallv` is a generalized collective operation in which all +processes send data to and receive data from all other processes. It +adds flexibility to :ref:`MPI_Alltoall` by allowing the user to specify data to +send and receive vector-style (via a displacement and element count). +The operation of this routine can be thought of as follows, where each +process performs 2n (n being the number of processes in communicator +*comm*) independent point-to-point communications (including +communication with itself). + +:: + + MPI_Comm_size(comm, &n); + for (i = 0, i < n; i++) + MPI_Send(sendbuf + sdispls[i] * extent(sendtype), + sendcounts[i], sendtype, i, ..., comm); + for (i = 0, i < n; i++) + MPI_Recv(recvbuf + rdispls[i] * extent(recvtype), + recvcounts[i], recvtype, i, ..., comm); + +Process j sends the k-th block of its local *sendbuf* to process k, +which places the data in the j-th block of its local *recvbuf*. + +When a pair of processes exchanges data, each may pass different element +count and datatype arguments so long as the sender specifies the same +amount of data to send (in bytes) as the receiver expects to receive. + +Note that process i may send a different amount of data to process j +than it receives from process j. Also, a process may send entirely +different amounts of data to different processes in the communicator. + +WHEN COMMUNICATOR IS AN INTER-COMMUNICATOR + +When the communicator is an inter-communicator, the gather operation +occurs in two phases. The data is gathered from all the members of the +first group and received by all the members of the second group. 
Then +the data is gathered from all the members of the second group and +received by all the members of the first. The operation exhibits a +symmetric, full-duplex behavior. + +The first group defines the root process. The root process uses MPI_ROOT +as the value of *root*. All other processes in the first group use +MPI_PROC_NULL as the value of *root*. All processes in the second group +use the rank of the root process in the first group as the value of +*root*. + +When the communicator is an intra-communicator, these groups are the +same, and the operation occurs in a single phase. + + +USE OF IN-PLACE OPTION +---------------------- + +When the communicator is an intracommunicator, you can perform an +all-to-all operation in-place (the output buffer is used as the input +buffer). Use the variable MPI_IN_PLACE as the value of *sendbuf*. In +this case, *sendcounts*, *sdispls*, and *sendtype* are ignored. The +input data of each process is assumed to be in the area where that +process would receive its own contribution to the receive buffer. + + +NOTES +----- + +The specification of counts and displacements should not cause any +location to be written more than once. + +All arguments on all processes are significant. The *comm* argument, in +particular, must describe the same communicator on all processes. + +The offsets of *sdispls* and *rdispls* are measured in units of +*sendtype* and *recvtype*, respectively. Compare this to :ref:`MPI_Alltoallw`, +where these offsets are measured in bytes. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. 
The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Alltoall` :ref:`MPI_Alltoallw` diff --git a/docs/man-openmpi/man3/MPI_Alltoallv_init.3.rst b/docs/man-openmpi/man3/MPI_Alltoallv_init.3.rst new file mode 100644 index 00000000000..3c4bdd8690c --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Alltoallv_init.3.rst @@ -0,0 +1,9 @@ +.. _mpi_alltoallv_init: + +MPI_Alltoallv_init +================== + .. include_body + +.. include:: ../man3/MPI_Alltoallv.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Alltoallw.3.rst b/docs/man-openmpi/man3/MPI_Alltoallw.3.rst new file mode 100644 index 00000000000..86b585e17f3 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Alltoallw.3.rst @@ -0,0 +1,232 @@ +.. _mpi_alltoallw: + + +MPI_Alltoallw +============= + +.. include_body + +:ref:`MPI_Alltoallw`, :ref:`MPI_Ialltoallw`, :ref:`MPI_Alltoallw_init` - All processes +send data of different types to, and receive data of different types +from, all processes + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. 
code-block:: c + + #include + + int MPI_Alltoallw(const void *sendbuf, const int sendcounts[], + const int sdispls[], const MPI_Datatype sendtypes[], + void *recvbuf, const int recvcounts[], const int rdispls[], + const MPI_Datatype recvtypes[], MPI_Comm comm) + + int MPI_Ialltoallw(const void *sendbuf, const int sendcounts[], + const int sdispls[], const MPI_Datatype sendtypes[], + void *recvbuf, const int recvcounts[], const int rdispls[], + const MPI_Datatype recvtypes[], MPI_Comm comm, + MPI_Request *request) + + int MPI_Alltoallw_init(const void *sendbuf, const int sendcounts[], + const int sdispls[], const MPI_Datatype sendtypes[], + void *recvbuf, const int recvcounts[], const int rdispls[], + const MPI_Datatype recvtypes[], MPI_Comm comm, MPI_Info info, + MPI_Request *request) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_ALLTOALLW(SENDBUF, SENDCOUNTS, SDISPLS, SENDTYPES, + RECVBUF, RECVCOUNTS, RDISPLS, RECVTYPES, COMM, IERROR) + + SENDBUF(*), RECVBUF(*) + INTEGER SENDCOUNTS(*), SDISPLS(*), SENDTYPES(*) + INTEGER RECVCOUNTS(*), RDISPLS(*), RECVTYPES(*) + INTEGER COMM, IERROR + + MPI_IALLTOALLW(SENDBUF, SENDCOUNTS, SDISPLS, SENDTYPES, + RECVBUF, RECVCOUNTS, RDISPLS, RECVTYPES, COMM, REQUEST, IERROR) + + SENDBUF(*), RECVBUF(*) + INTEGER SENDCOUNTS(*), SDISPLS(*), SENDTYPES(*) + INTEGER RECVCOUNTS(*), RDISPLS(*), RECVTYPES(*) + INTEGER COMM, REQUEST, IERROR + + MPI_ALLTOALLW_INIT(SENDBUF, SENDCOUNTS, SDISPLS, SENDTYPES, + RECVBUF, RECVCOUNTS, RDISPLS, RECVTYPES, COMM, INFO, REQUEST, IERROR) + + SENDBUF(*), RECVBUF(*) + INTEGER SENDCOUNTS(*), SDISPLS(*), SENDTYPES(*) + INTEGER RECVCOUNTS(*), RDISPLS(*), RECVTYPES(*) + INTEGER COMM, INFO, REQUEST, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE mpi_f08 + MPI_Alltoallw(sendbuf, sendcounts, sdispls, sendtypes, recvbuf, recvcounts, + rdispls, recvtypes, comm, ierror) + + TYPE(*), DIMENSION(..), INTENT(IN) :: sendbuf + TYPE(*), DIMENSION(..) :: recvbuf + INTEGER, INTENT(IN) :: sendcounts(*), sdispls(*), recvcounts(*), + rdispls(*) + TYPE(MPI_Datatype), INTENT(IN) :: sendtypes(*) + TYPE(MPI_Datatype), INTENT(IN) :: recvtypes(*) + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Ialltoallw(sendbuf, sendcounts, sdispls, sendtypes, recvbuf, + recvcounts, rdispls, recvtypes, comm, request, ierror) + + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: sendbuf + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: recvbuf + INTEGER, INTENT(IN), ASYNCHRONOUS :: sendcounts(*), sdispls(*), + recvcounts(*), rdispls(*) + TYPE(MPI_Datatype), INTENT(IN), ASYNCHRONOUS :: sendtypes(*), + recvtypes(*) + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Alltoallw_init(sendbuf, sendcounts, sdispls, sendtypes, recvbuf, + recvcounts, rdispls, recvtypes, comm, info, request, ierror) + + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: sendbuf + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: recvbuf + INTEGER, INTENT(IN), ASYNCHRONOUS :: sendcounts(*), sdispls(*), + recvcounts(*), rdispls(*) + TYPE(MPI_Datatype), INTENT(IN), ASYNCHRONOUS :: sendtypes(*), + recvtypes(*) + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Info), INTENT(IN) :: info + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``sendbuf``: Starting address of send buffer. +* ``sendcounts``: Integer array, where entry i specifies the number of elements to send to rank i.
+* ``sdispls``: Integer array, where entry i specifies the displacement (in bytes, offset from *sendbuf*) from which to send data to rank i. +* ``sendtypes``: Datatype array, where entry i specifies the datatype to use when sending data to rank i. +* ``recvcounts``: Integer array, where entry j specifies the number of elements to receive from rank j. +* ``rdispls``: Integer array, where entry j specifies the displacement (in bytes, offset from *recvbuf*) to which data from rank j should be written. +* ``recvtypes``: Datatype array, where entry j specifies the datatype to use when receiving data from rank j. +* ``comm``: Communicator over which data is to be exchanged. +* ``info``: Info (handle, persistent only) + +OUTPUT PARAMETERS +----------------- +* ``recvbuf``: Address of receive buffer. +* ``request``: Request (handle, non-blocking only). +* ``IERROR``: Fortran only: Error status. + +DESCRIPTION +----------- + +:ref:`MPI_Alltoallw` is a generalized collective operation in which all +processes send data to and receive data from all other processes. It +adds flexibility to :ref:`MPI_Alltoallv` by allowing the user to specify the +datatype of individual data blocks (in addition to displacement and +element count). Its operation can be thought of in the following way, +where each process performs 2n (n being the number of processes in +communicator *comm*) independent point-to-point communications +(including communication with itself).
+ +:: + + MPI_Comm_size(comm, &n); + for (i = 0, i < n; i++) + MPI_Send(sendbuf + sdispls[i], sendcounts[i], + sendtypes[i], i, ..., comm); + for (i = 0, i < n; i++) + MPI_Recv(recvbuf + rdispls[i], recvcounts[i], + recvtypes[i], i, ..., comm); + +Process j sends the k-th block of its local *sendbuf* to process k, +which places the data in the j-th block of its local *recvbuf*. + +When a pair of processes exchanges data, each may pass different element +count and datatype arguments so long as the sender specifies the same +amount of data to send (in bytes) as the receiver expects to receive. + +Note that process i may send a different amount of data to process j +than it receives from process j. Also, a process may send entirely +different amounts and types of data to different processes in the +communicator. + +WHEN COMMUNICATOR IS AN INTER-COMMUNICATOR + +When the communicator is an inter-communicator, the gather operation +occurs in two phases. The data is gathered from all the members of the +first group and received by all the members of the second group. Then +the data is gathered from all the members of the second group and +received by all the members of the first. The operation exhibits a +symmetric, full-duplex behavior. + +The first group defines the root process. The root process uses MPI_ROOT +as the value of *root*. All other processes in the first group use +MPI_PROC_NULL as the value of *root*. All processes in the second group +use the rank of the root process in the first group as the value of +*root*. + +When the communicator is an intra-communicator, these groups are the +same, and the operation occurs in a single phase. + + +USE OF IN-PLACE OPTION +---------------------- + +When the communicator is an intracommunicator, you can perform an +all-to-all operation in-place (the output buffer is used as the input +buffer). Use the variable MPI_IN_PLACE as the value of *sendbuf*. In +this case, *sendcounts*, *sdispls*, and *sendtypes* are ignored.
The +input data of each process is assumed to be in the area where that +process would receive its own contribution to the receive buffer. + + +NOTES +----- + +The specification of counts, types, and displacements should not cause +any location to be written more than once. + +All arguments on all processes are significant. The *comm* argument, in +particular, must describe the same communicator on all processes. + +The offsets of *sdispls* and *rdispls* are measured in bytes. Compare +this to :ref:`MPI_Alltoallv`, where these offsets are measured in units of +*sendtype* and *recvtype*, respectively. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Alltoall` :ref:`MPI_Alltoallv` diff --git a/docs/man-openmpi/man3/MPI_Alltoallw_init.3.rst b/docs/man-openmpi/man3/MPI_Alltoallw_init.3.rst new file mode 100644 index 00000000000..1c67a0c672c --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Alltoallw_init.3.rst @@ -0,0 +1,9 @@ +.. _mpi_alltoallw_init: + +MPI_Alltoallw_init +================== + .. include_body + +.. include:: ../man3/MPI_Alltoallw.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Attr_delete.3.rst b/docs/man-openmpi/man3/MPI_Attr_delete.3.rst new file mode 100644 index 00000000000..3ebe031d233 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Attr_delete.3.rst @@ -0,0 +1,90 @@ +.. _mpi_attr_delete: + + +MPI_Attr_delete +=============== + +.. 
#include <mpi.h>

   int MPI_Attr_delete(MPI_Comm comm, int keyval)
+ +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Comm_delete_attr` diff --git a/docs/man-openmpi/man3/MPI_Attr_get.3.rst b/docs/man-openmpi/man3/MPI_Attr_get.3.rst new file mode 100644 index 00000000000..8e345e52b24 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Attr_get.3.rst @@ -0,0 +1,79 @@ +.. _mpi_attr_get: + + +MPI_Attr_get +============ + +.. include_body + +:ref:`MPI_Attr_get` - Retrieves attribute value by key -- use of this +routine is deprecated. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Attr_get(MPI_Comm comm, int keyval,void *attribute_val, + int *flag ) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + INCLUDE 'mpif.h' + MPI_ATTR_GET(COMM, KEYVAL, ATTRIBUTE_VAL, FLAG, IERROR) + INTEGER COMM, KEYVAL, ATTRIBUTE_VAL, IERROR + LOGICAL FLAG + + +INPUT PARAMETERS +---------------- +* ``comm``: Communicator to which attribute is attached (handle). +* ``keyval``: Key value (integer). + +OUTPUT PARAMETERS +----------------- +* ``attribute_val``: Attribute value, unless flag = false. +* ``flag``: True if an attribute value was extracted; false if no attribute is associated with the key. +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +Note that use of this routine is *deprecated* as of MPI-2, and was +*deleted* in MPI-3. Please use :ref:`MPI_Comm_get_attr`. This function does not +have a mpi_f08 binding. + +Retrieves attribute value by key. The call is erroneous if there is no +key with value keyval. 
On the other hand, the call is correct if the key +value exists, but no attribute is attached on comm for that key; in such +case, the call returns flag = false. In particular MPI_KEYVAL_INVALID is +an erroneous key value. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Comm_get_attr` diff --git a/docs/man-openmpi/man3/MPI_Attr_put.3.rst b/docs/man-openmpi/man3/MPI_Attr_put.3.rst new file mode 100644 index 00000000000..811f1d28e74 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Attr_put.3.rst @@ -0,0 +1,95 @@ +.. _mpi_attr_put: + + +MPI_Attr_put +============ + +.. include_body + +:ref:`MPI_Attr_put` - Stores attribute value associated with a key -- use +of this routine is deprecated. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Attr_put(MPI_Comm comm, int keyval, void *attribute_val) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + INCLUDE 'mpif.h' + MPI_ATTR_PUT(COMM, KEYVAL, ATTRIBUTE_VAL, IERROR) + INTEGER COMM, KEYVAL, ATTRIBUTE_VAL, IERROR + + +INPUT PARAMETERS +---------------- +* ``comm``: Communicator to which attribute will be attached (handle). +* ``keyval``: Key value, as returned by MPI_KEYVAL_CREATE (integer). +* ``attribute_val``: Attribute value. + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). 
+ +DESCRIPTION +----------- + +Note that use of this routine is *deprecated as of MPI-2, and* was +*deleted in MPI-3. Please use :ref:`MPI_Comm_set_attr`. This* function does not +have a mpi_f08 binding. + +:ref:`MPI_Attr_put` stores the stipulated attribute value attribute_val for +subsequent retrieval by :ref:`MPI_Attr_get`. If the value is already present, +then the outcome is as if :ref:`MPI_Attr_delete` was first called to delete the +previous value (and the callback function delete_fn was executed), and a +new value was next stored. The call is erroneous if there is no key with +value keyval; in particular MPI_KEYVAL_INVALID is an erroneous key +value. The call will fail if the delete_fn function returned an error +code other than MPI_SUCCESS. + + +NOTES +----- + +Values of the permanent attributes MPI_TAG_UB, MPI_HOST, MPI_IO, and +MPI_WTIME_IS_GLOBAL may not be changed. + +The type of the attribute value depends on whether C or Fortran is being +used. In C, an attribute value is a pointer (void \*); in Fortran, it is +a single integer (not a pointer, since Fortran has no pointers and there +are systems for which a pointer does not fit in an integer, e.g., any +32-bit address system that uses 64 bits for Fortran DOUBLE PRECISION). + +If an attribute is already present, the delete function (specified when +the corresponding keyval was created) will be called. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. 
seealso:: + :ref:`MPI_Comm_set_attr` diff --git a/docs/man-openmpi/man3/MPI_Barrier.3.rst b/docs/man-openmpi/man3/MPI_Barrier.3.rst new file mode 100644 index 00000000000..10f48de1f9a --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Barrier.3.rst @@ -0,0 +1,97 @@ +.. _mpi_barrier: + +MPI_Barrier +=========== + +.. include_body + +:ref:`MPI_Barrier`, :ref:`MPI_Ibarrier` - Synchronization between MPI processes in a +group + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. code:: C + + #include + + int MPI_Barrier(MPI_Comm) + int MPI_Ibarrier(MPI_Comm comm, MPI_Request *request) + int MPI_barrier_init(MPI_Comm comm, MPI_Info info, MPI_Request *request) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: Fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_BARRIER(COMM, IERROR) + INTEGER COMM, IERROR + MPI_IBARRIER(COMM, REQUEST, IERROR) + INTEGER COMM, REQUEST, IERROR + MPI_BARRIER_INIT(COMM, INFO, REQUEST, IERROR) + INTEGER COMM, INFO, REQUEST, IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code:: Fortran + + USE mpi_f08 + MPI_Barrier(comm, ierror) + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + MPI_Ibarrier(comm, request, ierror) + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Request), INTENT (OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + MPI_Barrier_init(comm, info, request, ierror) + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Info), INTENT(IN) :: info + TYPE(MPI_Request), INTENT (OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input Parameter +--------------- + +- comm : Communicator (handle). +- info : Info (handle, persistent only). + +Output Parameters +----------------- + +- request : Request (handle, non-blocking only). +- IERROR : Fortran only: Error status (integer). + +Description +----------- + +An MPI barrier completes after all groups members have entered the +barrier. 
+ +When Communicator is an Inter-Communicator +------------------------------------------ + +When the communicator is an inter-communicator, the barrier operation is +performed across all processes in both groups. All processes in the +first group may exit the barrier when all processes in the second group +have entered the barrier. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. Before the +error value is returned, the current MPI error handler is called. By +default, this error handler aborts the MPI job, except for I/O function +errors. The error handler may be changed with :ref:`MPI_Comm_set_errhandler`; +the predefined error handler MPI_ERRORS_RETURN may be used to cause +error values to be returned. Note that MPI does not guarantee that an +MPI program can continue past an error. + + +.. seealso:: :ref:`MPI_Bcast` diff --git a/docs/man-openmpi/man3/MPI_Barrier_init.3.rst b/docs/man-openmpi/man3/MPI_Barrier_init.3.rst new file mode 100644 index 00000000000..09e3adc6451 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Barrier_init.3.rst @@ -0,0 +1,9 @@ +.. _mpi_barrier_init: + +MPI_Barrier_init +================ + .. include_body + +.. include:: ../man3/MPI_Barrier.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Bcast.3.rst b/docs/man-openmpi/man3/MPI_Bcast.3.rst new file mode 100644 index 00000000000..483490e0348 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Bcast.3.rst @@ -0,0 +1,140 @@ +.. _mpi_bcast: + +MPI_Bcast +========= + +.. include_body + +:ref:`MPI_Bcast`, :ref:`MPI_Ibcast` - Broadcasts a message from the process +with rank *root* to all other processes of the group. + +SYNTAX +------ + +C Syntax +^^^^^^^^ + +.. 
code:: C + + #include + + int MPI_Bcast(void *buffer, int count, MPI_Datatype datatype, + int root, MPI_Comm comm) + + int MPI_Ibcast(void *buffer, int count, MPI_Datatype datatype, + int root, MPI_Comm comm, MPI_Request *request) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: Fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_BCAST(BUFFER, COUNT, DATATYPE, ROOT, COMM, IERROR) + BUFFER(*) + INTEGER COUNT, DATATYPE, ROOT, COMM, IERROR + + MPI_IBCAST(BUFFER, COUNT, DATATYPE, ROOT, COMM, REQUEST, IERROR) + BUFFER(*) + INTEGER COUNT, DATATYPE, ROOT, COMM, REQUEST, IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code:: Fortran + + USE mpi_f08 + MPI_Bcast(buffer, count, datatype, root, comm, ierror) + TYPE(*), DIMENSION(..) :: buffer + INTEGER, INTENT(IN) :: count, root + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Ibcast(buffer, count, datatype, root, comm, request, ierror) + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: buffer + INTEGER, INTENT(IN) :: count, root + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +INPUT/OUTPUT PARAMETERS +----------------------- + +- ``buffer``: Starting address of buffer (choice). +- ``count``: Number of entries in buffer (integer). +- ``datatype``: Data type of buffer (handle). +- ``root``: Rank of broadcast root (integer). +- ``comm``: Communicator (handle). + +OUTPUT PARAMETERS +----------------- + +- ``request``: Request (handle, non-blocking only). +- ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Bcast` broadcasts a message from the process with rank root to +all processes of the group, itself included. It is called by all members +of group using the same arguments for ``comm``, ``root``. 
On return, the +contents of root's communication buffer has been copied to all +processes. + +General, derived datatypes are allowed for datatype. The type signature +of count, datatype on any process must be equal to the type signature o +f count, datatype at the root. This implies that the amount of data sent +must be equal to the amount received, pairwise between each process and +the root. :ref:`MPI_Bcast` and all other data-movement collective routines +make this restriction. Distinct type maps between sender and receiver +are still allowed. + +**Example:** Broadcast 100 ints from process 0 to every process in the +group. + +.. code:: C + + MPI_Comm comm; + int array[100]; + int root=0; + //... + MPI_Bcast( array, 100, MPI_INT, root, comm); + +As in many of our sample code fragments, we assume that some of the +variables (such as comm in the example above) have been assigned +appropriate values. + +WHEN COMMUNICATOR IS AN INTER-COMMUNICATOR +------------------------------------------ + +When the communicator is an inter-communicator, the root process in the +first group broadcasts data to all the processes in the second group. +The first group defines the root process. That process uses MPI_ROOT +as the value of its ``root`` argument. The remaining processes use +MPI_PROC_NULL as the value of their ``root`` argument. All processes +in the second group use the rank of that root process in the first group +as the value of their ``root`` argument. The receive buffer arguments of +the processes in the second group must be consistent with the send +buffer argument of the root process in the first group. + +NOTES +----- + +This function does not support the in-place option. + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. 
By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler +MPI_ERRORS_RETURN may be used to cause error values to be returned. +Note that MPI does not guarantee that an MPI program can continue past +an error. diff --git a/docs/man-openmpi/man3/MPI_Bcast_init.3.rst b/docs/man-openmpi/man3/MPI_Bcast_init.3.rst new file mode 100644 index 00000000000..e2081f4893b --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Bcast_init.3.rst @@ -0,0 +1,9 @@ +.. _mpi_bcast_init: + +MPI_Bcast_init +============== + .. include_body + +.. include:: ../man3/MPI_Bcast.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Bsend.3.rst b/docs/man-openmpi/man3/MPI_Bsend.3.rst new file mode 100644 index 00000000000..08b13068440 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Bsend.3.rst @@ -0,0 +1,110 @@ +.. _mpi_bsend: + +MPI_Bsend +========= + +.. include_body + +:ref:`MPI_Bsend` - Basic send with user-specified buffering. + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. code:: c + + #include + + int MPI_Bsend(const void *buf, int count, MPI_Datatype datatype, + int dest, int tag, MPI_Comm comm) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_BSEND(BUF, COUNT,DATATYPE, DEST, TAG, COMM, IERROR) + BUF(*) + INTEGER COUNT, DATATYPE, DEST, TAG, COMM, IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code:: fortran + + USE mpi_f08 + MPI_Bsend(buf, count, datatype, dest, tag, comm, ierror) + TYPE(*), DIMENSION(..), INTENT(IN) :: buf + INTEGER, INTENT(IN) :: count, dest, tag + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input Parameters +---------------- + +- ``buf`` : Initial address of send buffer (choice). +- ``count`` : Number of entries in send buffer (nonnegative integer). 
+- ``datatype`` : Datatype of each send buffer element (handle). +- ``dest`` : Rank of destination (integer). +- ``tag`` : Message tag (integer). +- ``comm`` : Communicator (handle). + +Output Parameters +----------------- + +- ``IERROR`` : Fortran only: Error status (integer). + +Description +----------- + +:ref:`MPI_Bsend` performs a buffered-mode, blocking send. + +Notes +----- + +This send is provided as a convenience function; it allows the user to +send messages without worrying about where they are buffered (because +the user must have provided buffer space with :ref:`MPI_Buffer_attach`). + +In deciding how much buffer space to allocate, remember that the buffer +space is not available for reuse by subsequent :ref:`MPI_Bsend`\ s unless +you are certain that the message has been received (not just that it +should have been received). For example, this code does not allocate +enough buffer space: + +.. code:: c + + MPI_Buffer_attach( b, n*sizeof(double) + MPI_BSEND_OVERHEAD ); + for (i=0; i + + int MPI_Bsend_init(const void *buf, int count, MPI_Datatype datatype, + int dest, int tag, MPI_Comm comm, MPI_Request *request) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: Fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_BSEND_INIT(BUF, COUNT, DATATYPE, DEST, TAG, COMM, REQUEST, IERROR) + BUF(*) + INTEGER COUNT, DATATYPE, DEST, TAG, + INTEGER COMM, REQUEST, IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code:: Fortran + + USE mpi_f08 + + MPI_Bsend_init(buf, count, datatype, dest, tag, comm, request, ierror) + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: buf + INTEGER, INTENT(IN) :: count, dest, tag + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input Parameters +---------------- + +- buf : Initial address of send buffer (choice). +- count : Number of elements sent (integer). 
+- datatype : Type of each element (handle). +- dest : Rank of destination (integer). +- tag : Message tag (integer). +- comm : Communicator (handle). + +Output Parameters +----------------- + +- request : Communication request (handle). +- IERROR : Fortran only: Error status (integer). + +Description +----------- + +Creates a persistent communication request for a buffered mode send, and +binds to it all the arguments of a send operation. + +A communication (send or receive) that uses a persistent request is +initiated by the function :ref:`MPI_Start`. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: :ref:`MPI_Send_init` :ref:`MPI_Start` diff --git a/docs/man-openmpi/man3/MPI_Buffer_attach.3.rst b/docs/man-openmpi/man3/MPI_Buffer_attach.3.rst new file mode 100644 index 00000000000..86b547bdd7b --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Buffer_attach.3.rst @@ -0,0 +1,100 @@ +.. _mpi_buffer_attach: + +MPI_Buffer_attach +================= + +.. include_body + +:ref:`MPI_Buffer_attach` - Attaches a user-defined buffer for sending. + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. code:: C + + #include + + int MPI_Buffer_attach(void *buf, int size) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: Fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_BUFFER_ATTACH(BUF, SIZE, IERROR) + BUF(*) + INTEGER SIZE, IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code:: Fortran + + USE mpi_f08 + + MPI_Buffer_attach(buffer, size, ierror) + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: buffer + INTEGER, INTENT(IN) :: size + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input Parameters +---------------- + +- buf : Initial buffer address (choice). +- size : Buffer size, in bytes (integer). + +Output Parameter +---------------- + +- IERROR : Fortran only: Error status (integer). + +Description +----------- + +Provides to MPI a buffer in the user's memory to be used for buffering +outgoing messages. The buffer is used only by messages sent in buffered +mode. Only one buffer can be attached to a process at a time. + +Notes +----- + +The size given should be the sum of the sizes of all outstanding Bsends +that you intend to have, plus MPI_BSEND_OVERHEAD bytes for each Bsend +that you do. For the purposes of calculating size, you should use +:ref:`MPI_Pack_size`. In other words, in the code + +c MPI_Buffer_attach( buf, size ); MPI_Bsend( ..., count=20, +datatype=type1, ... ); //... MPI_Bsend( ..., count=40, datatype=type2, +... ); + +the value of size in the :ref:`MPI_Buffer_attach` call should be greater than +the value computed by + +c MPI_Pack_size( 20, type1, comm, &s1 ); MPI_Pack_size( 40, type2, comm, +&s2 ); size = s1 + s2 + 2 \* MPI_BSEND_OVERHEAD; + +MPI_BSEND_OVERHEAD gives the maximum amount of buffer space that may be +used by the Bsend routines. This value is in mpi.h for C and mpif.h for +Fortran. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. 
Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: :ref:`MPI_Buffer_detach` diff --git a/docs/man-openmpi/man3/MPI_Buffer_detach.3.rst b/docs/man-openmpi/man3/MPI_Buffer_detach.3.rst new file mode 100644 index 00000000000..cbafb87e651 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Buffer_detach.3.rst @@ -0,0 +1,114 @@ +.. _mpi_buffer_detach: + +MPI_Buffer_detach +================= + +.. include_body + +:ref:`MPI_Buffer_detach` - Removes an existing buffer (for use in in :ref:`MPI_Bsend`, +etc.) + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. code:: C + + #include + + int MPI_Buffer_detach(void *buf, int *size) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: Fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_BUFFER_DETACH(BUF, SIZE, IERROR) + BUF(*) + INTEGER SIZE, IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code:: Fortran + + USE mpi_f08 + + MPI_Buffer_detach(buffer_addr, size, ierror) + USE, INTRINSIC :: ISO_C_BINDING, ONLY + TYPE(C_PTR), INTENT(OUT) :: buffer_addr + INTEGER, INTENT(OUT) :: size + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Output Parameters +----------------- + +- buf : Initial buffer address (choice). +- size : Buffer size, in bytes (integer). +- IERROR : Fortran only: Error status (integer). + +Description +----------- + +Detach the buffer currently associated with MPI. The call returns the +address and the size of the detached buffer. This operation will block +until all messages currently in the buffer have been transmitted. Upon +return of this function, the user may reuse or deallocate the space +taken by the buffer. + +Example: Calls to attach and detach buffers. 
c #define BUFFSIZE 10000 + +int size char *buff; MPI_Buffer_attach( malloc(BUFFSIZE), BUFFSIZE); /* +a buffer of 10000 bytes can now be used by :ref:`MPI_Bsend` */ +MPI_Buffer_detach( &buff, &size); /* Buffer size reduced to zero */ +MPI_Buffer_attach( buff, size); /* Buffer of 10000 bytes available again +\*/ + +Notes +----- + +The reason that :ref:`MPI_Buffer_detach` returns the address and size of the +buffer being detached is to allow nested libraries to replace and +restore the buffer. For example, consider c int size, mysize, idummy; +void \*ptr, \*myptr, *dummy; MPI_Buffer_detach( &ptr, &size ); +MPI_Buffer_attach( myptr, mysize ); /* ... library code ... \*/ +MPI_Buffer_detach( &dummy, &idummy ); MPI_Buffer_attach( ptr, size ); + +This is much like the action of the UNIX signal routine and has the same +strengths (it's simple) and weak‐nesses (it only works for nested +usages). + +For Fortran: The Fortran binding for this routine is different. Because +Fortran does not have pointers, it is impossible to provide a way to use +the output of this routine to exchange buffers. In this case, only the +size field is set. + +For C: Even though the buf argument is declared as void, it is really +the address of a void pointer. See Rationale, below, for more details. + +Even though the C functions :ref:`MPI_Buffer_attach` and :ref:`MPI_Buffer_detach` both +have a first argument of type void*, these arguments are used +differently: A pointer to the buffer is passed to :ref:`MPI_Buffer_attach`; the +address of the pointer is passed to :ref:`MPI_Buffer_detach`, so that this call +can return the pointer value. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. 
The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: :ref:`MPI_Buffer_attach` diff --git a/docs/man-openmpi/man3/MPI_Cancel.3.rst b/docs/man-openmpi/man3/MPI_Cancel.3.rst new file mode 100644 index 00000000000..c11b875913a --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Cancel.3.rst @@ -0,0 +1,123 @@ +.. _mpi_cancel: + +MPI_Cancel +========== + +.. include_body + +:ref:`MPI_Cancel` - Cancels a communication request. + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. code:: C + + #include + + int MPI_Cancel(MPI_Request *request) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: Fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_CANCEL(REQUEST, IERROR) + INTEGER REQUEST, IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code:: Fortran + + USE mpi_f08 + + MPI_Cancel(request, ierror) + TYPE(MPI_Request), INTENT(IN) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input Parameter +--------------- + +- request : Communication request (handle). + +Output Parameter +---------------- + +- IERROR : Fortran only: Error status (integer). + +Description +----------- + +The :ref:`MPI_Cancel` operation allows pending communications to be canceled. +This is required for cleanup. Posting a send or a receive ties up user +resources (send or receive buffers), and a cancel may be needed to free +these resources gracefully. + +A call to :ref:`MPI_Cancel` marks for cancellation a pending, nonblocking +communication operation (send or receive). The cancel call is local. It +returns immediately, possibly before the communication is actually +canceled. It is still necessary to complete a communication that has +been marked for cancellation, using a call to :ref:`MPI_Request_free`, +:ref:`MPI_Wait`, or :ref:`MPI_Test` (or any of the derived operations). 
+ +If a communication is marked for cancellation, then an :ref:`MPI_Wait` call for +that communication is guaranteed to return, irrespective of the +activities of other processes (i.e., :ref:`MPI_Wait` behaves as a local +function); similarly if :ref:`MPI_Test` is repeatedly called in a busy wait +loop for a canceled communication, then :ref:`MPI_Test` will eventually be +successful. + +:ref:`MPI_Cancel` can be used to cancel a communication that uses a persistent +request (see Section 3.9 in the MPI-1 Standard, "Persistent +Communication Requests") in the same way it is used for nonpersistent +requests. A successful cancellation cancels the active communication, +but not the request itself. After the call to :ref:`MPI_Cancel` and the +subsequent call to :ref:`MPI_Wait` or :ref:`MPI_Test`, the request becomes inactive +and can be activated for a new communication. + +The successful cancellation of a buffered send frees the buffer space +occupied by the pending message. + +Either the cancellation succeeds or the communication succeeds, but not +both. If a send is marked for cancellation, then it must be the case +that either the send completes normally, in which case the message sent +is received at the destination process, or that the send is successfully +canceled, in which case no part of the message is received at the +destination. Then, any matching receive has to be satisfied by another +send. If a receive is marked for cancellation, then it must be the case +that either the receive completes normally, or that the receive is +successfully canceled, in which case no part of the receive buffer is +altered. Then, any matching send has to be satisfied by another receive. + +If the operation has been canceled, then information to that effect will +be returned in the status argument of the operation that completes the +communication. 
+ +Notes +----- + +The primary expected use of :ref:`MPI_Cancel` is in multi-buffering schemes, +where speculative MPI_Irecvs are made. When the computation completes, +some of these requests may remain; using :ref:`MPI_Cancel` allows the user to +cancel these unsatisfied requests. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. Before the +error value is returned, the current MPI error handler is called. By +default, this error handler aborts the MPI job, except for I/O function +errors. The error handler may be changed with :ref:`MPI_Comm_set_errhandler`; +the predefined error handler MPI_ERRORS_RETURN may be used to cause +error values to be returned. Note that MPI does not guarantee that an +MPI program can continue past an error. + + +.. seealso:: :ref:`MPI_Probe` diff --git a/docs/man-openmpi/man3/MPI_Cart_coords.3.rst b/docs/man-openmpi/man3/MPI_Cart_coords.3.rst new file mode 100644 index 00000000000..cf55640955b --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Cart_coords.3.rst @@ -0,0 +1,81 @@ +.. _mpi_cart_coords: + +MPI_Cart_coords +=============== + +.. include_body + +:ref:`MPI_Cart_coords` - Determines process coords in Cartesian topology +given rank in group. + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. code:: c + + #include + + int MPI_Cart_coords(MPI_Comm comm, int rank, int maxdims, + int coords[]) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_CART_COORDS(COMM, RANK, MAXDIMS, COORDS, IERROR) + INTEGER COMM, RANK, MAXDIMS, COORDS(*), IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
- ``maxdims`` : Length of vector coords in the calling program
  (integer).

Output Parameters
-----------------

- ``coords`` : Integer array (of size ndims, which was defined by
  :ref:`MPI_Cart_create` call) containing the Cartesian coordinates of
  specified process (integer).
- ``IERROR`` : Fortran only: Error status (integer).

Description
-----------

:ref:`MPI_Cart_coords` provides a mapping of ``rank``\ s to Cartesian
coordinates.
#include <mpi.h>

   int MPI_Cart_create(MPI_Comm comm_old, int ndims, const int dims[],
                       const int periods[], int reorder, MPI_Comm *comm_cart)
#include <mpi.h>

   int MPI_Cart_get(MPI_Comm comm, int maxdims, int dims[], int periods[],
       int coords[])
#include <mpi.h>

   int MPI_Cart_map(MPI_Comm comm, int ndims, const int dims[],
       const int periods[], int *newrank)
include_body + +:ref:`MPI_Cart_map` - Maps process to Cartesian topology information. + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. code:: C + + #include + + int MPI_Cart_map(MPI_Comm comm, int ndims, const int dims[], + const int periods[], int *newrank) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: Fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_CART_MAP(COMM, NDIMS, DIMS, PERIODS, NEWRANK, IERROR) + INTEGER COMM, NDIMS, DIMS(*), NEWRANK, IERROR + LOGICAL PERIODS(*) + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code:: Fortran + + USE mpi_f08 + + MPI_Cart_map(comm, ndims, dims, periods, newrank, ierror) + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, INTENT(IN) :: ndims, dims(ndims) + LOGICAL, INTENT(IN) :: periods(ndims) + INTEGER, INTENT(OUT) :: newrank + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input Parameters +---------------- + +- comm : Input communicator (handle). +- ndims : Number of dimensions of Cartesian structure (integer). +- dims : Integer array of size ndims specifying the number of processes + in each coordinate direction. +- periods : Logical array of size ndims specifying the periodicity + specification in each coordinate direction. + +Output Parameters +----------------- + +- newrank : Reordered rank of the calling process; MPI_UNDEFINED if + calling process does not belong to grid (integer). +- IERROR : Fortran only: Error status (integer). + +Description +----------- + +:ref:`MPI_Cart_map` and :ref:`MPI_Graph_map` can be used to implement all other +topology functions. In general they will not be called by the user +directly, unless he or she is creating additional virtual topology +capability other than that provided by MPI. :ref:`MPI_Cart_map` computes an +"optimal" placement for the calling process on the physical machine. A +possible implementation of this function is to always return the rank of +the calling process, that is, not to perform any reordering. 
#include <mpi.h>

   int MPI_Cart_rank(MPI_Comm comm, int coords[], int *rank)
For a process group with Cartesian structure, the function :ref:`MPI_Cart_rank`
translates the logical process coordinates to process ranks as they are
used by the point-to-point routines. For dimension i with periods(i) =
true, if the coordinate, coords(i), is out of range, that is, coords(i)
< 0 or coords(i) >= dims(i), it is shifted back to the interval 0 <=
coords(i) < dims(i) automatically. Out-of-range coordinates are
erroneous for nonperiodic dimensions.
or the older form: INCLUDE 'mpif.h' + + MPI_CART_SHIFT(COMM, DIRECTION, DISP, RANK_SOURCE, + RANK_DEST, IERROR) + INTEGER COMM, DIRECTION, DISP, RANK_SOURCE + INTEGER RANK_DEST, IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code:: fortran + + USE mpi_f08 + + MPI_Cart_shift(comm, direction, disp, rank_source, rank_dest, ierror) + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, INTENT(IN) :: direction, disp + INTEGER, INTENT(OUT) :: rank_source, rank_dest + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input Parameters +---------------- + +- ``comm`` : Communicator with Cartesian structure (handle). +- ``direction`` : Coordinate dimension of shift (integer). +- ``disp`` : Displacement ( > 0: upward shift, < 0: downward shift) + (integer). + +Output Parameters +----------------- + +- ``rank_source`` : Rank of source process (integer). +- ``rank_dest`` : Rank of destination process (integer). +- ``IERROR`` : Fortran only: Error status (integer). + +Description +----------- + +If the process topology is a Cartesian structure, an :ref:`MPI_Sendrecv` +operation is likely to be used along a coordinate ``direction`` to +perform a shift of data. As input, :ref:`MPI_Sendrecv` takes the rank of a +source process for the receive, and the rank of a destination process +for the send. If the function :ref:`MPI_Cart_shift` is called for a +Cartesian process group, it provides the calling process with the above +identifiers, which then can be passed to :ref:`MPI_Sendrecv`. The user +specifies the coordinate ``direction`` and the size of the step +(positive or negative). The function is local. + +The ``direction`` argument indicates the dimension of the shift, i.e., +the coordinate whose value is modified by the shift. The coordinates are +numbered from 0 to ndims-1, where ndims is the number of dimensions. + +Note: The ``direction`` argument is in the range [0, n-1] for an +n-dimensional Cartesian mesh. 
+ +Depending on the periodicity of the Cartesian group in the specified +coordinate ``direction``, :ref:`MPI_Cart_shift` provides the identifiers +for a circular or an end-off shift. In the case of an end-off shift, the +value MPI_PROC_NULL may be returned in ``rank_source`` or +``rank_dest``, indicating that the source or the destination for the +shift is out of range. + +Example: The ``comm``\ unicator, ``comm``, has a two-dimensional, +periodic, Cartesian topology associated with it. A two-dimensional array +of REALs is stored one element per process, in variable A. One wishes to +skew this array, by shifting column i (vertically, i.e., along the +column) by i steps. + +.. code:: fortran + + ! find process rank + CALL MPI_COMM_RANK(comm, rank, ierr) + ! find Cartesian coordinates + CALL MPI_CART_COORDS(comm, rank, maxdims, coords, ierr) + ! compute shift source and destination + CALL MPI_CART_SHIFT(comm, 0, coords(2), source, dest, ierr) + ! skew array + CALL MPI_SENDRECV_REPLACE(A, 1, MPI_REAL, dest, 0, source, 0, comm, status, + ierr) + +Note +---- + +In Fortran, the dimension indicated by DIRECTION = i has DIMS(i+1) +nodes, where DIMS is the array that was used to create the grid. In C, +the dimension indicated by direction = i is the dimension specified by +dims[i]. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler +MPI_ERRORS_RETURN may be used to cause error values to be returned. +Note that MPI does not guarantee that an MPI program can continue past +an error. 
#include <mpi.h>

   int MPI_Cart_sub(MPI_Comm comm, const int remain_dims[], MPI_Comm *comm_new)
#include <mpi.h>

   int MPI_Cartdim_get(MPI_Comm comm, int *ndims)
#include <mpi.h>

   int MPI_Close_port(const char *port_name)
#include <mpi.h>

   int MPI_Comm_accept(const char *port_name, MPI_Info info, int root, MPI_Comm comm, MPI_Comm *newcomm)
code:: Fortran + + USE mpi_f08 + + MPI_Comm_accept(port_name, info, root, comm, newcomm, ierror) + CHARACTER(LEN=*), INTENT(IN) :: port_name + TYPE(MPI_Info), INTENT(IN) :: info + INTEGER, INTENT(IN) :: root + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Comm), INTENT(OUT) :: newcomm + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input Parameters +---------------- + +- port_name : Port name (string, used only on *root*). +- info : Options given by root for the accept (handle, used only on + root). No options currently supported. +- root : Rank in *comm* of root node (integer). +- comm : Intracommunicator over which call is collective (handle). + +Output Parameters +----------------- + +- newcomm : Intercommunicator with client as remote group (handle) +- IERROR : Fortran only: Error status (integer). + +Description +----------- + +:ref:`MPI_Comm_accept` establishes communication with a client. It is +collective over the calling communicator. It returns an +intercommunicator that allows communication with the client, after the +client has connected with the :ref:`MPI_Comm_accept` function using the +:ref:`MPI_Comm_connect` function. The port_name must have been established +through a call to :ref:`MPI_Open_port` on the root. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. Before the +error value is returned, the current MPI error handler is called. By +default, this error handler aborts the MPI job, except for I/O function +errors. The error handler may be changed with :ref:`MPI_Comm_set_errhandler`; +the predefined error handler MPI_ERRORS_RETURN may be used to cause +error values to be returned. See the MPI man page for a full list of MPI +error codes. + + +.. 
#include <mpi.h>

   int MPI_Comm_call_errhandler(MPI_Comm comm, int errorcode)
#include <mpi.h>

   int MPI_Comm_compare(MPI_Comm comm1, MPI_Comm comm2, int *result)
MPI_SIMILAR results if the group members of both communicators are
the same but the rank order differs. MPI_UNEQUAL results otherwise.
+- info : Options given by root for the connect (handle, used only on + root). No options currently supported. +- root : Rank in *comm* of root node (integer). +- comm : Intracommunicator over which call is collective (handle). + +Output Parameters +----------------- + +- newcomm : Intercommunicator with client as remote group (handle) +- IERROR : Fortran only: Error status (integer). + +Description +----------- + +:ref:`MPI_Comm_connect` establishes communication with a server specified by +port_name. It is collective over the calling communicator and returns an +intercommunicator in which the remote group participated in an +:ref:`MPI_Comm_accept`. The :ref:`MPI_Comm_connect` call must only be called after the +:ref:`MPI_Comm_accept` call has been made by the MPI job acting as the server. +If the named port does not exist (or has been closed), :ref:`MPI_Comm_connect` +raises an error of class MPI_ERR_PORT. MPI provides no guarantee of +fairness in servicing connection attempts. That is, connection attempts +are not necessarily satisfied in the order in which they were initiated, +and competition from other connection attempts may prevent a particular +connection attempt from being satisfied. The port_name parameter is the +address of the server. It must be the same as the name returned by +:ref:`MPI_Open_port` on the server. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. Before the +error value is returned, the current MPI error handler is called. By +default, this error handler aborts the MPI job, except for I/O function +errors. The error handler may be changed with :ref:`MPI_Comm_set_errhandler`; +the predefined error handler MPI_ERRORS_RETURN may be used to cause +error values to be returned. Note that MPI does not guarantee that an +MPI program can continue past an error. See the MPI man page for a full +list of MPI error codes. + + +.. 
seealso:: :ref:`MPI_Comm_accept` diff --git a/docs/man-openmpi/man3/MPI_Comm_create.3.rst b/docs/man-openmpi/man3/MPI_Comm_create.3.rst new file mode 100644 index 00000000000..cb3bb12cf6d --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Comm_create.3.rst @@ -0,0 +1,95 @@ +.. _mpi_comm_create: + +MPI_Comm_create +=============== + +.. include_body + +:ref:`MPI_Comm_create` - Creates a new communicator. + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. code:: C + + #include + + int MPI_Comm_create(MPI_Comm comm, MPI_Group group, MPI_Comm *newcomm) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: Fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_COMM_CREATE(COMM, GROUP, NEWCOMM, IERROR) + INTEGER COMM, GROUP, NEWCOMM, IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code:: Fortran + + USE mpi_f08 + + MPI_Comm_create(comm, group, newcomm, ierror) + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Group), INTENT(IN) :: group + TYPE(MPI_Comm), INTENT(OUT) :: newcomm + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input Parameter +--------------- + +- comm : Communicator (handle). +- group : Group, which is a subset of the group of comm (handle). + +Output Parameters +----------------- + +- newcomm : New communicator (handle). +- IERROR : Fortran only: Error status (integer). + +Description +----------- + +This function creates a new communicator newcomm with communication +group defined by group and a new context. The function sets newcomm to a +new communicator that spans all the processes that are in the group. It +sets newcomm to MPI_COMM_NULL for processes that are not in the group. +Each process must call with a group argument that is a subgroup of the +group associated with comm; this could be MPI_GROUP_EMPTY. The processes +may specify different values for the group argument. 
If a process calls +with a non-empty group, then all processes in that group must call the +function with the same group as argument, that is: the same processes in +the same order. Otherwise the call is erroneous. + +Notes +----- + +:ref:`MPI_Comm_create` provides a means of making a subset of processes for the +purpose of separate MIMD computation, with separate communication space. +newcomm, which is created by :ref:`MPI_Comm_create`, can be used in subsequent +calls to :ref:`MPI_Comm_create` (or other communicator constructors) to further +subdivide a computation into parallel sub-computations. A more general +service is provided by :ref:`MPI_Comm_split`. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. Before the +error value is returned, the current MPI error handler is called. By +default, this error handler aborts the MPI job, except for I/O function +errors. The error handler may be changed with :ref:`MPI_Comm_set_errhandler`; +the predefined error handler MPI_ERRORS_RETURN may be used to cause +error values to be returned. Note that MPI does not guarantee that an +MPI program can continue past an error. + + +.. seealso:: :ref:`MPI_Comm_split` diff --git a/docs/man-openmpi/man3/MPI_Comm_create_errhandler.3.rst b/docs/man-openmpi/man3/MPI_Comm_create_errhandler.3.rst new file mode 100644 index 00000000000..fb4e65d43ec --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Comm_create_errhandler.3.rst @@ -0,0 +1,102 @@ +.. _mpi_comm_create_errhandler: + +MPI_Comm_create_errhandler +========================== + +.. include_body + +:ref:`MPI_Comm_create_errhandler` - Creates an error handler that can be +attached to communicators. + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. code:: c + + #include + + int MPI_Comm_create_errhandler(MPI_Comm_errhandler_function *function, + MPI_Errhandler *errhandler) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. 
code:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_COMM_CREATE_ERRHANDLER(FUNCTION, ERRHANDLER, IERROR) + EXTERNAL FUNCTION + INTEGER ERRHANDLER, IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code:: fortran + + USE mpi_f08 + + MPI_Comm_create_errhandler(comm_errhandler_fn, errhandler, ierror) + PROCEDURE(MPI_Comm_errhandler_function) :: comm_errhandler_fn + TYPE(MPI_Errhandler), INTENT(OUT) :: errhandler + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Deprecated Type Name Note +------------------------- + +MPI-2.2 deprecated the MPI_Comm_errhandler_fn and +``MPI::Comm::Errhandler_fn`` types in favor of +MPI_Comm_errhandler_function and ``MPI::Comm::Errhandler_function``, +respectively. Open MPI supports both names (indeed, the \_fn names are +typedefs to the \_function names). + +Input Parameter +--------------- + +- ``function`` : User-defined error handling procedure (function). + +Output Parameters +----------------- + +- ``errhandler`` : MPI error handler (handle). +- ``IERROR`` : Fortran only: Error status (integer). + +Description +----------- + +:ref:`MPI_Comm_create_errhandler` creates an error handler that can be +attached to communicators. This ``function`` is identical to +:ref:`MPI_Errhandler_create`, the use of which is deprecated. In C, the +user routine should be a ``function`` of type +MPI_Comm_errhandler_function, which is defined as + +.. code:: c + + typedef void MPI_Comm_errhandler_function(MPI_Comm *, int *, ...); + +The first argument is the communicator in use. The second is the error +code to be returned by the MPI routine that raised the error. This +typedef replaces ``MPI_Handler_function``, the use of which is +deprecated. In Fortran, the user routine should be of this form: + +.. code:: fortran + + SUBROUTINE COMM_ERRHANDLER_FUNCTION(COMM, ERROR_CODE, ...) 
+ INTEGER COMM, ERROR_CODE + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the ``function`` and Fortran routines in the last argument. Before +the error value is returned, the current MPI error handler is called. By +default, this error handler aborts the MPI job, except for I/O +``function`` errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler +MPI_ERRORS_RETURN may be used to cause error values to be returned. +Note that MPI does not guarantee that an MPI program can continue past +an error. diff --git a/docs/man-openmpi/man3/MPI_Comm_create_from_group.3.rst b/docs/man-openmpi/man3/MPI_Comm_create_from_group.3.rst new file mode 100644 index 00000000000..4023b25dc5b --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Comm_create_from_group.3.rst @@ -0,0 +1,107 @@ +.. _mpi_comm_create_from_group: + +MPI_Comm_create_from_group +========================== + +.. include_body + +:ref:`MPI_Comm_create_from_group` - Creates a new communicator from a group and +stringtag + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. code:: C + + #include + + int MPI_Comm_create_from_group(MPI_Group group, const char *stringtag, MPI_Info info, MPI_Errhandler errhandler, MPI_Comm *newcomm) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: Fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_COMM_CREATE_FROM_GROUP(GROUP, STRINGTAG, INFO, ERRHANDLER, NEWCOMM, IERROR) + INTEGER GROUP, INFO, ERRHANDLER, NEWCOMM, IERROR + CHARACTER*(*) STRINGTAG + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code:: Fortran + + USE mpi_f08 + + MPI_Comm_create_from_group(group, stringtag, info, errhandler, newcomm, ierror) + TYPE(MPI_Group), INTENT(IN) :: group + CHARACTER(LEN=*), INTENT(IN) :: stringtag + TYPE(MPI_Info), INTENT(IN) :: info + TYPE(MPI_Errhandler), INTENT(IN) :: errhandler + TYPE(MPI_Comm), INTENT(OUT) :: newcomm + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input Parameters +---------------- + +- group : Group (handler) +- stringtag : Unique identifier for this operation (string) +- info : info object (handler) +- errhandler : error handler to be attached to the new + intra-communicator (handle) + +Output Parameters +----------------- + +- newcomm : New communicator (handle). +- IERROR : Fortran only: Error status (integer). + +Description +----------- + +:ref:`MPI_Comm_create_from_group` is similar to :ref:`MPI_Comm_create_group`, except +that the set of MPI processes involved in the creation of the new +intra-communicator is specified by a group argument, rather than the +group associated with a pre-existing communicator. If a non-empty group +is specified, then all MPI processes in that group must call the +function and each of these MPI processes must provide the same +arguments, including a group that contains the same members with the +same ordering, and identical stringtag value. In the event that +MPI_GROUP_EMPTY is supplied as the group argument, then the call is a +local operation and MPI_COMM_NULL is returned as newcomm. The stringtag +argument is analogous to the tag used for :ref:`MPI_Comm_create_group`. If +multiple threads at a given MPI process perform concurrent +:ref:`MPI_Comm_create_from_group` operations, the user must distinguish these +operations by providing different stringtag arguments. The stringtag +shall not exceed MPI_MAX_STRINGTAG_LEN characters in length. For C, this +includes space for a null terminating character. 
+ +Notes +----- + +The errhandler argument specifies an error handler to be attached to the +new intracommunicator. The info argument provides hints and assertions, +possibly MPI implementation dependent, which indicate desired +characteristics and guide communicator creation. MPI_MAX_STRINGTAG_LEN +shall have a value of at least 63. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. Before the +error value is returned, the current MPI error handler is called. By +default, this error handler aborts the MPI job, except for I/O function +errors. The error handler may be changed with :ref:`MPI_Comm_set_errhandler`; +the predefined error handler MPI_ERRORS_RETURN may be used to cause +error values to be returned. Note that MPI does not guarantee that an +MPI program can continue past an error. + + +.. seealso:: :ref:`MPI_Comm_create_group` diff --git a/docs/man-openmpi/man3/MPI_Comm_create_group.3.rst b/docs/man-openmpi/man3/MPI_Comm_create_group.3.rst new file mode 100644 index 00000000000..c47462261f0 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Comm_create_group.3.rst @@ -0,0 +1,106 @@ +.. _mpi_comm_create_group: + +MPI_Comm_create_group +===================== + +.. include_body + +:ref:`MPI_Comm_create_group` - Creates a new communicator. + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. code:: C + + #include + + int MPI_Comm_create_group(MPI_Comm comm, MPI_Group group, int tag, MPI_Comm *newcomm) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: Fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_COMM_CREATE_GROUP(COMM, GROUP, TAG, NEWCOMM, IERROR) + INTEGER COMM, GROUP, TAG, NEWCOMM, IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code:: Fortran + + USE mpi_f08 + + MPI_Comm_create_group(comm, group, tag, newcomm, ierror) + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Group), INTENT(IN) :: group + INTEGER, INTENT(IN) :: tag + TYPE(MPI_Comm), INTENT(OUT) :: newcomm + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input Parameters +---------------- + +- comm : Communicator (handle). +- group : Group, which is a subset of the group of comm (handle). +- tag : Tag (integer). + +Output Parameters +----------------- + +- newcomm : New communicator (handle). +- IERROR : Fortran only: Error status (integer). + +Description +----------- + +:ref:`MPI_Comm_create_group` is similar to :ref:`MPI_Comm_create`; however, +:ref:`MPI_Comm_create` must be called by all processes in the group of comm, +whereas :ref:`MPI_Comm_create_group` must be called by all processes in group, +which is a subgroup of the group of comm. In addition, +:ref:`MPI_Comm_create_group` requires that comm is an intracommunicator. +:ref:`MPI_Comm_create_group` returns a new intracommunicator, newcomm, for +which the group argument defines the communication group. No cached +information propagates from comm to newcomm. Each process must provide a +group argument that is a subgroup of the group associated with comm; +this could be MPI_GROUP_EMPTY. If a non-empty group is specified, then +all processes in that group must call the function, and each of these +processes must provide the same arguments, including a group that +contains the same members with the same ordering. Otherwise the call is +erroneous. If the calling process is a member of the group given as the +group argument, then newcomm is a communicator with group as its +associated group. If the calling process is not a member of group, e.g., +group is MPI_GROUP_EMPTY, then the call is a local operation and +MPI_COMM_NULL is returned as newcomm. 
+ +Notes +----- + +:ref:`MPI_Comm_create_group` provides a means of making a subset of processes +for the purpose of separate MIMD computation, with separate +communication space. newcomm, which is created by :ref:`MPI_Comm_create_group`, +can be used in subsequent calls to :ref:`MPI_Comm_create_group` (or other +communicator constructors) to further subdivide a computation into +parallel sub-computations. A more general service is provided by +:ref:`MPI_Comm_split`. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. Before the +error value is returned, the current MPI error handler is called. By +default, this error handler aborts the MPI job, except for I/O function +errors. The error handler may be changed with :ref:`MPI_Comm_set_errhandler`; +the predefined error handler MPI_ERRORS_RETURN may be used to cause +error values to be returned. Note that MPI does not guarantee that an +MPI program can continue past an error. + + +.. seealso:: :ref:`MPI_Comm_create` diff --git a/docs/man-openmpi/man3/MPI_Comm_create_keyval.3.rst b/docs/man-openmpi/man3/MPI_Comm_create_keyval.3.rst new file mode 100644 index 00000000000..2895e2f4ac8 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Comm_create_keyval.3.rst @@ -0,0 +1,143 @@ +.. _mpi_comm_create_keyval: + +MPI_Comm_create_keyval +====================== + +.. include_body + +:ref:`MPI_Comm_create_keyval` - Generates a new attribute key. + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. code:: c + + #include + + int MPI_Comm_create_keyval(MPI_Comm_copy_attr_function + *comm_copy_attr_fn, MPI_Comm_delete_attr_function + *comm_delete_attr_fn, int *comm_keyval, + void *extra_state) + +Fortran Syntax (See Fortran 77 Notes) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code:: fortran + + USE MPI + ! 
or the older form: INCLUDE 'mpif.h' + + MPI_COMM_CREATE_KEYVAL(COMM_COPY_ATTR_FN, COMM_DELETE_ATTR_FN, + COMM_KEYVAL, EXTRA_STATE, IERROR) + EXTERNAL COMM_COPY_ATTR_FN, COMM_DELETE_ATTR_FN + INTEGER COMM_KEYVAL, IERROR + + INTEGER(KIND=MPI_ADDRESS_KIND) EXTRA_STATE + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code:: fortran + + USE mpi_f08 + + MPI_Comm_create_keyval(comm_copy_attr_fn, comm_delete_attr_fn, comm_keyval, + extra_state, ierror) + PROCEDURE(MPI_Comm_copy_attr_function) :: comm_copy_attr_fn + PROCEDURE(MPI_Comm_delete_attr_function) :: comm_delete_attr_fn + INTEGER, INTENT(OUT) :: comm_keyval + INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: extra_state + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input Parameters +---------------- + +- ``comm_copy_attr_fn`` : Copy callback function for ``comm_keyval`` + (function). +- ``comm_delete_attr_fn`` : Delete callback function for + ``comm_keyval`` (function). +- ``extra_state`` : Extra state for callback functions. + +Output Parameter +---------------- + +- ``comm_keyval`` : Key value for future access (integer). +- ``IERROR`` : Fortran only: Error status (integer). + +Description +----------- + +This function replaces :ref:`MPI_Keyval_create`, the use of which is +deprecated. The C binding is identical. The Fortran binding differs in +that ``extra_state`` is an address-sized integer. Also, the copy and +delete callback functions have Fortran bindings that are consistent with +address-sized attributes. The argument ``comm_copy_attr_fn`` may be +specified as MPI_COMM_NULL_COPY_FN or MPI_COMM_DUP_FN from C or +Fortran. MPI_COMM_NULL_COPY_FN is a function that does nothing more +than returning ``flag = 0`` and MPI_SUCCESS. MPI_COMM_DUP_FN is +a simple-minded copy function that sets ``flag = 1``, returns the value +of ``attribute_val_in`` in ``attribute_val_out``, and returns +MPI_SUCCESS. These replace the MPI-1 predefined callbacks +MPI_NULL_COPY_FN and MPI_DUP_FN, the use of which is deprecated. 
+The two C callback functions are: + +.. code:: c + + typedef int MPI_Comm_copy_attr_function(MPI_Comm oldcomm, int comm_keyval, + void *extra_state, void *attribute_val_in, + void *attribute_val_out, int *flag); + + typedef int MPI_Comm_delete_attr_function(MPI_Comm comm, int comm_keyval, + void *attribute_val, void *extra_state); + +which are the same as the MPI-1.1 calls but with a new name. The old +names are deprecated. The two Fortran callback functions are: + +.. code:: fortran + + SUBROUTINE COMM_COPY_ATTR_FN(OLDCOMM, COMM_KEYVAL, EXTRA_STATE, + ATTRIBUTE_VAL_IN, ATTRIBUTE_VAL_OUT, FLAG, IERROR) + INTEGER OLDCOMM, COMM_KEYVAL, IERROR + + INTEGER(KIND=MPI_ADDRESS_KIND) EXTRA_STATE, ATTRIBUTE_VAL_IN, + ATTRIBUTE_VAL_OUT + LOGICAL FLAG + + SUBROUTINE COMM_DELETE_ATTR_FN(COMM, COMM_KEYVAL, ATTRIBUTE_VAL, EXTRA_STATE, + IERROR) + INTEGER COMM, COMM_KEYVAL, IERROR + + INTEGER(KIND=MPI_ADDRESS_KIND) ATTRIBUTE_VAL, EXTRA_STATE + +Fortran 77 Notes +^^^^^^^^^^^^^^^^ + +The MPI standard prescribes portable Fortran syntax for the +``EXTRA_STATE`` argument only for Fortran 90. FORTRAN 77 users may use +the non-portable syntax + +.. code:: fortran + + INTEGER*MPI_ADDRESS_KIND EXTRA_STATE + +where ``MPI_ADDRESS_KIND`` is a constant defined in mpif.h and gives the +length of the declared integer in bytes. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. Before the +error value is returned, the current MPI error handler is called. By +default, this error handler aborts the MPI job, except for I/O function +errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler +MPI_ERRORS_RETURN may be used to cause error values to be returned. +Note that MPI does not guarantee that an MPI program can continue past +an error. See the MPI man page for a full list of MPI error codes. + + +.. 
seealso:: :ref:`MPI` diff --git a/docs/man-openmpi/man3/MPI_Comm_delete_attr.3.rst b/docs/man-openmpi/man3/MPI_Comm_delete_attr.3.rst new file mode 100644 index 00000000000..ba03c08777e --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Comm_delete_attr.3.rst @@ -0,0 +1,97 @@ +.. _mpi_comm_delete_attr: + +MPI_Comm_delete_attr +==================== + +.. include_body + +:ref:`MPI_Comm_delete_attr` - Deletes attribute value associated with a +key. + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. code:: c + + #include + + int MPI_Comm_delete_attr(MPI_Comm comm, int comm_keyval) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_COMM_DELETE_ATTR(COMM, COMM_KEYVAL, IERROR) + INTEGER COMM, COMM_KEYVAL, IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code:: fortran + + USE mpi_f08 + + MPI_Comm_delete_attr(comm, comm_keyval, ierror) + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, INTENT(IN) :: comm_keyval + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input/Output Parameter +^^^^^^^^^^^^^^^^^^^^^^ + +- ``comm`` : Communicator from which the attribute is deleted (handle). + +Input Parameter +--------------- + +- ``comm_keyval`` : Key value (integer). + +Output Parameter +---------------- + +- ``IERROR`` : Fortran only: Error status (integer). + +Description +----------- + +:ref:`MPI_Comm_delete_attr` deletes an attribute from cache by key. This +function invokes the attribute delete function ``delete_fn`` specified +when the ``comm_keyval`` was created. The call will fail if the +``delete_fn`` function returns an error code other than MPI_SUCCESS. +Whenever a communicator is replicated using the function +:ref:`MPI_Comm_dup`, all callback copy functions for attributes that are +currently set are invoked (in arbitrary order). Whenever a communicator +is deleted using the function :ref:`MPI_Comm_free`, all callback delete +functions for attributes that are currently set are invoked.
This +function is the same as :ref:`MPI_Attr_delete` but is needed to match the +communicator-specific functions introduced in the MPI-2 standard. +The use of :ref:`MPI_Attr_delete` is deprecated. + +Notes +----- + +Note that it is not defined by the MPI standard what happens if the +``delete_fn`` callback invokes other MPI functions. In Open MPI, it is +not valid for ``delete_fn`` callbacks (or any of their children) to add +or delete attributes on the same object on which the ``delete_fn`` +callback is being invoked. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. Before the +error value is returned, the current MPI error handler is called. By +default, this error handler aborts the MPI job, except for I/O function +errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler +MPI_ERRORS_RETURN may be used to cause error values to be returned. +Note that MPI does not guarantee that an MPI program can continue past +an error. diff --git a/docs/man-openmpi/man3/MPI_Comm_disconnect.3.rst b/docs/man-openmpi/man3/MPI_Comm_disconnect.3.rst new file mode 100644 index 00000000000..099ef342c24 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Comm_disconnect.3.rst @@ -0,0 +1,91 @@ +.. _mpi_comm_disconnect: + +MPI_Comm_disconnect +=================== + +.. include_body + +:ref:`MPI_Comm_disconnect` - Deallocates communicator object and sets handle to +MPI_COMM_NULL. + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. code:: c + + #include + + int MPI_Comm_disconnect(MPI_Comm *comm) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_COMM_DISCONNECT(COMM, IERROR) + INTEGER COMM, IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +..
code:: fortran + + USE mpi_f08 + + MPI_Comm_disconnect(comm, ierror) + TYPE(MPI_Comm), INTENT(INOUT) :: comm + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input/Output Parameter +^^^^^^^^^^^^^^^^^^^^^^ + +- comm : Communicator (handle). + +Output Parameter +---------------- + +- IERROR : Fortran only: Error status (integer). + +Description +----------- + +:ref:`MPI_Comm_disconnect` waits for all pending communication on comm to +complete internally, deallocates the communicator object, and sets the +handle to MPI_COMM_NULL. It is a collective operation. It may not be +called with the communicator MPI_COMM_WORLD or MPI_COMM_SELF. +:ref:`MPI_Comm_disconnect` may be called only if all communication is complete +and matched, so that buffered data can be delivered to its destination. +This requirement is the same as for :ref:`MPI_Finalize`. :ref:`MPI_Comm_disconnect` +has the same action as :ref:`MPI_Comm_free`, except that it waits for pending +communication to finish internally and enables the guarantee about the +behavior of disconnected processes. + +Notes +----- + +To disconnect two processes you may need to call :ref:`MPI_Comm_disconnect`, +:ref:`MPI_Win_free`, and :ref:`MPI_File_close` to remove all communication paths +between the two processes. Note that it may be necessary to disconnect +several communicators (or to free several windows or files) before two +processes are completely independent. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. Before the +error value is returned, the current MPI error handler is called. By +default, this error handler aborts the MPI job, except for I/O function +errors. The error handler may be changed with :ref:`MPI_Comm_set_errhandler`; +the predefined error handler MPI_ERRORS_RETURN may be used to cause +error values to be returned. Note that MPI does not guarantee that an +MPI program can continue past an error. + + +.. 
seealso:: :ref:`MPI_Comm_connect` diff --git a/docs/man-openmpi/man3/MPI_Comm_dup.3.rst b/docs/man-openmpi/man3/MPI_Comm_dup.3.rst new file mode 100644 index 00000000000..c189d875849 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Comm_dup.3.rst @@ -0,0 +1,99 @@ +.. _mpi_comm_dup: + +MPI_Comm_dup +============ + +.. include_body + +:ref:`MPI_Comm_dup` - Duplicates an existing communicator with all its cached +information. + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. code:: c + + #include + + int MPI_Comm_dup(MPI_Comm comm, MPI_Comm *newcomm) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_COMM_DUP(COMM, NEWCOMM, IERROR) + INTEGER COMM, NEWCOMM, IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code:: fortran + + USE mpi_f08 + + MPI_Comm_dup(comm, newcomm, ierror) + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Comm), INTENT(OUT) :: newcomm + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input Parameter +--------------- + +- comm : Communicator (handle). + +Output Parameters +----------------- + +- newcomm : Copy of comm (handle). +- IERROR : Fortran only: Error status (integer). + +Description +----------- + +:ref:`MPI_Comm_dup` duplicates the existing communicator comm with associated +key values. For each key value, the respective copy callback function +determines the attribute value associated with this key in the new +communicator; one particular action that a copy callback may take is to +delete the attribute from the new communicator. Returns in newcomm a new +communicator with the same group, any copied cached information, but a +new context (see Section 5.7.1 of the MPI-1 Standard, "Functionality"). + +Notes +----- + +This operation is used to provide a parallel library call with a +duplicate communication space that has the same properties as the +original communicator. 
This includes any attributes (see below) and +topologies (see Chapter 6, "Process Topologies," in the MPI-1 Standard). +This call is valid even if there are pending point-to-point +communications involving the communicator comm. A typical call might +involve an :ref:`MPI_Comm_dup` at the beginning of the parallel call, and an +:ref:`MPI_Comm_free` of that duplicated communicator at the end of the call. +Other models of communicator management are also possible. This call +applies to both intra- and intercommunicators. Note that it is not +defined by the MPI standard what happens if the attribute copy callback +invokes other MPI functions. In Open MPI, it is not valid for attribute +copy callbacks (or any of their children) to add or delete attributes on +the same object on which the attribute copy callback is being invoked. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. Before the +error value is returned, the current MPI error handler is called. By +default, this error handler aborts the MPI job, except for I/O function +errors. The error handler may be changed with :ref:`MPI_Comm_set_errhandler`; +the predefined error handler MPI_ERRORS_RETURN may be used to cause +error values to be returned. Note that MPI does not guarantee that an +MPI program can continue past an error. + + +.. seealso:: :ref:`MPI_Comm_dup_with_info` diff --git a/docs/man-openmpi/man3/MPI_Comm_dup_with_info.3.rst b/docs/man-openmpi/man3/MPI_Comm_dup_with_info.3.rst new file mode 100644 index 00000000000..f45bc105793 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Comm_dup_with_info.3.rst @@ -0,0 +1,99 @@ +.. _mpi_comm_dup_with_info: + +MPI_Comm_dup_with_info +====================== + +.. include_body + +:ref:`MPI_Comm_dup_with_info` - Duplicates an existing communicator using +provided info. + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. 
code:: c + + #include + + int MPI_Comm_dup_with_info(MPI_Comm comm, MPI_Info info, MPI_Comm *newcomm) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_COMM_DUP_WITH_INFO(COMM, INFO, NEWCOMM, IERROR) + INTEGER COMM, INFO, NEWCOMM, IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code:: fortran + + USE mpi_f08 + + MPI_Comm_dup_with_info(comm, info, newcomm, ierror) + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Info), INTENT(IN) :: info + TYPE(MPI_Comm), INTENT(OUT) :: newcomm + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input Parameter +--------------- + +- comm : Communicator (handle). +- info : Info argument (handle). + +Output Parameters +----------------- + +- newcomm : Copy of comm (handle). +- IERROR : Fortran only: Error status (integer). + +Description +----------- + +:ref:`MPI_Comm_dup_with_info` acts exactly like :ref:`MPI_Comm_dup` except that the +info hints associated with the communicator comm are not duplicated in +newcomm. The hints provided by the argument info are associated with the +output communicator newcomm instead. See :ref:`MPI_Comm_set_info` for the +list of recognized info keys. + +Notes +----- + +This operation is used to provide a parallel library call with a +duplicate communication space that has the same properties as the +original communicator. This includes any attributes (see below) and +topologies (see Chapter 6, "Process Topologies," in the MPI-1 Standard). +This call is valid even if there are pending point-to-point +communications involving the communicator comm. A typical call might +involve an :ref:`MPI_Comm_dup_with_info` at the beginning of the parallel call, +and an :ref:`MPI_Comm_free` of that duplicated communicator at the end of the +call. Other models of communicator management are also possible. This +call applies to both intra- and intercommunicators. 
Note that it is not +defined by the MPI standard what happens if the attribute copy callback +invokes other MPI functions. In Open MPI, it is not valid for attribute +copy callbacks (or any of their children) to add or delete attributes on +the same object on which the attribute copy callback is being invoked. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. Before the +error value is returned, the current MPI error handler is called. By +default, this error handler aborts the MPI job, except for I/O function +errors. The error handler may be changed with :ref:`MPI_Comm_set_errhandler`; +the predefined error handler MPI_ERRORS_RETURN may be used to cause +error values to be returned. Note that MPI does not guarantee that an +MPI program can continue past an error. + + +.. seealso:: :ref:`MPI_Comm_dup` diff --git a/docs/man-openmpi/man3/MPI_Comm_f2c.3.rst b/docs/man-openmpi/man3/MPI_Comm_f2c.3.rst new file mode 100644 index 00000000000..9e99816785e --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Comm_f2c.3.rst @@ -0,0 +1,77 @@ +.. _mpi_comm_f2c: + + +MPI_Comm_f2c +============ + +.. include_body + +:ref:`MPI_Comm_f2c`, :ref:`MPI_Comm_c2f`, :ref:`MPI_File_f2c`, :ref:`MPI_File_c2f`, :ref:`MPI_Info_f2c`, +:ref:`MPI_Info_c2f`, :ref:`MPI_Message_f2c`, :ref:`MPI_Message_c2f`, :ref:`MPI_Op_f2c`, :ref:`MPI_Op_c2f`, +:ref:`MPI_Request_f2c`, :ref:`MPI_Request_c2f`, :ref:`MPI_Type_f2c`, :ref:`MPI_Type_c2f`, +:ref:`MPI_Win_f2c`, :ref:`MPI_Win_c2f` - Translates a C handle into a Fortran +handle, or vice versa. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. 
code-block:: c
+
+   #include
+
+   MPI_Comm MPI_Comm_f2c(MPI_Fint comm)
+   MPI_Fint MPI_Comm_c2f(MPI_Comm comm)
+
+   MPI_File MPI_File_f2c(MPI_Fint file)
+   MPI_Fint MPI_File_c2f(MPI_File file)
+
+   MPI_Group MPI_Group_f2c(MPI_Fint group)
+   MPI_Fint MPI_Group_c2f(MPI_Group group)
+
+   MPI_Info MPI_Info_f2c(MPI_Fint info)
+   MPI_Fint MPI_Info_c2f(MPI_Info info)
+
+   MPI_Message MPI_Message_f2c(MPI_Fint message)
+   MPI_Fint MPI_Message_c2f(MPI_Message message)
+
+   MPI_Op MPI_Op_f2c(MPI_Fint op)
+   MPI_Fint MPI_Op_c2f(MPI_Op op)
+
+   MPI_Request MPI_Request_f2c(MPI_Fint request)
+   MPI_Fint MPI_Request_c2f(MPI_Request request)
+
+   MPI_Datatype MPI_Type_f2c(MPI_Fint datatype)
+   MPI_Fint MPI_Type_c2f(MPI_Datatype datatype)
+
+   MPI_Win MPI_Win_f2c(MPI_Fint win)
+   MPI_Fint MPI_Win_c2f(MPI_Win win)
+
+
+DESCRIPTION
+-----------
+
+Handles are passed between Fortran and C by using an explicit C wrapper
+to convert Fortran handles to C handles. There is no direct access to C
+handles in Fortran. The type definition MPI_Fint is provided in C for
+an integer of the size that matches a Fortran *INTEGER*; usually,
+MPI_Fint will be equivalent to *int*. The handle translation functions
+are provided in C to convert from a Fortran handle (which is an integer)
+to a C handle, and vice versa.
+
+For example, if *comm* is a valid Fortran handle to a communicator, then
+:ref:`MPI_Comm_f2c` returns a valid C handle to that same communicator; if
+*comm* = MPI_COMM_NULL (Fortran value), then :ref:`MPI_Comm_f2c` returns a null
+C handle; if *comm* is an invalid Fortran handle, then :ref:`MPI_Comm_f2c`
+returns an invalid C handle.
+
+
+NOTE
+----
+
+This function does not return an error value. Consequently, the result
+of calling it before :ref:`MPI_Init` or after :ref:`MPI_Finalize` is undefined.
diff --git a/docs/man-openmpi/man3/MPI_Comm_free.3.rst b/docs/man-openmpi/man3/MPI_Comm_free.3.rst new file mode 100644 index 00000000000..11747464433 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Comm_free.3.rst @@ -0,0 +1,93 @@ +.. _mpi_comm_free: + + +MPI_Comm_free +============= + +.. include_body + +:ref:`MPI_Comm_free` - Mark a communicator object for deallocation. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Comm_free(MPI_Comm *comm) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_COMM_FREE(COMM, IERROR) + INTEGER COMM, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Comm_free(comm, ierror) + TYPE(MPI_Comm), INTENT(INOUT) :: comm + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETER +--------------- +* ``comm``: Communicator to be destroyed (handle). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +This operation marks the communicator object for deallocation. The +handle is set to MPI_COMM_NULL. Any pending operations that use this +communicator will complete normally; the object is actually deallocated +only if there are no other active references to it. This call applies to +intracommunicators and intercommunicators. Upon actual deallocation, the +delete callback functions for all cached attributes (see Section 5.7 in +the MPI-1 Standard, "Caching") are called in arbitrary order. + + +NOTES +----- + +Note that it is not defined by the MPI standard what happens if the +delete_fn callback invokes other MPI functions. In Open MPI, it is not +valid for delete_fn callbacks (or any of their children) to add or +delete attributes on the same object on which the delete_fn callback is +being invoked. 
+
+
+ERRORS
+------
+
+Almost all MPI routines return an error value; C routines as the value
+of the function and Fortran routines in the last argument.
+
+Before the error value is returned, the current MPI error handler is
+called. By default, this error handler aborts the MPI job, except for
+I/O function errors. The error handler may be changed with
+:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN
+may be used to cause error values to be returned. Note that MPI does not
+guarantee that an MPI program can continue past an error.
+
+
+.. seealso::
+   :ref:`MPI_Comm_delete_attr` diff --git a/docs/man-openmpi/man3/MPI_Comm_free_keyval.3.rst b/docs/man-openmpi/man3/MPI_Comm_free_keyval.3.rst new file mode 100644 index 00000000000..15cedab0d12 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Comm_free_keyval.3.rst @@ -0,0 +1,92 @@ +.. _mpi_comm_free_keyval:
+
+
+MPI_Comm_free_keyval
+====================
+
+.. include_body
+
+:ref:`MPI_Comm_free_keyval` - Frees attribute key for communicator cache
+attribute.
+
+
+SYNTAX
+------
+
+
+C Syntax
+^^^^^^^^
+
+.. code-block:: c
+
+   #include
+
+   int MPI_Comm_free_keyval(int *comm_keyval)
+
+
+Fortran Syntax
+^^^^^^^^^^^^^^
+
+.. code-block:: fortran
+
+   USE MPI
+   ! or the older form: INCLUDE 'mpif.h'
+   MPI_COMM_FREE_KEYVAL(COMM_KEYVAL, IERROR)
+   INTEGER COMM_KEYVAL, IERROR
+
+
+Fortran 2008 Syntax
+^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: fortran
+
+   USE mpi_f08
+   MPI_Comm_free_keyval(comm_keyval, ierror)
+   INTEGER, INTENT(INOUT) :: comm_keyval
+   INTEGER, OPTIONAL, INTENT(OUT) :: ierror
+
+
+INPUT/OUTPUT PARAMETER
+----------------------
+* ``comm_keyval``: Key value (integer).
+
+OUTPUT PARAMETER
+----------------
+* ``IERROR``: Fortran only: Error status (integer).
+
+DESCRIPTION
+-----------
+
+:ref:`MPI_Comm_free_keyval` frees an extant attribute key. This function sets
+the value of *keyval* to MPI_KEYVAL_INVALID.
Note that it is not +erroneous to free an attribute key that is in use, because the actual +free does not transpire until after all references (in other +communicators on the process) to the key have been freed. These +references need to be explicitly freed by the program, either via calls +to :ref:`MPI_Comm_delete_attr` that free one attribute instance, or by calls to +:ref:`MPI_Comm_free` that free all attribute instances associated with the +freed communicator. + +This call is identical to the call :ref:`MPI_Keyval_free` but is needed to +match the communicator-specific creation function introduced in the +MPI-2 standard. The use of :ref:`MPI_Keyval_free` is deprecated. + + +NOTES +----- + +Key values are global (they can be used with any and all communicators). + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_Comm_get_attr.3.rst b/docs/man-openmpi/man3/MPI_Comm_get_attr.3.rst new file mode 100644 index 00000000000..93e41df92b6 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Comm_get_attr.3.rst @@ -0,0 +1,105 @@ +.. _mpi_comm_get_attr: + + +MPI_Comm_get_attr +================= + +.. include_body + +:ref:`MPI_Comm_get_attr` - Retrieves attribute value by key. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Comm_get_attr(MPI_Comm comm, int comm_keyval, + void *attribute_val, int *flag) + + +Fortran Syntax (see FORTRAN 77 NOTES) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: fortran
+
+   USE MPI
+   ! or the older form: INCLUDE 'mpif.h'
+   MPI_COMM_GET_ATTR(COMM, COMM_KEYVAL, ATTRIBUTE_VAL, FLAG, IERROR)
+   INTEGER COMM, COMM_KEYVAL, IERROR
+   INTEGER(KIND=MPI_ADDRESS_KIND) ATTRIBUTE_VAL
+   LOGICAL FLAG
+
+
+Fortran 2008 Syntax
+^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: fortran
+
+   USE mpi_f08
+   MPI_Comm_get_attr(comm, comm_keyval, attribute_val, flag, ierror)
+   TYPE(MPI_Comm), INTENT(IN) :: comm
+   INTEGER, INTENT(IN) :: comm_keyval
+   INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(OUT) :: attribute_val
+   LOGICAL, INTENT(OUT) :: flag
+   INTEGER, OPTIONAL, INTENT(OUT) :: ierror
+
+
+INPUT PARAMETERS
+----------------
+* ``comm``: Communicator to which the attribute is attached (handle).
+* ``comm_keyval``: Key value (integer).
+
+OUTPUT PARAMETER
+----------------
+* ``attribute_val``: Attribute value, unless *flag* = false.
+* ``flag``: False if no attribute is associated with the key (logical).
+* ``IERROR``: Fortran only: Error status (integer).
+
+DESCRIPTION
+-----------
+
+:ref:`MPI_Comm_get_attr` retrieves an attribute value by key. The call is
+erroneous if there is no key with value *keyval*. On the other hand, the
+call is correct if the key value exists, but no attribute is attached on
+*comm* for that key; in that case, the call returns *flag* = false. In
+particular, MPI_KEYVAL_INVALID is an erroneous key value.
+
+This function replaces :ref:`MPI_Attr_get`, the use of which is deprecated. The
+C binding is identical. The Fortran binding differs in that
+*attribute_val* is an address-sized integer.
+
+
+FORTRAN 77 NOTES
+----------------
+
+The MPI standard prescribes portable Fortran syntax for the
+*ATTRIBUTE_VAL* argument only for Fortran 90. Sun FORTRAN 77 users may
+use the non-portable syntax
+
+::
+
+   INTEGER*MPI_ADDRESS_KIND ATTRIBUTE_VAL
+
+where MPI_ADDRESS_KIND is a constant defined in mpif.h and gives the
+length of the declared integer in bytes.
+ + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_Comm_get_errhandler.3.rst b/docs/man-openmpi/man3/MPI_Comm_get_errhandler.3.rst new file mode 100644 index 00000000000..7c63104fab3 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Comm_get_errhandler.3.rst @@ -0,0 +1,81 @@ +.. _mpi_comm_get_errhandler: + + +MPI_Comm_get_errhandler +======================= + +.. include_body + +:ref:`MPI_Comm_get_errhandler` - Retrieves error handler associated with a +communicator. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Comm_get_errhandler(MPI_Comm comm, + MPI_Errhandler *errhandler) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_COMM_GET_ERRHANDLER(COMM, ERRHANDLER, IERROR) + INTEGER COMM, ERRHANDLER, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Comm_get_errhandler(comm, errhandler, ierror) + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Errhandler), INTENT(OUT) :: errhandler + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETER +--------------- +* ``comm``: Communicator (handle). + +OUTPUT PARAMETERS +----------------- +* ``errhandler``: New error handler for communicator (handle). +* ``IERROR``: Fortran only: Error status (integer). 
+ +DESCRIPTION +----------- + +:ref:`MPI_Comm_get_errhandler` retrieves the error handler currently associated +with a communicator. This call is identical to :ref:`MPI_Errhandler_get`, the +use of which is deprecated. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + +See the MPI man page for a full list of MPI error codes. diff --git a/docs/man-openmpi/man3/MPI_Comm_get_info.3.rst b/docs/man-openmpi/man3/MPI_Comm_get_info.3.rst new file mode 100644 index 00000000000..ff2077347a3 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Comm_get_info.3.rst @@ -0,0 +1,84 @@ +.. _mpi_comm_get_info: + + +MPI_Comm_get_info +================= + +.. include_body + +:ref:`MPI_Comm_get_info` - Retrieves active communicator info hints + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Comm_get_info(MPI_Comm comm, MPI_Info *info_used) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_COMM_GET_INFO(COMM, INFO_USED, IERROR) + INTEGER COMM, INFO_USED, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: fortran
+
+   USE mpi_f08
+   MPI_Comm_get_info(comm, info_used, ierror)
+   TYPE(MPI_Comm), INTENT(IN) :: comm
+   TYPE(MPI_Info), INTENT(OUT) :: info_used
+   INTEGER, OPTIONAL, INTENT(OUT) :: ierror
+
+
+INPUT PARAMETERS
+----------------
+* ``comm``: Communicator from which to receive active info hints
+
+OUTPUT PARAMETERS
+-----------------
+* ``info_used``: New info object returned with all active hints on this communicator.
+* ``IERROR``: Fortran only: Error status (integer).
+
+DESCRIPTION
+-----------
+
+:ref:`MPI_Comm_get_info` returns a new info object containing the hints of the
+communicator associated with *comm*. The current setting of all hints
+actually used by the system related to this communicator is returned in
+*info_used*. If no such hints exist, a handle to a newly created info
+object is returned that contains no key/value pair. The user is
+responsible for freeing info_used via :ref:`MPI_Info_free`.
+
+
+ERRORS
+------
+
+Almost all MPI routines return an error value; C routines as the value
+of the function and Fortran routines in the last argument.
+
+Before the error value is returned, the current MPI error handler is
+called. By default, this error handler aborts the MPI job, except for
+I/O function errors. The error handler may be changed with
+:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN
+may be used to cause error values to be returned. Note that MPI does not
+guarantee that an MPI program can continue past an error.
+
+
+.. seealso::
+   :ref:`MPI_Comm_set_info` :ref:`MPI_Info_free` diff --git a/docs/man-openmpi/man3/MPI_Comm_get_name.3.rst b/docs/man-openmpi/man3/MPI_Comm_get_name.3.rst new file mode 100644 index 00000000000..11272d0433c --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Comm_get_name.3.rst @@ -0,0 +1,110 @@ +.. _mpi_comm_get_name:
+
+
+MPI_Comm_get_name
+=================
+
+..
include_body + +:ref:`MPI_Comm_get_name` - Returns the name that was most recently +associated with a communicator. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Comm_get_name(MPI_Comm comm, char *comm_name, int *resultlen) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_COMM_GET_NAME(COMM, COMM_NAME, RESULTLEN, IERROR) + INTEGER COMM, RESULTLEN, IERROR + CHARACTER*(*) COMM_NAME + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Comm_get_name(comm, comm_name, resultlen, ierror) + TYPE(MPI_Comm), INTENT(IN) :: comm + CHARACTER(LEN=MPI_MAX_OBJECT_NAME), INTENT(OUT) :: comm_name + INTEGER, INTENT(OUT) :: resultlen + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETER +--------------- +* ``comm``: Communicator the name of which is to be returned (handle). + +OUTPUT PARAMETER +---------------- +* ``comm_name``: Name previously stored on the communicator, or an empty string if no such name exists (string). +* ``resultlen``: Length of returned name (integer). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Comm_get_name` returns the last name that was previously associated +with the given communicator. The name may be set and retrieved from any +language. The same name will be returned independent of the language +used. *comm_name* should be allocated so that it can hold a resulting +string of length MPI_MAX_OBJECT_NAME characters. :ref:`MPI_Comm_get_name` +returns a copy of the set name in *comm_name*. + +If the user has not associated a name with a communicator, or an error +occurs, :ref:`MPI_Comm_get_name` will return an empty string (all spaces in +Fortran, "" in C). The three predefined communicators will have +predefined names associated with them. 
Thus, the names of +MPI_COMM_WORLD, MPI_COMM_SELF, and MPI_COMM_PARENT will have the default +of MPI_COMM_WORLD, MPI_COMM_SELF, and MPI_COMM_PARENT. The fact that the +system may have chosen to give a default name to a communicator does not +prevent the user from setting a name on the same communicator; doing +this removes the old name and assigns the new one. + + +NOTES +----- + +It is safe simply to print the string returned by :ref:`MPI_Comm_get_name`, as +it is always a valid string even if there was no name. + +Note that associating a name with a communicator has no effect on the +semantics of an MPI program, and will (necessarily) increase the store +requirement of the program, since the names must be saved. Therefore, +there is no requirement that users use these functions to associate +names with communicators. However debugging and profiling MPI +applications may be made easier if names are associated with +communicators, since the debugger or profiler should then be able to +present information in a less cryptic manner. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_Comm_get_parent.3.rst b/docs/man-openmpi/man3/MPI_Comm_get_parent.3.rst new file mode 100644 index 00000000000..5c77b3d1fa6 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Comm_get_parent.3.rst @@ -0,0 +1,96 @@ +.. _mpi_comm_get_parent: + + +MPI_Comm_get_parent +=================== + +.. 
include_body + +:ref:`MPI_Comm_get_parent` - Returns the parent intercommunicator of +current spawned process. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Comm_get_parent(MPI_Comm *parent) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_COMM_GET_PARENT(PARENT, IERROR) + INTEGER PARENT, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Comm_get_parent(parent, ierror) + TYPE(MPI_Comm), INTENT(OUT) :: parent + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +OUTPUT PARAMETERS +----------------- +* ``parent``: The parent communicator (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +If a process was started with :ref:`MPI_Comm_spawn` or :ref:`MPI_Comm_spawn_multiple`, +:ref:`MPI_Comm_get_parent` returns the "parent" intercommunicator of the +current process. This parent intercommunicator is created implicitly +inside of :ref:`MPI_Init` and is the same intercommunicator returned by the +spawn call made in the parents. + +If the process was not spawned, :ref:`MPI_Comm_get_parent` returns +MPI_COMM_NULL. + +After the parent communicator is freed or disconnected, +:ref:`MPI_Comm_get_parent` returns MPI_COMM_NULL. + + +NOTES +----- + +:ref:`MPI_Comm_get_parent` returns a handle to a single intercommunicator. +Calling :ref:`MPI_Comm_get_parent` a second time returns a handle to the same +intercommunicator. Freeing the handle with :ref:`MPI_Comm_disconnect` or +:ref:`MPI_Comm_free` will cause other references to the intercommunicator to +become invalid (dangling). Note that calling :ref:`MPI_Comm_free` on the parent +communicator is not useful. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. 
+ +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Comm_spawn` :ref:`MPI_Comm_spawn_multiple` diff --git a/docs/man-openmpi/man3/MPI_Comm_group.3.rst b/docs/man-openmpi/man3/MPI_Comm_group.3.rst new file mode 100644 index 00000000000..fcc630cbdf5 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Comm_group.3.rst @@ -0,0 +1,77 @@ +.. _mpi_comm_group: + + +MPI_Comm_group +============== + +.. include_body + +:ref:`MPI_Comm_group` - Returns the group associated with a communicator. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Comm_group(MPI_Comm comm, MPI_Group *group) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_COMM_GROUP(COMM, GROUP, IERROR) + INTEGER COMM, GROUP, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Comm_group(comm, group, ierror) + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Group), INTENT(OUT) :: group + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETER +--------------- +* ``comm``: Communicator. + +OUTPUT PARAMETERS +----------------- +* ``group``: Group in communicator (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +If the communicator is an intercommunicator (enables communication +between two groups of processes), this function returns the local group. +To return the remote group, use the :ref:`MPI_Comm_remote_group` function. 
+ + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_Comm_idup.3.rst b/docs/man-openmpi/man3/MPI_Comm_idup.3.rst new file mode 100644 index 00000000000..a54eb775a63 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Comm_idup.3.rst @@ -0,0 +1,116 @@ +.. _mpi_comm_idup: + + +MPI_Comm_idup +============= + +.. include_body + +:ref:`MPI_Comm_idup` - Start the nonblocking duplication of an existing +communicator with all its cached information. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Comm_idup(MPI_Comm comm, MPI_Comm *newcomm, MPI_Request *request) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_COMM_IDUP(COMM, NEWCOMM, REQUEST, IERROR) + INTEGER COMM, NEWCOMM, REQUEST, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Comm_idup(comm, newcomm, request, ierror) + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Comm), INTENT(OUT) :: newcomm + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETER +--------------- +* ``comm``: Communicator (handle). + +OUTPUT PARAMETERS +----------------- +* ``newcomm``: Copy of comm (handle). +* ``request``: Communication request (handle). +* ``IERROR``: Fortran only: Error status (integer). 
+ +DESCRIPTION +----------- + +:ref:`MPI_Comm_idup` starts the nonblocking duplication of an existing +communicator comm with associated key values. For each key value, the +respective copy callback function determines the attribute value +associated with this key in the new communicator; one particular action +that a copy callback may take is to delete the attribute from the new +communicator. Returns in newcomm a new communicator with the same group, +any copied cached information, but a new context (see Section 5.7.1 of +the MPI-1 Standard, "Functionality"). The communicator returned in +*newcomm* will not be available until the request is complete. + +The completion of a communicator duplication request can be determined +by calling any of :ref:`MPI_Wait`, :ref:`MPI_Waitany`, :ref:`MPI_Test`, or :ref:`MPI_Testany` with +the request returned by this function. + + +NOTES +----- + +This operation is used to provide a parallel library call with a +duplicate communication space that has the same properties as the +original communicator. This includes any attributes (see below) and +topologies (see Chapter 6, "Process Topologies," in the MPI-1 Standard). +This call is valid even if there are pending point-to-point +communications involving the communicator comm. A typical call might +involve an :ref:`MPI_Comm_idup` at the beginning of the parallel call, and an +:ref:`MPI_Comm_free` of that duplicated communicator at the end of the call. +Other models of communicator management are also possible. + +This call applies to both intra- and intercommunicators. + +Note that it is not defined by the MPI standard what happens if the +attribute copy callback invokes other MPI functions. In Open MPI, it is +not valid for attribute copy callbacks (or any of their children) to add +or delete attributes on the same object on which the attribute copy +callback is being invoked. 
+
+
+ERRORS
+------
+
+Almost all MPI routines return an error value; C routines as the value
+of the function and Fortran routines in the last argument.
+
+Before the error value is returned, the current MPI error handler is
+called. By default, this error handler aborts the MPI job, except for
+I/O function errors. The error handler may be changed with
+:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN
+may be used to cause error values to be returned. Note that MPI does not
+guarantee that an MPI program can continue past an error.
+
+
+.. seealso::
+   :ref:`MPI_Comm_dup` :ref:`MPI_Comm_dup_with_info` diff --git a/docs/man-openmpi/man3/MPI_Comm_idup_with_info.3.rst b/docs/man-openmpi/man3/MPI_Comm_idup_with_info.3.rst new file mode 100644 index 00000000000..087d50715af --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Comm_idup_with_info.3.rst @@ -0,0 +1,119 @@ +.. _mpi_comm_idup_with_info:
+
+
+MPI_Comm_idup_with_info
+=======================
+
+.. include_body
+
+:ref:`MPI_Comm_idup_with_info` - Start the nonblocking duplication of an
+existing communicator with all its cached information.
+
+
+SYNTAX
+------
+
+
+C Syntax
+^^^^^^^^
+
+.. code-block:: c
+
+   #include
+
+   int MPI_Comm_idup_with_info(MPI_Comm comm, MPI_Info info, MPI_Comm *newcomm, MPI_Request *request)
+
+
+Fortran Syntax
+^^^^^^^^^^^^^^
+
+.. code-block:: fortran
+
+   USE MPI
+   ! or the older form: INCLUDE 'mpif.h'
+   MPI_COMM_IDUP_WITH_INFO(COMM, INFO, NEWCOMM, REQUEST, IERROR)
+   INTEGER COMM, INFO, NEWCOMM, REQUEST, IERROR
+
+
+Fortran 2008 Syntax
+^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: fortran
+
+   USE mpi_f08
+   MPI_Comm_idup_with_info(comm, info, newcomm, request, ierror)
+   TYPE(MPI_Comm), INTENT(IN) :: comm
+   TYPE(MPI_Info), INTENT(IN) :: info
+   TYPE(MPI_Comm), INTENT(OUT) :: newcomm
+   TYPE(MPI_Request), INTENT(OUT) :: request
+   INTEGER, OPTIONAL, INTENT(OUT) :: ierror
+
+
+INPUT PARAMETER
+---------------
+* ``comm``: Communicator (handle).
+* ``info``: Info object (handle).
+ +OUTPUT PARAMETERS +----------------- +* ``newcomm``: Copy of comm (handle). +* ``request``: Communication request (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Comm_idup_with_info` starts the nonblocking duplication of an +existing communicator comm with associated key values. For each key +value, the respective copy callback function determines the attribute +value associated with this key in the new communicator; one particular +action that a copy callback may take is to delete the attribute from the +new communicator. Returns in newcomm a new communicator with the same +group, any copied cached information, but a new context (see Section +5.7.1 of the MPI-1 Standard, "Functionality"). The communicator returned +in *newcomm* will not be available until the request is complete. The +hints provided by the supplied *info* argument are associated with the +output communicator. + +The completion of a communicator duplication request can be determined +by calling any of :ref:`MPI_Wait`, :ref:`MPI_Waitany`, :ref:`MPI_Test`, or :ref:`MPI_Testany` with +the request returned by this function. + + +NOTES +----- + +This operation is used to provide a parallel library call with a +duplicate communication space that has the same properties as the +original communicator. This includes any attributes (see below) and +topologies (see Chapter 6, "Process Topologies," in the MPI-1 Standard). +This call is valid even if there are pending point-to-point +communications involving the communicator comm. A typical call might +involve an :ref:`MPI_Comm_idup_with_info` at the beginning of the parallel +call, and an :ref:`MPI_Comm_free` of that duplicated communicator at the end of +the call. Other models of communicator management are also possible. + +This call applies to both intra- and intercommunicators. 
+ +Note that it is not defined by the MPI standard what happens if the +attribute copy callback invokes other MPI functions. In Open MPI, it is +not valid for attribute copy callbacks (or any of their children) to add +or delete attributes on the same object on which the attribute copy +callback is being invoked. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Comm_dup` :ref:`MPI_Comm_idup` :ref:`MPI_Comm_dup_with_info` diff --git a/docs/man-openmpi/man3/MPI_Comm_join.3.rst b/docs/man-openmpi/man3/MPI_Comm_join.3.rst new file mode 100644 index 00000000000..dc5743d8746 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Comm_join.3.rst @@ -0,0 +1,108 @@ +.. _mpi_comm_join: + + +MPI_Comm_join +============= + +.. include_body + +:ref:`MPI_Comm_join` - Establishes communication between MPI jobs + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Comm_join(int fd, MPI_Comm *intercomm) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_COMM_JOIN(FD, INTERCOMM, IERROR) + INTEGER FD, INTERCOMM, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Comm_join(fd, intercomm, ierror) + INTEGER, INTENT(IN) :: fd + TYPE(MPI_Comm), INTENT(OUT) :: intercomm + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETER +--------------- +* ``fd``: socket file descriptor (socket). 
+ +OUTPUT PARAMETERS +----------------- +* ``intercomm``: Intercommunicator between processes (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Comm_join` creates an intercommunicator from the union of two MPI +processes that are connected by a socket. *fd* is a file descriptor +representing a socket of type SOCK_STREAM (a two-way reliable +byte-stream connection). Nonblocking I/O and asynchronous notification +via SIGIO must not be enabled for the socket. The socket must be in a +connected state, and must be quiescent when :ref:`MPI_Comm_join` is called. + +:ref:`MPI_Comm_join` must be called by the process at each end of the socket. +It does not return until both processes have called :ref:`MPI_Comm_join`. + + +NOTES +----- + +There are no MPI library calls for opening and manipulating a socket. +The socket *fd* can be opened using standard socket API calls. MPI uses +the socket to bootstrap creation of the intercommunicator, and for +nothing else. Upon return, the file descriptor will be open and +quiescent. + +In a multithreaded process, the application must ensure that other +threads do not access the socket while one is in the midst of calling +:ref:`MPI_Comm_join`. + +The returned communicator will contain the two processes connected by +the socket, and may be used to establish MPI communication with +additional processes, through the usual MPI communicator-creation +mechanisms. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. 
Note that MPI does not +guarantee that an MPI program can continue past an error. + +See the MPI man page for a full list of MPI error codes. + + +.. seealso:: + socket(3SOCKET) :ref:`MPI_Comm_create` :ref:`MPI_Comm_group` diff --git a/docs/man-openmpi/man3/MPI_Comm_rank.3.rst b/docs/man-openmpi/man3/MPI_Comm_rank.3.rst new file mode 100644 index 00000000000..b7ad6e11c01 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Comm_rank.3.rst @@ -0,0 +1,89 @@ +.. _mpi_comm_rank: + + +MPI_Comm_rank +============= + +.. include_body + +:ref:`MPI_Comm_rank` - Determines the rank of the calling process in the +communicator. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Comm_rank(MPI_Comm comm, int *rank) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_COMM_RANK(COMM, RANK, IERROR) + INTEGER COMM, RANK, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Comm_rank(comm, rank, ierror) + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, INTENT(OUT) :: rank + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``comm``: Communicator (handle). + +OUTPUT PARAMETERS +----------------- +* ``rank``: Rank of the calling process in group of comm (integer). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +This function gives the rank of the process in the particular +communicator's group. It is equivalent to accessing the communicator's +group with :ref:`MPI_Comm_group`, computing the rank using :ref:`MPI_Group_rank`, and +then freeing the temporary group via :ref:`MPI_Group_free`. + +Many programs will be written with the master-slave model, where one +process (such as the rank-zero process) will play a supervisory role, +and the other processes will serve as compute nodes. 
In this framework, +:ref:`MPI_Comm_size` and :ref:`MPI_Comm_rank` are useful for determining the roles of +the various processes of a communicator. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Comm_group` :ref:`MPI_Comm_size` :ref:`MPI_Comm_compare` diff --git a/docs/man-openmpi/man3/MPI_Comm_remote_group.3.rst b/docs/man-openmpi/man3/MPI_Comm_remote_group.3.rst new file mode 100644 index 00000000000..3d068d7af81 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Comm_remote_group.3.rst @@ -0,0 +1,85 @@ +.. _mpi_comm_remote_group: + + +MPI_Comm_remote_group +===================== + +.. include_body + +:ref:`MPI_Comm_remote_group` - Accesses the remote group associated with an +intercommunicator. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Comm_remote_group(MPI_Comm comm, MPI_Group *group) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_COMM_REMOTE_GROUP(COMM, GROUP, IERROR) + INTEGER COMM, GROUP, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Comm_remote_group(comm, group, ierror) + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Group), INTENT(OUT) :: group + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETER +--------------- +* ``comm``: Communicator. + +OUTPUT PARAMETERS +----------------- +* ``group``: Remote group of communicator. 
+* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Comm_remote_group` accesses the remote group associated with an +intercommunicator. + +The intercommunicator accessors (:ref:`MPI_Comm_test_inter`, +:ref:`MPI_Comm_remote_size`, :ref:`MPI_Comm_remote_group`) are all local operations. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Comm_test_inter` :ref:`MPI_Comm_remote_size` :ref:`MPI_Intercomm_create` + :ref:`MPI_Intercomm_merge` diff --git a/docs/man-openmpi/man3/MPI_Comm_remote_size.3.rst b/docs/man-openmpi/man3/MPI_Comm_remote_size.3.rst new file mode 100644 index 00000000000..b52f3720c51 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Comm_remote_size.3.rst @@ -0,0 +1,85 @@ +.. _mpi_comm_remote_size: + + +MPI_Comm_remote_size +==================== + +.. include_body + +:ref:`MPI_Comm_remote_size` - Determines the size of the remote group +associated with an intercommunicator. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Comm_remote_size(MPI_Comm comm, int *size) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_COMM_REMOTE_SIZE(COMM, SIZE, IERROR) + INTEGER COMM, SIZE, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE mpi_f08 + MPI_Comm_remote_size(comm, size, ierror) + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, INTENT(OUT) :: size + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETER +--------------- +* ``comm``: Communicator (handle). + +OUTPUT PARAMETERS +----------------- +* ``size``: Number of processes in the remote group of comm (integer). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Comm_remote_size` determines the size of the remote group associated +with an intercommunicator. + +The intercommunicator accessors (:ref:`MPI_Comm_test_inter`, +:ref:`MPI_Comm_remote_size`, MPI_Comm_remote_group) are all local operations. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Comm_test_inter` :ref:`MPI_Comm_remote_group` :ref:`MPI_Intercomm_create` + :ref:`MPI_Intercomm_merge` diff --git a/docs/man-openmpi/man3/MPI_Comm_set_attr.3.rst b/docs/man-openmpi/man3/MPI_Comm_set_attr.3.rst new file mode 100644 index 00000000000..0d2645567e2 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Comm_set_attr.3.rst @@ -0,0 +1,122 @@ +.. _mpi_comm_set_attr: + + +MPI_Comm_set_attr +================= + +.. include_body + +:ref:`MPI_Comm_set_attr` - Stores attribute value associated with a key. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. 
code-block:: c + + #include + + int MPI_Comm_set_attr(MPI_Comm comm, int comm_keyval, void *attribute_val) + + +Fortran Syntax (see FORTRAN 77 NOTES) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_COMM_SET_ATTR(COMM, COMM_KEYVAL, ATTRIBUTE_VAL, IERROR) + INTEGER COMM, COMM_KEYVAL, IERROR + INTEGER(KIND=MPI_ADDRESS_KIND) ATTRIBUTE_VAL + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Comm_set_attr(comm, comm_keyval, attribute_val, ierror) + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, INTENT(IN) :: comm_keyval + INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: attribute_val + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT/OUTPUT PARAMETER +---------------------- +* ``comm``: Communicator from which attribute will be attached (handle). + +INPUT PARAMETERS +---------------- +* ``comm_keyval``: Key value (integer). +* ``attribute_val``: Attribute value. + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Comm_set_attr` stores the stipulated attribute value *attribute_val* +for subsequent retrieval by :ref:`MPI_Comm_get_attr`. If the value is already +present, then the outcome is as if :ref:`MPI_Comm_delete_attr` was first called +to delete the previous value (and the callback function delete_fn was +executed), and a new value was next stored. The call is erroneous if +there is no key with value *comm_keyval*; in particular +MPI_KEYVAL_INVALID is an erroneous key value. The call will fail if the +delete_fn function returned an error code other than MPI_SUCCESS. + +This function replaces :ref:`MPI_Attr_put`, the use of which is deprecated. The +C binding is identical. The Fortran binding differs in that +*attribute_val* is an address-sized integer. 
+ + +FORTRAN 77 NOTES +---------------- + +The MPI standard prescribes portable Fortran syntax for the +*ATTRIBUTE_VAL* argument only for Fortran 90. Sun FORTRAN 77 users may +use the non-portable syntax + +:: + + INTEGER*MPI_ADDRESS_KIND ATTRIBUTE_VAL + +where MPI_ADDRESS_KIND is a constant defined in mpif.h and gives the +length of the declared integer in bytes. + + +NOTES +----- + +Values of the permanent attributes MPI_TAG_UB, MPI_HOST, MPI_IO, and +MPI_WTIME_IS_GLOBAL may not be changed. + +The type of the attribute value depends on whether C or Fortran is being +used. In C, an attribute value is a pointer (void \*); in Fortran, it is +a single, address-size integer, which can hold a pointer even on +systems for which a pointer does not fit in an ordinary integer. + +If an attribute is already present, the delete function (specified when +the corresponding keyval was created) will be called. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_Comm_set_errhandler.3.rst b/docs/man-openmpi/man3/MPI_Comm_set_errhandler.3.rst new file mode 100644 index 00000000000..bdcd7cc635e --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Comm_set_errhandler.3.rst @@ -0,0 +1,80 @@ +.. _mpi_comm_set_errhandler: + + +MPI_Comm_set_errhandler +======================= + +.. include_body + +:ref:`MPI_Comm_set_errhandler` - Attaches a new error handler to a +communicator. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. 
code-block:: c + + #include + + int MPI_Comm_set_errhandler(MPI_Comm comm, + MPI_Errhandler errhandler) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_COMM_SET_ERRHANDLER(COMM, ERRHANDLER, IERROR) + INTEGER COMM, ERRHANDLER, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Comm_set_errhandler(comm, errhandler, ierror) + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Errhandler), INTENT(IN) :: errhandler + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT/OUTPUT PARAMETER +---------------------- +* ``comm``: Communicator (handle). + +OUTPUT PARAMETERS +----------------- +* ``errhandler``: New error handler for communicator (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Comm_set_errhandler` attaches a new error handler to a communicator. +The error handler must be either a predefined error handler or an error +handler created by a call to :ref:`MPI_Comm_create_errhandler`. This call is +identical to :ref:`MPI_Errhandler_set`, the use of which is deprecated. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_Comm_set_info.3.rst b/docs/man-openmpi/man3/MPI_Comm_set_info.3.rst new file mode 100644 index 00000000000..44703f55acf --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Comm_set_info.3.rst @@ -0,0 +1,107 @@ +.. 
_mpi_comm_set_info: + + +MPI_Comm_set_info +================= + +.. include_body + +:ref:`MPI_Comm_set_info` - Set communicator info hints + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Comm_set_info(MPI_Comm comm, MPI_Info info) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_COMM_SET_INFO(COMM, INFO, IERROR) + INTEGER COMM, INFO, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Comm_set_info(comm, info, ierror) + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Info), INTENT(IN) :: info + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``comm``: Communicator on which to set info hints +* ``info``: Info object containing hints to be set on *comm* + +OUTPUT PARAMETERS +----------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_COMM_SET_INFO` sets new values for the hints of the communicator +associated with *comm*. :ref:`MPI_COMM_SET_INFO` is a collective routine. The +info object may be different on each process, but any info entries that +an implementation requires to be the same on all processes must appear +with the same value in each process's *info* object. + +The following info key assertions may be accepted by Open MPI: + +*mpi_assert_no_any_tag* (boolean): If set to true, then the +implementation may assume that the process will not use the MPI_ANY_TAG +wildcard on the given communicator. + +*mpi_assert_no_any_source* (boolean): If set to true, then the +implementation may assume that the process will not use the +MPI_ANY_SOURCE wildcard on the given communicator. 
+ +*mpi_assert_exact_length* (boolean): If set to true, then the +implementation may assume that the lengths of messages received by the +process are equal to the lengths of the corresponding receive buffers, +for point-to-point communication operations on the given communicator. + +*mpi_assert_allow_overtaking* (boolean): If set to true, then the +implementation may assume that point-to-point communications on the +given communicator do not rely on the non-overtaking rule specified in +MPI-3.1 Section 3.5. In other words, the application asserts that send +operations are not required to be matched at the receiver in the order +in which the send operations were performed by the sender, and receive +operations are not required to be matched in the order in which they +were performed by the receiver. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Comm_get_info` :ref:`MPI_Info_create` :ref:`MPI_Info_set` :ref:`MPI_Info_free` diff --git a/docs/man-openmpi/man3/MPI_Comm_set_name.3.rst b/docs/man-openmpi/man3/MPI_Comm_set_name.3.rst new file mode 100644 index 00000000000..9d85703aec2 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Comm_set_name.3.rst @@ -0,0 +1,112 @@ +.. _mpi_comm_set_name: + + +MPI_Comm_set_name +================= + +.. include_body + +:ref:`MPI_Comm_set_name` - Associates a name with a communicator. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. 
code-block:: c + + #include + + int MPI_Comm_set_name(MPI_Comm comm, const char *comm_name) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_COMM_SET_NAME(COMM, COMM_NAME, IERROR) + INTEGER COMM, IERROR + CHARACTER*(*) COMM_NAME + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Comm_set_name(comm, comm_name, ierror) + TYPE(MPI_Comm), INTENT(IN) :: comm + CHARACTER(LEN=*), INTENT(IN) :: comm_name + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT/OUTPUT PARAMETER +---------------------- +* ``comm``: Communicator whose identifier is to be set (handle). + +INPUT PARAMETER +--------------- +* ``comm_name``: Character string to be used as the identifier for the communicator (string). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Comm_set_name` allows a user to associate a name string with a +communicator. The character string that is passed to :ref:`MPI_Comm_set_name` +is saved inside the MPI library (so it can be freed by the caller +immediately after the call, or allocated on the stack). Leading spaces +in *name* are significant, but trailing ones are not. + +:ref:`MPI_Comm_set_name` is a local (noncollective) operation, which affects +only the name of the communicator as seen in the process that made the +:ref:`MPI_Comm_set_name` call. There is no requirement that the same (or any) +name be assigned to a communicator in every process where it exists. + +The length of the name that can be stored is limited to the value of +MPI_MAX_OBJECT_NAME in Fortran and MPI_MAX_OBJECT_NAME-1 in C (to allow +for the null terminator). Attempts to set names longer than this will +result in truncation of the name. MPI_MAX_OBJECT_NAME must have a value +of at least 64. 
+ + +NOTES +----- + +Since :ref:`MPI_Comm_set_name` is provided to help debug code, it is sensible +to give the same name to a communicator in all of the processes where it +exists, to avoid confusion. + +Regarding name length, under circumstances of store exhaustion, an +attempt to set a name of any length could fail; therefore, the value of +MPI_MAX_OBJECT_NAME should be viewed only as a strict upper bound on the +name length, not a guarantee that setting names of less than this length +will always succeed. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Comm_get_name` diff --git a/docs/man-openmpi/man3/MPI_Comm_size.3.rst b/docs/man-openmpi/man3/MPI_Comm_size.3.rst new file mode 100644 index 00000000000..b106e04ae21 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Comm_size.3.rst @@ -0,0 +1,99 @@ +.. _mpi_comm_size: + + +MPI_Comm_size +============= + +.. include_body + +:ref:`MPI_Comm_size` - Returns the size of the group associated with a +communicator. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Comm_size(MPI_Comm comm, int *size) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_COMM_SIZE(COMM, SIZE, IERROR) + INTEGER COMM, SIZE, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE mpi_f08 + MPI_Comm_size(comm, size, ierror) + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, INTENT(OUT) :: size + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETER +--------------- +* ``comm``: Communicator (handle). + +OUTPUT PARAMETERS +----------------- +* ``size``: Number of processes in the group of comm (integer). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +This function indicates the number of processes involved in a +communicator. For MPI_COMM_WORLD, it indicates the total number of +processes available. This function is equivalent to accessing the +communicator's group with :ref:`MPI_Comm_group`, computing the size using +:ref:`MPI_Group_size`, and then freeing the temporary group via :ref:`MPI_Group_free`. +If the communicator is an inter-communicator (enables communication +between two groups), this function returns the size of the local group. +To return the size of the remote group, use the :ref:`MPI_Comm_remote_size` +function. + +This call is often used with :ref:`MPI_Comm_rank` to determine the amount of +concurrency available for a specific library or program. :ref:`MPI_Comm_rank` +indicates the rank of the process that calls it in the range from 0 . . +. size-1, where size is the return value of :ref:`MPI_Comm_size`. + + +NOTE +---- + +MPI_COMM_NULL is not considered a valid argument to this function. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. 
seealso:: + :ref:`MPI_Comm_group` :ref:`MPI_Comm_rank` :ref:`MPI_Comm_compare` diff --git a/docs/man-openmpi/man3/MPI_Comm_spawn.3.rst b/docs/man-openmpi/man3/MPI_Comm_spawn.3.rst new file mode 100644 index 00000000000..977193fe4de --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Comm_spawn.3.rst @@ -0,0 +1,287 @@ +.. _mpi_comm_spawn: + + +MPI_Comm_spawn +============== + +.. include_body + +:ref:`MPI_Comm_spawn` - Spawns a number of identical binaries. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Comm_spawn(const char *command, char *argv[], int maxprocs, + MPI_Info info, int root, MPI_Comm comm, + MPI_Comm *intercomm, int array_of_errcodes[]) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_COMM_SPAWN(COMMAND, ARGV, MAXPROCS, INFO, ROOT, COMM, + INTERCOMM, ARRAY_OF_ERRCODES, IERROR) + + CHARACTER*(*) COMMAND, ARGV(*) + INTEGER INFO, MAXPROCS, ROOT, COMM, INTERCOMM, + ARRAY_OF_ERRCODES(*), IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Comm_spawn(command, argv, maxprocs, info, root, comm, intercomm, + array_of_errcodes, ierror) + CHARACTER(LEN=*), INTENT(IN) :: command, argv(*) + INTEGER, INTENT(IN) :: maxprocs, root + TYPE(MPI_Info), INTENT(IN) :: info + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Comm), INTENT(OUT) :: intercomm + INTEGER :: array_of_errcodes(*) + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``command``: Name of program to be spawned (string, significant only at *root*). +* ``argv``: Arguments to *command* (array of strings, significant only at *root*). +* ``maxprocs``: Maximum number of processes to start (integer, significant only at *root*). +* ``info``: A set of key-value pairs telling the runtime system where and how to start the processes (handle, significant only at *root*). 
+* ``root``: Rank of process in which previous arguments are examined (integer). +* ``comm``: Intracommunicator containing group of spawning processes (handle). + +OUTPUT PARAMETER +---------------- +* ``intercomm``: Intercommunicator between original group and the newly spawned group (handle). +* ``array_of_errcodes``: One code per process (array of integers). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Comm_spawn` tries to start *maxprocs* identical copies of the MPI +program specified by *command*, establishing communication with them and +returning an intercommunicator. The spawned processes are referred to as +children. The children have their own MPI_COMM_WORLD, which is separate +from that of the parents. :ref:`MPI_Comm_spawn` is collective over *comm*, and +also may not return until :ref:`MPI_Init` has been called in the children. +Similarly, :ref:`MPI_Init` in the children may not return until all parents +have called :ref:`MPI_Comm_spawn`. In this sense, :ref:`MPI_Comm_spawn` in the parents +and :ref:`MPI_Init` in the children form a collective operation over the union +of parent and child processes. The intercommunicator returned by +:ref:`MPI_Comm_spawn` contains the parent processes in the local group and the +child processes in the remote group. The ordering of processes in the +local and remote groups is the same as the ordering of the group +of the *comm* in the parents and of MPI_COMM_WORLD of the children, +respectively. This intercommunicator can be obtained in the children +through the function :ref:`MPI_Comm_get_parent`. + +The MPI standard allows an implementation to use the MPI_UNIVERSE_SIZE +attribute of MPI_COMM_WORLD to specify the number of processes that will +be active in a program. Although this implementation of the MPI standard +defines MPI_UNIVERSE_SIZE, it does not allow the user to set its value. 
+If you try to set the value of MPI_UNIVERSE_SIZE, you will get an error +message. + +The *command* Argument + +The *command* argument is a string containing the name of a program to +be spawned. The string is null-terminated in C. In Fortran, leading and +trailing spaces are stripped. MPI looks for the file first in the +working directory of the spawning process. + +The *argv* Argument + +*argv* is an array of strings containing arguments that are passed to +the program. The first element of *argv* is the first argument passed to +*command*, not, as is conventional in some contexts, the command itself. +The argument list is terminated by NULL in C and an empty string in +Fortran (note that it is the MPI application's responsibility to ensure +that the last entry of the *argv* array is an empty string; the compiler +will not automatically insert it). In Fortran, leading and trailing +spaces are always stripped, so that a string consisting of all spaces is +considered an empty string. The constant MPI_ARGV_NULL may be used in C +and Fortran to indicate an empty argument list. In C, this constant is +the same as NULL. + +In C, the :ref:`MPI_Comm_spawn` argument *argv* differs from the *argv* +argument of *main* in two respects. First, it is shifted by one element. +Specifically, *argv*\ [0] of *main* contains the name of the program +(given by *command*). *argv*\ [1] of *main* corresponds to *argv*\ [0] +in :ref:`MPI_Comm_spawn`, *argv*\ [2] of *main* to *argv*\ [1] of +:ref:`MPI_Comm_spawn`, and so on. Second, *argv* of :ref:`MPI_Comm_spawn` must be +null-terminated, so that its length can be determined. Passing an *argv* +of MPI_ARGV_NULL to :ref:`MPI_Comm_spawn` results in *main* receiving *argc* of +1 and an *argv* whose element 0 is the name of the program. + +The *maxprocs* Argument + +Open MPI tries to spawn *maxprocs* processes. If it is unable to spawn +*maxprocs* processes, it raises an error of class MPI_ERR_SPAWN. 
If MPI +is able to spawn the specified number of processes, :ref:`MPI_Comm_spawn` +returns successfully and the number of spawned processes, *m*, is given +by the size of the remote group of *intercomm*. + +A spawn call with the default behavior is called hard. A spawn call for +which fewer than *maxprocs* processes may be returned is called soft. + +The *info* Argument + +The *info* argument is an opaque handle of type MPI_Info in C and +INTEGER in Fortran. It is a container for a number of user-specified +(*key,value*) pairs. *key* and *value* are strings (null-terminated +char\* in C, character*(*) in Fortran). Routines to create and +manipulate the *info* argument are described in Section 4.10 of the +MPI-2 standard. + +For the SPAWN calls, *info* provides additional, +implementation-dependent instructions to MPI and the runtime system on +how to start processes. An application may pass MPI_INFO_NULL in C or +Fortran. Portable programs not requiring detailed control over process +locations should use MPI_INFO_NULL. + +The following keys for *info* are recognized in Open MPI. (The reserved +values mentioned in Section 5.3.4 of the MPI-2 standard are not +implemented.) + +:: + + Key Type Description + --- ---- ----------- + + host char * Host on which the process should be + spawned. See the orte_host man + page for an explanation of how this + will be used. + hostfile char * Hostfile containing the hosts on which + the processes are to be spawned. See + the orte_hostfile man page for + an explanation of how this will be + used. + add-host char * Add the specified host to the list of + hosts known to this job and use it for + the associated process. This will be + used similarly to the -host option. + add-hostfile char * Hostfile containing hosts to be added + to the list of hosts known to this job + and use it for the associated + process. This will be used similarly + to the -hostfile option. + wdir char * Directory where the executable is + located. 
If files are to be + pre-positioned, then this location is + the desired working directory at time + of execution - if not specified, then + it will automatically be set to + ompi_preload_files_dest_dir. + ompi_prefix char * Same as the --prefix command line + argument to mpirun. + ompi_preload_binary bool If set to true, pre-position the + specified executable onto the remote + host. A destination directory must + also be provided. + ompi_preload_files char * A comma-separated list of files that + are to be pre-positioned in addition + to the executable. Note that this + option does not depend upon + ompi_preload_binary - files can + be moved to the target even if an + executable is not moved. + ompi_stdin_target char * Comma-delimited list of ranks to + receive stdin when forwarded. + ompi_non_mpi bool If set to true, launching a non-MPI + application; the returned communicator + will be MPI_COMM_NULL. Failure to set + this flag when launching a non-MPI + application will cause both the child + and parent jobs to "hang". + ompi_param char * Pass an OMPI MCA parameter to the + child job. If that parameter already + exists in the environment, the value + will be overwritten by the provided + value. + mapper char * Mapper to be used for this job + map_by char * Mapping directive indicating how + processes are to be mapped (slot, + node, socket, etc.). + rank_by char * Ranking directive indicating how + processes are to be ranked (slot, + node, socket, etc.). + bind_to char * Binding directive indicating how + processes are to be bound (core, slot, + node, socket, etc.). 
+ path char * List of directories to search for + the executable + npernode char * Number of processes to spawn on + each node of the allocation + pernode bool Equivalent to npernode of 1 + ppr char * Spawn specified number of processes + on each of the identified object type + env char * Newline-delimited list of envars to + be passed to the spawned procs + +*bool* info keys are actually strings but are evaluated as follows: if +the string value is a number, it is converted to an integer and cast to +a boolean (meaning that zero integers are false and non-zero values are +true). If the string value is (case-insensitive) "yes" or "true", the +boolean is true. If the string value is (case-insensitive) "no" or +"false", the boolean is false. All other string values are unrecognized, +and therefore false. + +The *root* Argument + +All arguments before the *root* argument are examined only on the +process whose rank in *comm* is equal to *root*. The value of these +arguments on other processes is ignored. + +The *array_of_errcodes* Argument + +The *array_of_errcodes* is an array of length *maxprocs* in which MPI +reports the status of the processes that MPI was requested to start. If +all *maxprocs* processes were spawned, *array_of_errcodes* is filled in +with the value MPI_SUCCESS. If anyof the processes are *not* spawned, +*array_of_errcodes* is filled in with the value MPI_ERR_SPAWN. In C or +Fortran, an application may pass MPI_ERRCODES_IGNORE if it is not +interested in the error codes. + + +NOTES +----- + +Completion of :ref:`MPI_Comm_spawn` in the parent does not necessarily mean +that :ref:`MPI_Init` has been called in the children (although the returned +intercommunicator can be used immediately). + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. 
By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Comm_spawn_multiple` :ref:`MPI_Comm_get_parent` mpirun(1) diff --git a/docs/man-openmpi/man3/MPI_Comm_spawn_multiple.3.rst b/docs/man-openmpi/man3/MPI_Comm_spawn_multiple.3.rst new file mode 100644 index 00000000000..30d0ba350c9 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Comm_spawn_multiple.3.rst @@ -0,0 +1,285 @@ +.. _mpi_comm_spawn_multiple: + + +MPI_Comm_spawn_multiple +======================= + +.. include_body + +:ref:`MPI_Comm_spawn_multiple` - Spawns multiple binaries, or the same +binary with multiple sets of arguments. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Comm_spawn_multiple(int count, char *array_of_commands[], + char **array_of_argv[], const int array_of_maxprocs[], const MPI_Info + array_of_info[], int root, MPI_Comm comm, MPI_Comm *intercomm, + int array_of_errcodes[]) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_COMM_SPAWN_MULTIPLE(COUNT, ARRAY_OF_COMMANDS, ARRAY_OF_ARGV, + ARRAY_OF_MAXPROCS, ARRAY_OF_INFO, ROOT, COMM, INTERCOMM, + ARRAY_OF_ERRCODES, IERROR) + INTEGER COUNT, ARRAY_OF_INFO(*), ARRAY_OF_MAXPROCS(*), ROOT, + COMM, INTERCOMM, ARRAY_OF_ERRCODES(*), IERROR + CHARACTER*(*) ARRAY_OF_COMMANDS(*), ARRAY_OF_ARGV(COUNT, *) + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE mpi_f08 + MPI_Comm_spawn_multiple(count, array_of_commands, array_of_argv, + array_of_maxprocs, array_of_info, root, comm, intercomm, + array_of_errcodes, ierror) + INTEGER, INTENT(IN) :: count, array_of_maxprocs(*), root + CHARACTER(LEN=*), INTENT(IN) :: array_of_commands(*) + CHARACTER(LEN=*), INTENT(IN) :: array_of_argv(count, *) + TYPE(MPI_Info), INTENT(IN) :: array_of_info(*) + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Comm), INTENT(OUT) :: intercomm + INTEGER :: array_of_errcodes(*) + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``count``: Number of commands (positive integer, significant to MPI only at *root* -- see NOTES). +* ``array_of_commands``: Programs to be executed (array of strings, significant only at *root*). +* ``array_of_argv``: Arguments for *commands* (array of array of strings, significant only at *root*). +* ``array_of_maxprocs``: Maximum number of processes to start for each command (array of integers, significant only at *root*). +* ``array_of_info``: Info objects telling the runtime system where and how to start processes (array of handles, significant only at *root*). +* ``root``: Rank of process in which previous arguments are examined (integer). +* ``comm``: Intracommunicator containing group of spawning processes (handle). + +OUTPUT PARAMETERS +----------------- +* ``intercomm``: Intercommunicator between original group and the newly spawned group (handle). +* ``array_of_errcodes``: One code per process (array of integers). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Comm_spawn_multiple` is identical to :ref:`MPI_Comm_spawn` except that it +can specify multiple executables. The first argument, *count*, indicates +the number of executables. The next three arguments are arrays of the +corresponding arguments in :ref:`MPI_Comm_spawn`. 
The next argument, +*array_of_info*, is an array of *info* arguments, one for each +executable. See the INFO ARGUMENTS section for more information. + +For the Fortran version of *array_of_argv*, the element +*array_of_argv*\ (i,j) is the jth argument to command number i. + +In any language, an application may use the constant MPI_ARGVS_NULL +(which is likely to be (char \***)0 in C) to specify that no arguments +should be passed to any commands. The effect of setting individual +elements of *array_of_argv* to MPI_ARGV_NULL is not defined. To specify +arguments for some commands but not others, the commands without +arguments should have a corresponding *argv* whose first element is null +((char \*)0 in C and empty string in Fortran). + +All of the spawned processes have the same MPI_COMM_WORLD. Their ranks +in MPI_COMM_WORLD correspond directly to the order in which the commands +are specified in :ref:`MPI_Comm_spawn_multiple`. Assume that m1 processes are +generated by the first command, m2 by the second, etc. The processes +corresponding to the first command have ranks 0, 1,..., m1-1. The +processes in the second command have ranks m1, m1+1, ..., m1+m2-1. The +processes in the third have ranks m1+m2, m1+m2+1, ..., m1+m2+m3-1, etc. + +The *array_of_errcodes* argument is 1-dimensional array of size + +:: + + _ count + \ n , + /_ i=1 i + +where i is the ith element of *array_of_maxprocs*. Command number *i* +corresponds to the i contiguous slots in this array from element + +:: + + _ _ + _ i-1 | _ i | + \ n , to | \ n | -1 + /_ j=1 i | /_ j=1 j | + |_ _| + +Error codes are treated as for :ref:`MPI_Comm_spawn`. + + +INFO ARGUMENTS +-------------- + +The following keys for *info* are recognized in "#PACKAGE_NAME#". (The +reserved values mentioned in Section 5.3.4 of the MPI-2 standard are not +implemented.) + +:: + + Key Type Description + --- ---- ----------- + + host char * Comma-separated list of hosts on which + the processes should be spawned. 
See + the orte_host man page for an + explanation of how this will be used. + hostfile char * Hostfile containing the hosts on which + the processes are to be spawned. See + the orte_hostfile man page for + an explanation of how this will be + used. + add-host char * Add the specified hosts to the list of + hosts known to this job and use it for + the associated processes. This will be + used similarly to the -host option. + add-hostfile char * Hostfile containing hosts to be added + to the list of hosts known to this job + and use it for the associated + process. This will be used similarly + to the -hostfile option. + wdir char * Directory where the executable is + located. If files are to be + pre-positioned, then this location is + the desired working directory at time + of execution - if not specified, then + it will automatically be set to + ompi_preload_files_dest_dir. + ompi_prefix char * Same as the --prefix command line + argument to mpirun. + ompi_preload_binary bool If set to true, pre-position the + specified executable onto the remote + host. A destination directory must + also be provided. + ompi_preload_files char * A comma-separated list of files that + are to be pre-positioned in addition + to the executable. Note that this + option does not depend upon + ompi_preload_binary - files can + be moved to the target even if an + executable is not moved. + ompi_stdin_target char * Comma-delimited list of ranks to + receive stdin when forwarded. + ompi_non_mpi bool If set to true, launching a non-MPI + application; the returned communicator + will be MPI_COMM_NULL. Failure to set + this flag when launching a non-MPI + application will cause both the child + and parent jobs to "hang". + ompi_param char * Pass an OMPI MCA parameter to the + child job. If that parameter already + exists in the environment, the value + will be overwritten by the provided + value. 
+ mapper char * Mapper to be used for this job + map_by char * Mapping directive indicating how + processes are to be mapped (slot, + node, socket, etc.). + rank_by char * Ranking directive indicating how + processes are to be ranked (slot, + node, socket, etc.). + bind_to char * Binding directive indicating how + processes are to be bound (core, slot, + node, socket, etc.). + path char * List of directories to search for + the executable + npernode char * Number of processes to spawn on + each node of the allocation + pernode bool Equivalent to npernode of 1 + ppr char * Spawn specified number of processes + on each of the identified object type + env char * Newline-delimited list of envars to + be passed to the spawned procs + +*bool* info keys are actually strings but are evaluated as follows: if +the string value is a number, it is converted to an integer and cast to +a boolean (meaning that zero integers are false and non-zero values are +true). If the string value is (case-insensitive) "yes" or "true", the +boolean is true. If the string value is (case-insensitive) "no" or +"false", the boolean is false. All other string values are unrecognized, +and therefore false. + +Note that if any of the info handles have *ompi_non_mpi* set to true, +then all info handles must have it set to true. If some are set to true, +but others are set to false (or are unset), MPI_ERR_INFO will be +returned. + +Note that in "#PACKAGE_NAME#", the first array location in +*array_of_info* is applied to all the commands in *array_of_commands*. + + +NOTES +----- + +The argument *count* is interpreted by MPI only at the root, as is +*array_of_argv*. Since the leading dimension of *array_of_argv* is +*count*, a nonpositive value of *count* at a nonroot node could +theoretically cause a runtime bounds check error, even though +*array_of_argv* should be ignored by the subroutine. If this happens, +you should explicitly supply a reasonable value of *count* on the +nonroot nodes. 
+ +Similar to :ref:`MPI_Comm_spawn`, it is the application's responsibility to +terminate each individual set of argv in the *array_of_argv* argument. +In C, each argv array is terminated by a NULL pointer. In Fortran, each +argv array is terminated by an empty string (note that compilers will +not automatically insert this blank string; the application must ensure +to have enough space for an empty string entry as the last element of +the array). + +Other restrictions apply to the *array_of_argv* parameter; see +:ref:`MPI_Comm_spawn`'s description of the *argv* parameter for more +details. + +MPI-3.1 implies (but does not directly state) that the argument +*array_of_commands* must be an array of strings of length *count*. +Unlike the *array_of_argv* parameter, *array_of_commands* does not need +to be terminated with a NULL pointer in C or a blank string in Fortran. +Older versions of Open MPI required that *array_of_commands* be +terminated with a blank string in Fortran; that is no longer required in +this version of Open MPI. + +Calling :ref:`MPI_Comm_spawn` many times would create many sets of children +with different MPI_COMM_WORLDs, whereas :ref:`MPI_Comm_spawn_multiple` creates +children with a single MPI_COMM_WORLD, so the two methods are not +completely equivalent. Also if you need to spawn multiple executables, +you may get better performance by using :ref:`MPI_Comm_spawn_multiple` instead +of calling :ref:`MPI_Comm_spawn` several times. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. 
Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Comm_spawn` :ref:`MPI_Comm_get_parent` mpirun(1) diff --git a/docs/man-openmpi/man3/MPI_Comm_split.3.rst b/docs/man-openmpi/man3/MPI_Comm_split.3.rst new file mode 100644 index 00000000000..a9396130595 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Comm_split.3.rst @@ -0,0 +1,137 @@ +.. _mpi_comm_split: + + +MPI_Comm_split +============== + +.. include_body + +:ref:`MPI_Comm_split` - Creates new communicators based on colors and keys. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Comm_split(MPI_Comm comm, int color, int key, + MPI_Comm *newcomm) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_COMM_SPLIT(COMM, COLOR, KEY, NEWCOMM, IERROR) + INTEGER COMM, COLOR, KEY, NEWCOMM, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Comm_split(comm, color, key, newcomm, ierror) + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, INTENT(IN) :: color, key + TYPE(MPI_Comm), INTENT(OUT) :: newcomm + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``comm``: Communicator (handle). +* ``color``: Control of subset assignment (nonnegative integer). +* ``key``: Control of rank assignment (integer). + +OUTPUT PARAMETERS +----------------- +* ``newcomm``: New communicator (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +This function partitions the group associated with comm into disjoint +subgroups, one for each value of color. Each subgroup contains all +processes of the same color. Within each subgroup, the processes are +ranked in the order defined by the value of the argument key, with ties +broken according to their rank in the old group. A new communicator is +created for each subgroup and returned in newcomm. 
A process may supply +the color value MPI_UNDEFINED, in which case newcomm returns +MPI_COMM_NULL. This is a collective call, but each process is permitted +to provide different values for color and key. + +When you call :ref:`MPI_Comm_split` on an inter-communicator, the processes on +the left with the same color as those on the right combine to create a +new inter-communicator. The key argument describes the relative rank of +processes on each side of the inter-communicator. The function returns +MPI_COMM_NULL for those colors that are specified on only one side of +the inter-communicator, or for those that specify MPI_UNDEFINED as the +color. + +A call to MPI_Comm_create(comm, *group*, *newcomm*) is equivalent to a +call to MPI_Comm_split(comm, *color*,\ *key*, *newcomm*), where all +members of *group* provide *color* = 0 and *key* = rank in group, and +all processes that are not members of *group* provide *color* = +MPI_UNDEFINED. The function :ref:`MPI_Comm_split` allows more general +partitioning of a group into one or more subgroups with optional +reordering. + +The value of *color* must be nonnegative or MPI_UNDEFINED. + + +NOTES +----- + +This is an extremely powerful mechanism for dividing a single +communicating group of processes into k subgroups, with k chosen +implicitly by the user (by the number of colors asserted over all the +processes). Each resulting communicator will be nonoverlapping. Such a +division could be useful for defining a hierarchy of computations, such +as for multigrid or linear algebra. + +Multiple calls to :ref:`MPI_Comm_split` can be used to overcome the requirement +that any call have no overlap of the resulting communicators (each +process is of only one color per call). In this way, multiple +overlapping communication structures can be created. Creative use of the +color and key in such splitting operations is encouraged. + +Note that, for a fixed color, the keys need not be unique. 
It is +:ref:`MPI_Comm_split`'s responsibility to sort processes in ascending order +according to this key, and to break ties in a consistent way. If all the +keys are specified in the same way, then all the processes in a given +color will have the relative rank order as they did in their parent +group. (In general, they will have different ranks.) + +Essentially, making the key value zero for all processes of a given +color means that one needn't really pay attention to the rank-order of +the processes in the new communicator. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Comm_create` :ref:`MPI_Intercomm_create` :ref:`MPI_Comm_dup` :ref:`MPI_Comm_free` diff --git a/docs/man-openmpi/man3/MPI_Comm_split_type.3.rst b/docs/man-openmpi/man3/MPI_Comm_split_type.3.rst new file mode 100644 index 00000000000..baa2714b117 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Comm_split_type.3.rst @@ -0,0 +1,160 @@ +.. _mpi_comm_split_type: + + +MPI_Comm_split_type +=================== + +.. include_body + +:ref:`MPI_Comm_split_type` - Creates new communicators based on colors and +keys. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Comm_split_type(MPI_Comm comm, int split_type, int key, + MPI_Info info, MPI_Comm *newcomm) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! 
or the older form: INCLUDE 'mpif.h' + MPI_COMM_SPLIT_TYPE(COMM, SPLIT_TYPE, KEY, INFO, NEWCOMM, IERROR) + INTEGER COMM, SPLIT_TYPE, KEY, INFO, NEWCOMM, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Comm_split_type(comm, split_type, key, info, newcomm, ierror) + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, INTENT(IN) :: split_type, key + TYPE(MPI_Info), INTENT(IN) :: info + TYPE(MPI_Comm), INTENT(OUT) :: newcomm + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``comm``: Communicator (handle). +* ``split_type``: Type of processes to be grouped together (integer). +* ``key``: Control of rank assignment (integer). +* ``info``: Info argument (handle). + +OUTPUT PARAMETERS +----------------- +* ``newcomm``: New communicator (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +This function partitions the group associated with *comm* into disjoint +subgroups, based on the type specied by *split_type*. Each subgroup +contains all processes of the same type. Within each subgroup, the +processes are ranked in the order defined by the value of the argument +*key*, with ties broken according to their rank in the old group. A new +communicator is created for each subgroup and returned in newcomm. This +is a collective call; all processes must provide the same *split_type*, +but each process is permitted to provide different values for key. An +exception to this rule is that a process may supply the type value +MPI_UNDEFINED, in which case newcomm returns MPI_COMM_NULL. + + +SPLIT TYPES +----------- + +MPI_COMM_TYPE_SHARED + This type splits the communicator into subcommunicators, each of + which can create a shared memory region. + +OMPI_COMM_TYPE_NODE + Synonym for MPI_COMM_TYPE_SHARED. + +OMPI_COMM_TYPE_HWTHREAD + This type splits the communicator into subcommunicators, each of + which belongs to the same hardware thread. 
+ +OMPI_COMM_TYPE_CORE + This type splits the communicator into subcommunicators, each of + which belongs to the same core/processing unit. + +OMPI_COMM_TYPE_L1CACHE + This type splits the communicator into subcommunicators, each of + which belongs to the same L1 cache. + +OMPI_COMM_TYPE_L2CACHE + This type splits the communicator into subcommunicators, each of + which belongs to the same L2 cache. + +OMPI_COMM_TYPE_L3CACHE + This type splits the communicator into subcommunicators, each of + which belongs to the same L3 cache. + +OMPI_COMM_TYPE_SOCKET + This type splits the communicator into subcommunicators, each of + which belongs to the same socket. + +OMPI_COMM_TYPE_NUMA + This type splits the communicator into subcommunicators, each of + which belongs to the same NUMA-node. + +OMPI_COMM_TYPE_BOARD + This type splits the communicator into subcommunicators, each of + which belongs to the same board. + +OMPI_COMM_TYPE_HOST + This type splits the communicator into subcommunicators, each of + which belongs to the same host. + +OMPI_COMM_TYPE_CU + This type splits the communicator into subcommunicators, each of + which belongs to the same computational unit. + +OMPI_COMM_TYPE_CLUSTER + This type splits the communicator into subcommunicators, each of + which belongs to the same cluster. + + +NOTES +----- + +The communicator keys denoted with an *OMPI\_* prefix instead of an +*MPI\_* prefix are specific to Open MPI, and are not part of the MPI +standard. Their use should be protected by the *OPEN_MPI* C preprocessor +macro. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. 
The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Comm_create` :ref:`MPI_Intercomm_create` :ref:`MPI_Comm_dup` :ref:`MPI_Comm_free` + :ref:`MPI_Comm_split` diff --git a/docs/man-openmpi/man3/MPI_Comm_test_inter.3.rst b/docs/man-openmpi/man3/MPI_Comm_test_inter.3.rst new file mode 100644 index 00000000000..e67eed00afb --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Comm_test_inter.3.rst @@ -0,0 +1,113 @@ +.. _mpi_comm_test_inter: + + +MPI_Comm_test_inter +=================== + +.. include_body + +:ref:`MPI_Comm_test_inter` - Tests to see if a comm is an +intercommunicator. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Comm_test_inter(MPI_Comm comm, int *flag) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_COMM_TEST_INTER(COMM, FLAG, IERROR) + INTEGER COMM, IERROR + LOGICAL FLAG + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Comm_test_inter(comm, flag, ierror) + TYPE(MPI_Comm), INTENT(IN) :: comm + LOGICAL, INTENT(OUT) :: flag + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETER +--------------- +* ``comm``: Communicator (handle). + +OUTPUT PARAMETERS +----------------- +* ``flag (Logical.)``: +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +This local routine allows the calling process to determine the type of a +communicator. It returns true for an intercommunicator, false for an +intracommunicator. + +The type of communicator also affects the value returned by three other +functions. 
When dealing with an intracommunicator (enables communication +within a single group), the functions listed below return the expected +values, group size, group, and rank. When dealing with an +inter-communicator, however, they return the following values: + +:: + + MPI_Comm_size Returns the size of the local group. + MPI_Comm_group Returns the local group. + MPI_Comm_rank Returns the rank in the local group. + +To return the remote group and remote group size of an +inter-communicator, use the :ref:`MPI_Comm_remote_group` and +:ref:`MPI_Comm_remote_size` functions. + +The operation :ref:`MPI_Comm_compare` is valid for intercommunicators. Both +communicators must be either intra- or intercommunicators, or else +MPI_UNEQUAL results. Both corresponding local and remote groups must +compare correctly to get the results MPI_CONGRUENT and MPI_SIMILAR. In +particular, it is possible for MPI_SIMILAR to result because either the +local or remote groups were similar but not identical. + +The following accessors provide consistent access to the remote group of +an intercommunicator: :ref:`MPI_Comm_remote_size`, :ref:`MPI_Comm_remote_group`. + +The intercommunicator accessors (:ref:`MPI_Comm_test_inter`, +:ref:`MPI_Comm_remote_size`, MPI_Comm_remote_group) are all local operations. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. 
seealso:: + :ref:`MPI_Comm_remote_group` :ref:`MPI_Comm_remote_size` :ref:`MPI_Intercomm_create` + :ref:`MPI_Intercomm_merge` diff --git a/docs/man-openmpi/man3/MPI_Compare_and_swap.3.rst b/docs/man-openmpi/man3/MPI_Compare_and_swap.3.rst new file mode 100644 index 00000000000..767885c7836 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Compare_and_swap.3.rst @@ -0,0 +1,126 @@ +.. _mpi_compare_and_swap: + + +MPI_Compare_and_swap +==================== + +.. include_body + +:ref:`MPI_Compare_and_swap` - Perform RMA compare-and-swap + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Compare_and_swap(const void *origin_addr, const void *compare_addr, + void *result_addr, MPI_Datatype datatype, int target_rank, + MPI_Aint target_disp, MPI_Win win) + + +Fortran Syntax (see FORTRAN 77 NOTES) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_COMPARE_AND_SWAP(ORIGIN_ADDR, COMPARE_ADDR, RESULT_ADDR, DATATYPE, TARGET_RANK, + TARGET_DISP, WIN, IERROR) + ORIGIN_ADDR, COMPARE_ADDR, RESULT_ADDR(*) + INTEGER(KIND=MPI_ADDRESS_KIND) TARGET_DISP + INTEGER DATATYPE, TARGET_RANK, WIN, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Compare_and_swap(origin_addr, compare_addr, result_addr, datatype, + target_rank, target_disp, win, ierror) + TYPE(*), DIMENSION(..), INTENT(IN) :: origin_addr, compare_addr + TYPE(*), DIMENSION(..) :: result_addr + TYPE(MPI_Datatype), INTENT(IN) :: datatype + INTEGER, INTENT(IN) :: target_rank + INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: target_disp + TYPE(MPI_Win), INTENT(IN) :: win + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``origin_addr``: Initial address of buffer (choice). +* ``compare_addr``: Initial address of compare buffer (choice). +* ``result_addr``: Initial address of result buffer (choice). 
+* ``datatype``: Data type of the entry in origin, result, and target buffers (handle). +* ``target_rank``: Rank of target (nonnegative integer). +* ``target_disp``: Displacement from start of window to beginning of target buffer (nonnegative integer). +* ``win``: Window object (handle). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +This function compares one element of type *datatype* in the compare +buffer *compare_addr* with the buffer at offset *target_disp* in the +target window specified by *target_rank* and *win* and replaces the +value at the target with the value in the origin buffer *origin_addr* if +the compare buffer and the target buffer are identical. The original +value at the target is returned in the buffer *result_addr*. The +parameter *datatype* must belong to one of the following categories of +predefined datatypes: C integer, Fortran integer, Logical, +Multi-language types, or Byte as specified in MPI-3 section 5.9.2 on page 176. + +The origin and result buffers (*origin_addr* and *result_addr*) must be +disjoint. + + +FORTRAN 77 NOTES +---------------- + +The MPI standard prescribes portable Fortran syntax for the +*TARGET_DISP* argument only for Fortran 90. FORTRAN 77 users may use the +non-portable syntax + +:: + + INTEGER*MPI_ADDRESS_KIND TARGET_DISP + +where MPI_ADDRESS_KIND is a constant defined in mpif.h and gives the +length of the declared integer in bytes. + + +NOTES +----- + +It is the user's responsibility to guarantee that, when using the +accumulate functions, the target displacement argument is such that +accesses to the window are properly aligned according to the data type +arguments in the call to the :ref:`MPI_Compare_and_swap` function. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. 
+ +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler +MPI_ERRORS_RETURN may be used to cause error values to be returned. Note +that MPI does not guarantee that an MPI program can continue past an +error. diff --git a/docs/man-openmpi/man3/MPI_Dims_create.3.rst b/docs/man-openmpi/man3/MPI_Dims_create.3.rst new file mode 100644 index 00000000000..7877ea3f5d3 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Dims_create.3.rst @@ -0,0 +1,116 @@ +.. _mpi_dims_create: + + +MPI_Dims_create +=============== + +.. include_body + +:ref:`MPI_Dims_create` - Creates a division of processors in a Cartesian +grid. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Dims_create(int nnodes, int ndims, int dims[]) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_DIMS_CREATE(NNODES, NDIMS, DIMS, IERROR) + INTEGER NNODES, NDIMS, DIMS(*), IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Dims_create(nnodes, ndims, dims, ierror) + INTEGER, INTENT(IN) :: nnodes, ndims + INTEGER, INTENT(INOUT) :: dims(ndims) + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``nnodes``: Number of nodes in a grid (integer). +* ``ndims``: Number of Cartesian dimensions (integer). + +IN/OUT PARAMETER +---------------- +* ``dims``: Integer array of size ndims specifying the number of nodes in each dimension. + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). 
+ +DESCRIPTION +----------- + +For Cartesian topologies, the function :ref:`MPI_Dims_create` helps the user +select a balanced distribution of processes per coordinate direction, +depending on the number of processes in the group to be balanced and +optional constraints that can be specified by the user. One use is to +partition all the processes (the size of MPI_COMM_WORLD's group) into an +n-dimensional topology. + +The entries in the array *dims* are set to describe a Cartesian grid +with *ndims* dimensions and a total of *nnodes* nodes. The dimensions +are set to be as close to each other as possible, using an appropriate +divisibility algorithm. The caller may further constrain the operation +of this routine by specifying elements of array dims. If dims[i] is set +to a positive number, the routine will not modify the number of nodes in +dimension i; only those entries where dims[i] = 0 are modified by the +call. + +Negative input values of dims[i] are erroneous. An error will occur if +nnodes is not a multiple of ((pi) over (i, dims[i] != 0)) dims[i]. + +For dims[i] set by the call, dims[i] will be ordered in nonincreasing +order. Array dims is suitable for use as input to routine +:ref:`MPI_Cart_create`. :ref:`MPI_Dims_create` is local. + +**Example:** + +:: + + + dims + before dims + call function call on return + ----------------------------------------------------- + (0,0) MPI_Dims_create(6, 2, dims) (3,2) + (0,0) MPI_Dims_create(7, 2, dims) (7,1) + (0,3,0) MPI_Dims_create(6, 3, dims) (2,3,1) + (0,3,0) MPI_Dims_create(7, 3, dims) erroneous call + ------------------------------------------------------ + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. 
The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_Dist_graph_create.3.rst b/docs/man-openmpi/man3/MPI_Dist_graph_create.3.rst new file mode 100644 index 00000000000..1870f1e3768 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Dist_graph_create.3.rst @@ -0,0 +1,152 @@ +.. _mpi_dist_graph_create: + + +MPI_Dist_graph_create +===================== + +.. include_body + +:ref:`MPI_Dist_graph_create` - Makes a new communicator to which topology +information has been attached. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Dist_graph_create(MPI_Comm comm_old, int n, const int sources[], + const int degrees[], const int destinations[], const int weights[], + MPI_Info info, int reorder, MPI_Comm *comm_dist_graph) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_DIST_GRAPH_CREATE(COMM_OLD, N, SOURCES, DEGREES, DESTINATIONS, WEIGHTS, + INFO, REORDER, COMM_DIST_GRAPH, IERROR) + INTEGER COMM_OLD, N, SOURCES(*), DEGRES(*), WEIGHTS(*), INFO + INTEGER COMM_DIST_GRAPH, IERROR + LOGICAL REORDER + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Dist_Graph_create(comm_old, n, sources, degrees, destinations, weights, + info, reorder, comm_dist_graph, ierror) + TYPE(MPI_Comm), INTENT(IN) :: comm_old + INTEGER, INTENT(IN) :: n, sources(n), degrees(n), destinations(*) + INTEGER, INTENT(IN) :: weights(*) + TYPE(MPI_Info), INTENT(IN) :: info + LOGICAL, INTENT(IN) :: reorder + TYPE(MPI_Comm), INTENT(OUT) :: comm_dist_graph + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``comm_old``: Input communicator without topology (handle). 
+* ``n``: Number of source nodes for which this process specifies edges (non-negative integer). +* ``sources``: Array containing the *n* source nodes for which this process specifies edges (array of non-negative integers). +* ``degrees``: Array specifying the number of destinations for each source node in the source node array (array of non-negative integers). +* ``destinations``: Destination nodes for the source nodes in the source node array (array of non-negative integers). +* ``weights``: Weights for source to destination edges (array of non-negative integers). +* ``info``: Hints on optimization and interpretation of weights (handle). +* ``reorder``: Ranking may be reordered (true) or not (false) (logical). + +OUTPUT PARAMETERS +----------------- +* ``comm_dist_graph``: Communicator with distributed graph topology added (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Dist_graph_create` creates a new communicator *comm_dist_graph* with +distrubuted graph topology and returns a handle to the new communicator. +The number of processes in *comm_dist_graph* is identical to the number +of processes in *comm_old*. Concretely, each process calls the +constructor with a set of directed (source,destination) communication +edges as described below. Every process passes an array of *n* source +nodes in the *sources* array. For each source node, a non-negative +number of destination nodes is specied in the *degrees* array. The +destination nodes are stored in the corresponding consecutive segment of +the *destinations* array. More precisely, if the i-th node in sources is +s, this specifies *degrees*\ [i] *edges* (s,d) with d of the j-th such +edge stored in +*destinations*\ [*degrees*\ [0]+...+\ *degrees*\ [i-1]+j]. The weight of +this edge is stored in +*weights*\ [*degrees*\ [0]+...+\ *degrees*\ [i-1]+j]. 
Both the *sources* +and the *destinations* arrays may contain the same node more than once, +and the order in which nodes are listed as destinations or sources is +not signicant. Similarly, different processes may specify edges with the +same source and destination nodes. Source and destination nodes must be +process ranks of comm_old. Different processes may specify different +numbers of source and destination nodes, as well as different source to +destination edges. This allows a fully distributed specification of the +communication graph. Isolated processes (i.e., processes with no +outgoing or incoming edges, that is, processes that do not occur as +source or destination node in the graph specication) are allowed. The +call to :ref:`MPI_Dist_graph_create` is collective. + +If reorder = false, all processes will have the same rank in +comm_dist_graph as in comm_old. If reorder = true then the MPI library +is free to remap to other processes (of comm_old) in order to improve +communication on the edges of the communication graph. The weight +associated with each edge is a hint to the MPI library about the amount +or intensity of communication on that edge, and may be used to compute a + + +WEIGHTS +------- + +Weights are specied as non-negative integers and can be used to +influence the process remapping strategy and other internal MPI +optimizations. For instance, approximate count arguments of later +communication calls along specic edges could be used as their edge +weights. Multiplicity of edges can likewise indicate more intense +communication between pairs of processes. However, the exact meaning of +edge weights is not specied by the MPI standard and is left to the +implementation. An application can supply the special value +MPI_UNWEIGHTED for the weight array to indicate that all edges have the +same (effectively no) weight. It is erroneous to supply MPI_UNWEIGHTED +for some but not all processes of comm_old. 
If the graph is weighted but +*n* = 0, then MPI_WEIGHTS_EMPTY or any arbitrary array may be passed to +weights. Note that MPI_UNWEIGHTED and MPI_WEIGHTS_EMPTY are not special +weight values; rather they are special values for the total array +argument. In Fortran, MPI_UNWEIGHTED and MPI_WEIGHTS_EMPTY are objects +like MPI_BOTTOM (not usable for initialization or assignment). See MPI-3 +section 2.5.4. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Dist_graph_create_adjacent` :ref:`MPI_Dist_graph_neighbors` + :ref:`MPI_Dist_graph_neighbors_count` diff --git a/docs/man-openmpi/man3/MPI_Dist_graph_create_adjacent.3.rst b/docs/man-openmpi/man3/MPI_Dist_graph_create_adjacent.3.rst new file mode 100644 index 00000000000..9d17d79c410 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Dist_graph_create_adjacent.3.rst @@ -0,0 +1,141 @@ +.. _mpi_dist_graph_create_adjacent: + + +MPI_Dist_graph_create_adjacent +============================== + +.. include_body + +:ref:`MPI_Dist_graph_create_adjacent` - Makes a new communicator to which +topology information has been attached. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Dist_graph_create_adjacent(MPI_Comm comm_old, int indegree, const int sources[], + const int sourceweights[], int outdegree, const int destinations[], const int destweights[], + MPI_Info info, int reorder, MPI_Comm *comm_dist_graph) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_DIST_GRAPH_CREATE_ADJACENT(COMM_OLD, INDEGREE, SOURCES, SOURCEWEIGHTS, OUTDEGREE, + DESTINATIONS, DESTWEIGHTS, INFO, REORDER, COMM_DIST_GRAPH, IERROR) + INTEGER COMM_OLD, INDEGREE, SOURCES(*), SOURCEWEIGHTS(*), OUTDEGREE, DESTINATIONS(*), DESTWEIGHTS(*), INFO + INTEGER COMM_DIST_GRAPH, IERROR + LOGICAL REORDER + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Dist_Graph_create_adjacent(comm_old, ndegree, sources, sourceweights, + outdegree, destinations, destweights, info, reorder, + comm_dist_graph, ierror) + TYPE(MPI_Comm), INTENT(IN) :: comm_old + INTEGER, INTENT(IN) :: indegree, sources(indegree), outdegree, destinations(outdegree) + INTEGER, INTENT(IN) :: sourceweights(*), destweights(*) + TYPE(MPI_Info), INTENT(IN) :: info + LOGICAL, INTENT(IN) :: reorder + TYPE(MPI_Comm), INTENT(OUT) :: comm_dist_graph + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``comm_old``: Input communicator without topology (handle). +* ``indegree``: Size of *sources* and *sourceweights* arrays (non-negative integer). +* ``sources``: Ranks of processes for which the calling process is a destination (array of non-negative integers). +* ``sourceweights``: Weights of the edges into the calling process (array of non-negative integers). +* ``outdegree``: Size of *destinations* and *destweights* arrays (non-negative integer). +* ``destinations``: Ranks of processes for which the calling process is a source (array of non-negative integers). +* ``destweights``: Weights of the edges out of the calling process (array of non-negative integers). +* ``info``: Hints on optimization and interpretation of weights (handle). +* ``reorder``: Ranking may be reordered (true) or not (false) (logical). + +OUTPUT PARAMETERS +----------------- +* ``comm_dist_graph``: Communicator with distributed graph topology added (handle). 
+* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Dist_graph_create_adjacent` creats a new communicator +*comm_dist_graph* with distrubuted graph topology and returns a handle +to the new communicator. The number of processes in *comm_dist_graph* is +identical to the number of processes in *comm_old*. Each process passes +all information about its incoming and outgoing edges in the virtual +distributed graph topology. The calling processes must ensure that each +edge of the graph is described in the source and in the destination +process with the same weights. If there are multiple edges for a given +(source,dest) pair, then the sequence of the weights of these edges does +not matter. The complete communication topology is the combination of +all edges shown in the *sources* arrays of all processes in comm_old, +which must be identical to the combination of all edges shown in the +*destinations* arrays. Source and destination ranks must be process +ranks of comm_old. This allows a fully distributed specication of the +communication graph. Isolated processes (i.e., processes with no +outgoing or incoming edges, that is, processes that have specied +indegree and outdegree as zero and thus do not occur as source or +destination rank in the graph specication) are allowed. The call to +:ref:`MPI_Dist_graph_create_adjacent` is collective. + + +WEIGHTS +------- + +Weights are specied as non-negative integers and can be used to +influence the process remapping strategy and other internal MPI +optimizations. For instance, approximate count arguments of later +communication calls along specic edges could be used as their edge +weights. Multiplicity of edges can likewise indicate more intense +communication between pairs of processes. However, the exact meaning of +edge weights is not specied by the MPI standard and is left to the +implementation. 
An application can supply the special value +MPI_UNWEIGHTED for the weight array to indicate that all edges have the +same (effectively no) weight. It is erroneous to supply MPI_UNWEIGHTED +for some but not all processes of comm_old. If the graph is weighted but +*indegree* or *outdegree* is zero, then MPI_WEIGHTS_EMPTY or any +arbitrary array may be passed to sourceweights or destweights +respectively. Note that MPI_UNWEIGHTED and MPI_WEIGHTS_EMPTY are not +special weight values; rather they are special values for the total +array argument. In Fortran, MPI_UNWEIGHTED and MPI_WEIGHTS_EMPTY are +objects like MPI_BOTTOM (not usable for initialization or assignment). +See MPI-3 section 2.5.4. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Dist_graph_create` :ref:`MPI_Dist_graph_neighbors` + :ref:`MPI_Dist_graph_neighbors_count` diff --git a/docs/man-openmpi/man3/MPI_Dist_graph_neighbors.3.rst b/docs/man-openmpi/man3/MPI_Dist_graph_neighbors.3.rst new file mode 100644 index 00000000000..8a2d4c141ec --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Dist_graph_neighbors.3.rst @@ -0,0 +1,100 @@ +.. _mpi_dist_graph_neighbors: + + +MPI_Dist_graph_neighbors +======================== + +.. include_body + +:ref:`MPI_Dist_graph_neighbors` - Returns the neighbors of the calling +process in a distributed graph topology. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. 
code-block:: c + + #include + + int MPI_Dist_graph_neighbors(MPI_Comm comm, int maxindegree, int sources[], int sourceweights[], + int maxoutdegree, int destinations[], int destweights[]) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_DIST_GRAPH_NEIGHBORS(COMM, MAXINDEGREE, SOURCES, SOURCEWEIGHTS, + MAXOUTDEGREE, DESTINATIONS, DESTWEIGHTS, IERROR) + INTEGER COMM, MAXINDEGREE, SOURCES(*), SOURCEWEIGHTS(*), MAXOUTDEGREE, + DESTINATIONS(*), DESTWEIGHTS(*), IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Dist_Graph_neighbors(comm, maxindegree, sources, sourceweights, + maxoutdegree, destinations, destweights, ierror) + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, INTENT(IN) :: maxindegree, maxoutdegree + INTEGER, INTENT(OUT) :: sources(maxindegree), destinations(maxoutdegree) + INTEGER :: sourceweights(*), destweights(*) + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``comm``: Communicator with distributed graph topology (handle). +* ``maxindegree``: Size of *sources* and *sourceweights* arrays (non-negative integer). +* ``maxoutdegree``: Size of *destinations* and *destweights* arrays (non-negative integer). + +OUTPUT PARAMETERS +----------------- +* ``sources``: Processes for which the calling process is a destination (array of non-negative integers). +* ``sourceweights``: Weights of the edges into the calling process (array of non-negative integers). +* ``destinations``: Processes for which the calling process is a source (array of non-negative integers). +* ``destweights``: Weights of the edges out of the calling process (array of non-negative integers). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Dist_graph_neighbors` returns the source and destination ranks in a +distributed graph topology for the calling process. 
This call will +return up to *maxindegree* source ranks in the *sources* array and up to +*maxoutdegree* destination ranks in the *destinations* array. If weights +were specified at the time of the communicator's creation then the +associated weights are returned in the *sourceweights* and *destweights* +arrays. If the communicator was created with +:ref:`MPI_Dist_graph_create_adjacent` then the order of the values in *sources* +and *destinations* is identical to the input that was used by the +process with the same rank in comm_old in the creation call. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Dist_graph_neighbors_count` diff --git a/docs/man-openmpi/man3/MPI_Dist_graph_neighbors_count.3.rst b/docs/man-openmpi/man3/MPI_Dist_graph_neighbors_count.3.rst new file mode 100644 index 00000000000..9860368b082 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Dist_graph_neighbors_count.3.rst @@ -0,0 +1,89 @@ +.. _mpi_dist_graph_neighbors_count: + + +MPI_Dist_graph_neighbors_count +============================== + +.. include_body + +:ref:`MPI_Dist_graph_neighbors_count` - Returns the number of in and out +edges for the calling processes in a distributed graph topology and a +flag indicating whether the distributed graph is weighted. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. 
code-block:: c + + #include + + int MPI_Dist_graph_neighbors_count(MPI_Comm comm, int *indegree, + int *outdegree, int *weighted) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_DIST_GRAPH_NEIGHBORS_COUNT(COMM, INDEGREE, OUTDEGREE, WEIGHTED, IERROR) + INTEGER COMM, INDEGREE, OUTDEGREE, IERROR + LOGICAL WEIGHTED + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Dist_graph_neighbors_count(comm, indegree, outdegree, weighted, ierror) + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, INTENT(IN) :: indegree, outdegree + INTEGER, INTENT(OUT) :: weighted + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``comm``: Communicator with distributed graph topology (handle). + +OUTPUT PARAMETERS +----------------- +* ``indegree``: Number of edges into this process (non-negative integer). +* ``outdegree``: Number of edges out of this process (non-negative integer). +* ``weighted``: False if MPI_UNWEIGHTED was supplied during creation, true otherwise (logical). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Dist_graph_neighbors_count` and :ref:`MPI_Graph_neighbors` provide adjacency +information for a distributed graph topology. +:ref:`MPI_Dist_graph_neighbors_count` returns the number of sources and +destinations for the calling process. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. 
Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Dist_graph_neighbors` diff --git a/docs/man-openmpi/man3/MPI_Errhandler_create.3.rst b/docs/man-openmpi/man3/MPI_Errhandler_create.3.rst new file mode 100644 index 00000000000..cd09cb8aef2 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Errhandler_create.3.rst @@ -0,0 +1,101 @@ +.. _mpi_errhandler_create: + + +MPI_Errhandler_create +===================== + +.. include_body + +:ref:`MPI_Errhandler_create` - Creates an MPI-style error handler -- use of +this routine is deprecated. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Errhandler_create(MPI_Handler_function *function, + MPI_Errhandler *errhandler) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + INCLUDE 'mpif.h' + MPI_ERRHANDLER_CREATE(FUNCTION, ERRHANDLER, IERROR) + EXTERNAL FUNCTION + INTEGER ERRHANDLER, IERROR + + +INPUT PARAMETER +--------------- +* ``function``: User-defined error handling procedure. + +OUTPUT PARAMETERS +----------------- +* ``errhandler``: MPI error handler (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +Note that use of this routine is *deprecated* as of MPI-2. Please use +:ref:`MPI_Comm_create_errhandler` instead. + +Registers the user routine function for use as an MPI error handler. +Returns in errhandler a handle to the registered error handler. + +In the C language, the user routine should be a C function of type +MPI_Handler_function, which is defined as + +:: + + typedef void (MPI_Handler_function)(MPI_Comm *, int *, ...); + +The first argument is the communicator in use. The second is the error +code to be returned by the MPI routine that raised the error. If the +routine would have returned MPI_ERR_IN_STATUS, it is the error code +returned in the status for the request that caused the error handler to +be invoked. 
The remaining arguments are stdargs arguments whose number +and meaning is implementation-dependent. An implementation should +clearly document these arguments. Addresses are used so that the handler +may be written in Fortran. + + +NOTE +---- + +The MPI-1 Standard states that an implementation may make the output +value (errhandler) simply the address of the function. However, the +action of MPI_Errhandler\_ free makes this impossible, since it is +required to set the value of the argument to MPI_ERRHANDLER_NULL. In +addition, the actual error handler must remain until all communicators +that use it are freed. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Comm_create_errhandler` :ref:`MPI_Comm_get_errhandler` + :ref:`MPI_Comm_set_errhandler` diff --git a/docs/man-openmpi/man3/MPI_Errhandler_free.3.rst b/docs/man-openmpi/man3/MPI_Errhandler_free.3.rst new file mode 100644 index 00000000000..af390182389 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Errhandler_free.3.rst @@ -0,0 +1,81 @@ +.. _mpi_errhandler_free: + + +MPI_Errhandler_free +=================== + +.. include_body + +:ref:`MPI_Errhandler_free` - Frees an MPI-style error handler. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Errhandler_free(MPI_Errhandler *errhandler) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! 
or the older form: INCLUDE 'mpif.h' + MPI_ERRHANDLER_FREE(ERRHANDLER, IERROR) + INTEGER ERRHANDLER, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Errhandler_free(errhandler, ierror) + TYPE(MPI_Errhandler), INTENT(INOUT) :: errhandler + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETER +--------------- +* ``errhandler``: MPI error handler (handle). Set to MPI_ERRHANDLER_NULL on exit. + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +Marks the error handler associated with errhandler for deallocation and +sets errhandler to MPI_ERRHANDLER_NULL. The error handler will be +deallocated after all communicators associated with it have been +deallocated. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Comm_create_errhandler` :ref:`MPI_Comm_get_errhandler` + :ref:`MPI_Comm_set_errhandler` diff --git a/docs/man-openmpi/man3/MPI_Errhandler_get.3.rst b/docs/man-openmpi/man3/MPI_Errhandler_get.3.rst new file mode 100644 index 00000000000..09f0ebfd4a8 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Errhandler_get.3.rst @@ -0,0 +1,77 @@ +.. _mpi_errhandler_get: + + +MPI_Errhandler_get +================== + +.. include_body + +:ref:`MPI_Errhandler_get` - Gets the error handler for a communicator -- +use of this routine is deprecated. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. 
code-block:: c + + #include + + int MPI_Errhandler_get(MPI_Comm comm, MPI_Errhandler *errhandler) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + INCLUDE 'mpif.h' + MPI_ERRHANDLER_GET(COMM, ERRHANDLER, IERROR) + INTEGER COMM, ERRHANDLER, IERROR + + +INPUT PARAMETER +--------------- +* ``comm``: Communicator to get the error handler from (handle). + +OUTPUT PARAMETERS +----------------- +* ``errhandler``: MPI error handler currently associated with communicator (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +Note that use of this routine is *deprecated* as of MPI-2. Please use +:ref:`MPI_Comm_get_errhandler` instead. + +Returns in errhandler (a handle to) the error handler that is currently +associated with communicator comm. + +**Example:** A library function may register at its entry point the +current error handler for a communicator, set its own private error +handler for this communicator, and restore before exiting the previous +error handler. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Comm_create_errhandler` :ref:`MPI_Comm_get_errhandler` + :ref:`MPI_Comm_set_errhandler` diff --git a/docs/man-openmpi/man3/MPI_Errhandler_set.3.rst b/docs/man-openmpi/man3/MPI_Errhandler_set.3.rst new file mode 100644 index 00000000000..4aebea67f3d --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Errhandler_set.3.rst @@ -0,0 +1,73 @@ +.. 
_mpi_errhandler_set: + + +MPI_Errhandler_set +================== + +.. include_body + +:ref:`MPI_Errhandler_set` - Sets the error handler for a communicator -- +use of this routine is deprecated. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Errhandler_set(MPI_Comm comm, MPI_Errhandler errhandler) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + INCLUDE 'mpif.h' + MPI_ERRHANDLER_SET(COMM, ERRHANDLER, IERROR) + INTEGER COMM, ERRHANDLER, IERROR + + +INPUT PARAMETERS +---------------- +* ``comm``: Communicator to set the error handler for (handle). +* ``errhandler``: New MPI error handler for communicator (handle). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +Note that use of this routine is *deprecated* as of MPI-2. Please use +:ref:`MPI_Comm_set_errhandler` instead. + +Associates the new error handler errhandler with communicator comm at +the calling process. Note that an error handler is always associated +with the communicator. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. 
seealso:: + :ref:`MPI_Comm_create_errhandler` :ref:`MPI_Comm_get_errhandler` + :ref:`MPI_Comm_set_errhandler` diff --git a/docs/man-openmpi/man3/MPI_Error_class.3.rst b/docs/man-openmpi/man3/MPI_Error_class.3.rst new file mode 100644 index 00000000000..ed47097a43c --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Error_class.3.rst @@ -0,0 +1,80 @@ +.. _mpi_error_class: + + +MPI_Error_class +=============== + +.. include_body + +:ref:`MPI_Error_class` - Converts an error code into an error class. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Error_class(int errorcode, int *errorclass) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_ERROR_CLASS(ERRORCODE, ERRORCLASS, IERROR) + INTEGER ERRORCODE, ERRORCLASS, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Error_class(errorcode, errorclass, ierror) + INTEGER, INTENT(IN) :: errorcode + INTEGER, INTENT(OUT) :: errorclass + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETER +--------------- +* ``errorcode``: Error code returned by an MPI routine. + +OUTPUT PARAMETERS +----------------- +* ``errorclass``: Error class associated with errorcode. +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +The function :ref:`MPI_Error_class` maps each standard error code (error class) +onto itself. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. 
Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Error_string` diff --git a/docs/man-openmpi/man3/MPI_Error_string.3.rst b/docs/man-openmpi/man3/MPI_Error_string.3.rst new file mode 100644 index 00000000000..4d0c0f9234f --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Error_string.3.rst @@ -0,0 +1,87 @@ +.. _mpi_error_string: + + +MPI_Error_string +================ + +.. include_body + +:ref:`MPI_Error_string` - Returns a string for a given error code. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Error_string(int errorcode, char *string, int *resultlen) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_ERROR_STRING(ERRORCODE, STRING, RESULTLEN, IERROR) + INTEGER ERRORCODE, RESULTLEN, IERROR + CHARACTER*(*) STRING + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Error_string(errorcode, string, resultlen, ierror) + INTEGER, INTENT(IN) :: errorcode + CHARACTER(LEN=MPI_MAX_ERROR_STRING), INTENT(OUT) :: string + INTEGER, INTENT(OUT) :: resultlen + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETER +--------------- +* ``errorcode``: Error code returned by an MPI routine or an MPI error class. + +OUTPUT PARAMETERS +----------------- +* ``string``: Text that corresponds to the errorcode. +* ``resultlen``: Length of string. +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +Returns the error string associated with an error code or class. The +argument string must represent storage that is at least +MPI_MAX_ERROR_STRING characters long. + +The number of characters actually written is returned in the output +argument, resultlen. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. 
+ +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Error_class` diff --git a/docs/man-openmpi/man3/MPI_Exscan.3.rst b/docs/man-openmpi/man3/MPI_Exscan.3.rst new file mode 100644 index 00000000000..08b8a7f39bb --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Exscan.3.rst @@ -0,0 +1,186 @@ +.. _mpi_exscan: + + +MPI_Exscan +========== + +.. include_body + +:ref:`MPI_Exscan`, :ref:`MPI_Iexscan` - Computes an exclusive scan (partial +reduction) + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Exscan(const void *sendbuf, void *recvbuf, int count, + MPI_Datatype datatype, MPI_Op op, MPI_Comm comm) + + int MPI_Iexscan(const void *sendbuf, void *recvbuf, int count, + MPI_Datatype datatype, MPI_Op op, MPI_Comm comm, + MPI_Request *request) + + int MPI_Exscan_init(const void *sendbuf, void *recvbuf, int count, + MPI_Datatype datatype, MPI_Op op, MPI_Comm comm, + MPI_Info info, MPI_Request *request) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_EXSCAN(SENDBUF, RECVBUF, COUNT, DATATYPE, OP, COMM, IERROR) + SENDBUF(*), RECVBUF(*) + INTEGER COUNT, DATATYPE, OP, COMM, IERROR + + MPI_IEXSCAN(SENDBUF, RECVBUF, COUNT, DATATYPE, OP, COMM, REQUEST, IERROR) + SENDBUF(*), RECVBUF(*) + INTEGER COUNT, DATATYPE, OP, COMM, REQUEST, IERROR + + MPI_EXSCAN_INIT(SENDBUF, RECVBUF, COUNT, DATATYPE, OP, COMM, INFO, REQUEST, IERROR) + SENDBUF(*), RECVBUF(*) + INTEGER COUNT, DATATYPE, OP, COMM, INFO, REQUEST, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE mpi_f08 + MPI_Exscan(sendbuf, recvbuf, count, datatype, op, comm, ierror) + TYPE(*), DIMENSION(..), INTENT(IN) :: sendbuf + TYPE(*), DIMENSION(..) :: recvbuf + INTEGER, INTENT(IN) :: count + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Op), INTENT(IN) :: op + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Iexscan(sendbuf, recvbuf, count, datatype, op, comm, request, ierror) + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: sendbuf + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: recvbuf + INTEGER, INTENT(IN) :: count + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Op), INTENT(IN) :: op + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Exscan_init(sendbuf, recvbuf, count, datatype, op, comm, info, request, ierror) + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: sendbuf + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: recvbuf + INTEGER, INTENT(IN) :: count + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Op), INTENT(IN) :: op + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Info), INTENT(IN) :: info + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``sendbuf``: Send buffer (choice). +* ``count``: Number of elements in input buffer (integer). +* ``datatype``: Data type of elements of input buffer (handle). +* ``op``: Operation (handle). +* ``comm``: Communicator (handle). + +OUTPUT PARAMETERS +----------------- +* ``recvbuf``: Receive buffer (choice). +* ``request``: Request (handle, non-blocking only). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Exscan` is used to perform an exclusive prefix reduction on data +distributed across the calling processes. 
The operation returns, in the +*recvbuf* of the process with rank i, the reduction (calculated +according to the function *op*) of the values in the *sendbuf*\ s of +processes with ranks 0, ..., i-1. Compare this with the functionality of +:ref:`MPI_Scan`, which calculates over the range 0, ..., i (inclusive). The +type of operations supported, their semantics, and the constraints on +send and receive buffers are as for :ref:`MPI_Reduce`. + +The value in *recvbuf* on process 0 is undefined and unreliable as +*recvbuf* is not significant for process 0. The value of *recvbuf* on +process 1 is always the value in *sendbuf* on process 0. + + +USE OF IN-PLACE OPTION +---------------------- + +The \`in place' option for intracommunicators is specified by passing +MPI_IN_PLACE in the *sendbuf* argument. In this case, the input data is +taken from the receive buffer, and replaced by the output data. + +Note that MPI_IN_PLACE is a special kind of value; it has the same +restrictions on its use as MPI_BOTTOM. + +Because the in-place option converts the receive buffer into a +send-and-receive buffer, a Fortran binding that includes INTENT must +mark these as INOUT, not OUT. + + +NOTES +----- + +MPI does not specify which process computes which operation. In +particular, both processes 0 and 1 may participate in the computation +even though the results for both processes' *recvbuf* are degenerate. +Therefore, all processes, including 0 and 1, must provide the same *op*. + +It can be argued, from a mathematical perspective, that the definition +of :ref:`MPI_Exscan` is unsatisfactory because the output at process 0 is +undefined. The "mathematically correct" output for process 0 would be +the unit element of the reduction operation. However, such a definition +of an exclusive scan would not work with user-defined *op* functions as +there is no way for MPI to "know" the unit value for these custom +operations. 
+ + +NOTES ON COLLECTIVE OPERATIONS +------------------------------ + +The reduction functions of type MPI_Op do not return an error value. As +a result, if the functions detect an error, all they can do is either +call :ref:`MPI_Abort` or silently skip the problem. Thus, if the error handler +is changed from MPI_ERRORS_ARE_FATAL to something else (e.g., +MPI_ERRORS_RETURN), then no error may be indicated. + +The reason for this is the performance problems in ensuring that all +collective routines return the same error value. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + +See the MPI man page for a full list of MPI error codes. + + +.. seealso:: + :ref:`MPI_Op_create` :ref:`MPI_Reduce` :ref:`MPI_Scan` diff --git a/docs/man-openmpi/man3/MPI_Exscan_init.3.rst b/docs/man-openmpi/man3/MPI_Exscan_init.3.rst new file mode 100644 index 00000000000..925d49cdd79 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Exscan_init.3.rst @@ -0,0 +1,9 @@ +.. _mpi_exscan_init: + +MPI_Exscan_init +=============== + .. include_body + +.. include:: ../man3/MPI_Exscan.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Fetch_and_op.3.rst b/docs/man-openmpi/man3/MPI_Fetch_and_op.3.rst new file mode 100644 index 00000000000..d148861993d --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Fetch_and_op.3.rst @@ -0,0 +1,140 @@ +.. _mpi_fetch_and_op: + + +MPI_Fetch_and_op +================ + +.. 
 include_body + +:ref:`MPI_Fetch_and_op` - Combines the contents of the origin buffer with +that of a target buffer and returns the target buffer value. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Fetch_and_op(const void *origin_addr, void *result_addr, + MPI_Datatype datatype, int target_rank, MPI_Aint target_disp, + MPI_Op op, MPI_Win win) + + +Fortran Syntax (see FORTRAN 77 NOTES) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FETCH_AND_OP(ORIGIN_ADDR, RESULT_ADDR, DATATYPE, TARGET_RANK, + TARGET_DISP, OP, WIN, IERROR) + ORIGIN_ADDR, RESULT_ADDR(*) + INTEGER(KIND=MPI_ADDRESS_KIND) TARGET_DISP + INTEGER DATATYPE, TARGET_RANK, OP, WIN, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Fetch_and_op(origin_addr, result_addr, datatype, target_rank, + target_disp, op, win, ierror) + TYPE(*), DIMENSION(..), INTENT(IN) :: origin_addr + TYPE(*), DIMENSION(..) :: result_addr + TYPE(MPI_Datatype), INTENT(IN) :: datatype + INTEGER, INTENT(IN) :: target_rank + INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: target_disp + TYPE(MPI_Op), INTENT(IN) :: op + TYPE(MPI_Win), INTENT(IN) :: win + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``origin_addr``: Initial address of buffer (choice). +* ``result_addr``: Initial address of result buffer (choice). +* ``datatype``: Data type of the entry in origin, result, and target buffers (handle). +* ``target_rank``: Rank of target (nonnegative integer). +* ``target_disp``: Displacement from start of window to beginning of target buffer (nonnegative integer). +* ``op``: Reduce operation (handle). +* ``win``: Window object (handle). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). 
 + +DESCRIPTION +----------- + +Accumulate one element of type *datatype* from the origin buffer +(*origin_addr*) to the buffer at offset *target_disp*, in the target +window specified by *target_rank* and *win*, using the operation *op* +and return in the result buffer *result_addr* the contents of the target +buffer before the accumulation. + +The origin and result buffers (*origin_addr* and *result_addr*) must be +disjoint. Any of the predefined operations for MPI_Reduce, as well +as MPI_NO_OP or MPI_REPLACE, can be specified as *op*; user-defined +functions cannot be used. The *datatype* argument must be a predefined +datatype. The operation is executed atomically. + +A new predefined operation, MPI_REPLACE, is defined. It corresponds to +the associative function f(a, b) = b; that is, the current value in the +target memory is replaced by the value supplied by the origin. + +A new predefined operation, MPI_NO_OP, is defined. It corresponds to the +associative function f(a, b) = a; that is, the current value in the +target memory is returned in the result buffer at the origin and no +operation is performed on the target buffer. + + +FORTRAN 77 NOTES +---------------- + +The MPI standard prescribes portable Fortran syntax for the +*TARGET_DISP* argument only for Fortran 90. FORTRAN 77 users may use the +non-portable syntax + +:: + + INTEGER*MPI_ADDRESS_KIND TARGET_DISP + +where MPI_ADDRESS_KIND is a constant defined in mpif.h and gives the +length of the declared integer in bytes. + + +NOTES +----- + +It is the user's responsibility to guarantee that, when using the +accumulate functions, the target displacement argument is such that +accesses to the window are properly aligned according to the data type +arguments in the call to the :ref:`MPI_Fetch_and_op` function. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. 
 + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler +MPI_ERRORS_RETURN may be used to cause error values to be returned. Note +that MPI does not guarantee that an MPI program can continue past an +error. + + +.. seealso:: + :ref:`MPI_Get_accumulate` :ref:`MPI_Reduce` diff --git a/docs/man-openmpi/man3/MPI_File_c2f.3.rst b/docs/man-openmpi/man3/MPI_File_c2f.3.rst new file mode 100644 index 00000000000..a72208c98ec --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_c2f.3.rst @@ -0,0 +1,9 @@ +.. _mpi_file_c2f: + +MPI_File_c2f +============ + .. include_body + +.. include:: ../man3/MPI_Comm_f2c.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_File_call_errhandler.3.rst b/docs/man-openmpi/man3/MPI_File_call_errhandler.3.rst new file mode 100644 index 00000000000..e16de736dd7 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_call_errhandler.3.rst @@ -0,0 +1,81 @@ +.. _mpi_file_call_errhandler: + + +MPI_File_call_errhandler +======================== + +.. include_body + +:ref:`MPI_File_call_errhandler` - Passes the supplied error code to the +error handler assigned to a file + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_call_errhandler(MPI_File fh, int errorcode) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FILE_CALL_ERRHANDLER(FH, ERRORCODE, IERROR) + INTEGER FH, ERRORCODE, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE mpi_f08 + MPI_File_call_errhandler(fh, errorcode, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + INTEGER, INTENT(IN) :: errorcode + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``fh``: file with error handler (handle). +* ``errorcode``: MPI error code (integer). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +This function invokes the error handler assigned to the file handle *fh* +with the supplied error code *errorcode*. If the error handler was +successfully called, the process is not aborted, and the error handler +returns, this function returns MPI_SUCCESS. + +Unlike errors on communicators and windows, the default errorhandler for +files is MPI_ERRORS_RETURN. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +See the MPI man page for a full list of MPI error codes. + + +.. seealso:: + :ref:`MPI_File_create_errhandler` :ref:`MPI_File_set_errhandler` diff --git a/docs/man-openmpi/man3/MPI_File_close.3.rst b/docs/man-openmpi/man3/MPI_File_close.3.rst new file mode 100644 index 00000000000..f1f74ccc6da --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_close.3.rst @@ -0,0 +1,77 @@ +.. _mpi_file_close: + + +MPI_File_close +============== + +.. include_body + +:ref:`MPI_File_close` - Closes a file (collective). + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_close(MPI_File *fh) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FILE_CLOSE(FH, IERROR) + INTEGER FH, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE mpi_f08 + MPI_File_close(fh, ierror) + TYPE(MPI_File), INTENT(INOUT) :: fh + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT/OUTPUT PARAMETER +---------------------- +* ``fh``: File handle (handle). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_File_close` first synchronizes file state, then closes the file +associated with *fh.* :ref:`MPI_File_close` is a collective routine. The user +is responsible for ensuring that all outstanding requests associated +with *fh* have completed before calling :ref:`MPI_File_close`. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_create_errhandler.3.rst b/docs/man-openmpi/man3/MPI_File_create_errhandler.3.rst new file mode 100644 index 00000000000..98d35aa701e --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_create_errhandler.3.rst @@ -0,0 +1,106 @@ +.. _mpi_file_create_errhandler: + + +MPI_File_create_errhandler +========================== + +.. include_body + +:ref:`MPI_File_create_errhandler` - Creates an MPI-style error handler that +can be attached to a file. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_create_errhandler(MPI_File_errhandler_function *function, + MPI_Errhandler *errhandler) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! 
 or the older form: INCLUDE 'mpif.h' + MPI_FILE_CREATE_ERRHANDLER(FUNCTION, ERRHANDLER, IERROR) + EXTERNAL FUNCTION + INTEGER ERRHANDLER, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_create_errhandler(file_errhandler_fn, errhandler, ierror) + PROCEDURE(MPI_File_errhandler_function) :: file_errhandler_fn + TYPE(MPI_Errhandler), INTENT(OUT) :: errhandler + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +DEPRECATED TYPE NAME NOTE +------------------------- + +MPI-2.2 deprecated the MPI_File_errhandler_fn and +MPI::File::Errhandler_fn types in favor of MPI_File_errhandler_function +and MPI::File::Errhandler_function, respectively. Open MPI supports both +names (indeed, the \_fn names are typedefs to the \_function names). + + +INPUT PARAMETER +--------------- +* ``function``: User-defined error handling procedure (function). + +OUTPUT PARAMETERS +----------------- +* ``errhandler``: MPI error handler (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +Registers the user routine *function* for use as an MPI error handler. +Returns in errhandler a handle to the registered error handler. + +In the C language, the user routine *function* should be a C function of +type MPI_File_errhandler_function, which is defined as + +:: + + typedef void (MPI_File_errhandler_function)(MPI_File *, int *, + ...); + +The first argument to *function* is the file in use. The second is the +error code to be returned by the MPI routine that raised the error. + +In the Fortran language, the user routine should be of the form: + +.. code-block:: fortran + + SUBROUTINE FILE_ERRHANDLER_FUNCTION(FILE, ERROR_CODE, ...) + INTEGER FILE, ERROR_CODE + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. 
For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_delete.3.rst b/docs/man-openmpi/man3/MPI_File_delete.3.rst new file mode 100644 index 00000000000..a4840d0bde4 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_delete.3.rst @@ -0,0 +1,81 @@ +.. _mpi_file_delete: + + +MPI_File_delete +=============== + +.. include_body + +:ref:`MPI_File_delete` - Deletes a file. + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_delete(const char *filename, MPI_Info info) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FILE_DELETE(FILENAME, INFO, IERROR) + CHARACTER*(*) FILENAME + INTEGER INFO, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_delete(filename, info, ierror) + CHARACTER(LEN=*), INTENT(IN) :: filename + TYPE(MPI_Info), INTENT(IN) :: info + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``filename``: Name of file to delete (string). +* ``info``: Info object (handle). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_File_delete` deletes the file identified by the file name *filename*, +provided it is not currently open by any process. It is an error to +delete the file with :ref:`MPI_File_delete` if some process has it open, but +:ref:`MPI_File_delete` does not check this. If the file does not exist, +:ref:`MPI_File_delete` returns an error in the class MPI_ERR_NO_SUCH_FILE. 
+ + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_f2c.3.rst b/docs/man-openmpi/man3/MPI_File_f2c.3.rst new file mode 100644 index 00000000000..33410d1c0c3 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_f2c.3.rst @@ -0,0 +1,9 @@ +.. _mpi_file_f2c: + +MPI_File_f2c +============ + .. include_body + +.. include:: ../man3/MPI_Comm_f2c.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_File_get_amode.3.rst b/docs/man-openmpi/man3/MPI_File_get_amode.3.rst new file mode 100644 index 00000000000..e8734df4bca --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_get_amode.3.rst @@ -0,0 +1,78 @@ +.. _mpi_file_get_amode: + + +MPI_File_get_amode +================== + +.. include_body + +:ref:`MPI_File_get_amode` - Returns access mode associated with an open +file. + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_get_amode(MPI_File fh, int *amode) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FILE_GET_AMODE(FH, AMODE, IERROR) + INTEGER FH, AMODE, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_get_amode(fh, amode, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + INTEGER, INTENT(OUT) :: amode + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETER +--------------- +* ``fh``: File handle (handle). 
+ +OUTPUT PARAMETERS +----------------- +* ``amode``: File access mode used to open the file (integer). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_File_get_amode` returns, in *amode,* the access mode associated with +the open file *fh.* + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_get_atomicity.3.rst b/docs/man-openmpi/man3/MPI_File_get_atomicity.3.rst new file mode 100644 index 00000000000..579eb951660 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_get_atomicity.3.rst @@ -0,0 +1,81 @@ +.. _mpi_file_get_atomicity: + + +MPI_File_get_atomicity +====================== + +.. include_body + +:ref:`MPI_File_get_atomicity` - Returns current consistency semantics for +data-access operations. + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_get_atomicity(MPI_File fh, int *flag) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FILE_GET_ATOMICITY(FH, FLAG, IERROR) + INTEGER FH, IERROR + LOGICAL FLAG + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_get_atomicity(fh, flag, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + LOGICAL, INTENT(OUT) :: flag + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETER +--------------- +* ``fh``: File handle (handle). 
+ +OUTPUT PARAMETER +---------------- +* ``flag``: true if atomic mode is enabled, false if nonatomic mode is enabled (boolean). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_File_get_atomicity` returns the current consistency semantics for +data access operations on the set of file handles created by one +collective :ref:`MPI_File_open`. If *flag* is *true,* atomic mode is currently +enabled; if *flag* is *false,* nonatomic mode is currently enabled. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_get_byte_offset.3.rst b/docs/man-openmpi/man3/MPI_File_get_byte_offset.3.rst new file mode 100644 index 00000000000..844079e6472 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_get_byte_offset.3.rst @@ -0,0 +1,101 @@ +.. _mpi_file_get_byte_offset: + + +MPI_File_get_byte_offset +======================== + +.. include_body + +:ref:`MPI_File_get_byte_offset` - Converts a view-relative offset into an +absolute byte position. + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_get_byte_offset(MPI_File fh, MPI_Offset offset, + MPI_Offset *disp) + + +Fortran Syntax (see FORTRAN 77 NOTES) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! 
or the older form: INCLUDE 'mpif.h' + MPI_FILE_GET_BYTE_OFFSET(FH, OFFSET, DISP, IERROR) + INTEGER FH, IERROR + INTEGER(KIND=MPI_OFFSET_KIND) OFFSET, DISP + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_get_byte_offset(fh, offset, disp, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + INTEGER(KIND=MPI_OFFSET_KIND), INTENT(IN) :: offset + INTEGER(KIND=MPI_OFFSET_KIND), INTENT(OUT) :: disp + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``fh``: File handle (handle). +* ``offset``: Offset (integer). + +OUTPUT PARAMETERS +----------------- +* ``disp``: Absolute byte position of offset (integer). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_File_get_byte_offset` converts an offset specified for the current +view to its corresponding displacement value, or absolute byte position, +from the beginning of the file. The absolute byte position of *offset* +relative to the current view of *fh* is returned in *disp*. + + +FORTRAN 77 NOTES +---------------- + +The MPI standard prescribes portable Fortran syntax for the *OFFSET* and +*DISP* arguments only for Fortran 90. Sun FORTRAN 77 users may use the +non-portable syntax + +:: + + INTEGER*MPI_OFFSET_KIND OFFSET + or + INTEGER*MPI_OFFSET_KIND DISP + +where MPI_OFFSET_KIND is a constant defined in mpif.h and gives the +length of the declared integer in bytes. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. 
Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_get_errhandler.3.rst b/docs/man-openmpi/man3/MPI_File_get_errhandler.3.rst new file mode 100644 index 00000000000..1bda7df7167 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_get_errhandler.3.rst @@ -0,0 +1,77 @@ +.. _mpi_file_get_errhandler: + + +MPI_File_get_errhandler +======================= + +.. include_body + +:ref:`MPI_File_get_errhandler` - Gets the error handler for a file. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_get_errhandler(MPI_File file, MPI_Errhandler + *errhandler) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FILE_GET_ERRHANDLER(FILE, ERRHANDLER, IERROR) + INTEGER FILE, ERRHANDLER, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_get_errhandler(file, errhandler, ierror) + TYPE(MPI_File), INTENT(IN) :: file + TYPE(MPI_Errhandler), INTENT(OUT) :: errhandler + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETER +--------------- +* ``file``: File (handle). + +OUTPUT PARAMETERS +----------------- +* ``errhandler``: MPI error handler currently associated with file (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +Returns in *errhandler* (a handle to) the error handler that is +currently associated with file *file*. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. 
Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_get_group.3.rst b/docs/man-openmpi/man3/MPI_File_get_group.3.rst new file mode 100644 index 00000000000..f774af6785b --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_get_group.3.rst @@ -0,0 +1,80 @@ +.. _mpi_file_get_group: + + +MPI_File_get_group +================== + +.. include_body + +:ref:`MPI_File_get_group` - Returns a duplicate of the process group of a +file. + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_get_group(MPI_File fh, MPI_Group *group) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FILE_GET_GROUP(FH, GROUP, IERROR) + INTEGER FH, GROUP, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_get_group(fh, group, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + TYPE(MPI_Group), INTENT(OUT) :: group + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETER +--------------- +* ``fh``: File handle (handle). + +OUTPUT PARAMETERS +----------------- +* ``group``: Group that opened the file (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_File_get_group` returns a duplicate of the group of the communicator +used to open the file associated with *fh.* The group is returned in +*group.* The user is responsible for freeing *group,* using +:ref:`MPI_Group_free`. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. 
The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_get_info.3.rst b/docs/man-openmpi/man3/MPI_File_get_info.3.rst new file mode 100644 index 00000000000..474673c0cf5 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_get_info.3.rst @@ -0,0 +1,153 @@ +.. _mpi_file_get_info: + + +MPI_File_get_info +================= + +.. include_body + +:ref:`MPI_File_get_info` - Returns a new info object containing values for +current hints associated with a file. + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_get_info(MPI_File fh, MPI_Info *info_used) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FILE_GET_INFO(FH, INFO_USED, IERROR) + INTEGER FH, INFO_USED, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_get_info(fh, info_used, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + TYPE(MPI_Info), INTENT(OUT) :: info_used + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETER +--------------- +* ``fh``: File handle (handle). + +OUTPUT PARAMETERS +----------------- +* ``info_used``: New info object (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_File_get_info` returns a new info object containing all the hints +that the system currently associates with the file *fh*. The current +setting of all hints actually used by the system related to this open +file is returned in *info_used*. The user is responsible for freeing +*info_used* via :ref:`MPI_Info_free`. 
+ +Note that the set of hints returned in *info_used* may be greater or +smaller than the set of hints passed in to :ref:`MPI_File_open`, +:ref:`MPI_File_set_view`, and :ref:`MPI_File_set_info`, as the system +may not recognize some hints set by the user, and may automatically +set other hints that the user has not requested to be set. See the +:ref:`HINTS section ` for a list of +hints that can be set. + +.. _man-openmpi-mpi-file-get-info: + + +HINTS +----- + +The following hints can be used as values for the *info_used* argument. + +**SETTABLE HINTS** + +* ``shared_file_timeout``: Amount of time (in seconds) to wait for + access to the shared file pointer before exiting with + ``MPI_ERR_TIMEDOUT``. + +* ``rwlock_timeout``: Amount of time (in seconds) to wait for + obtaining a read or write lock on a contiguous chunk of a UNIX file + before exiting with ``MPI_ERR_TIMEDOUT``. + +* ``noncoll_read_bufsize``: Maximum size of the buffer used by MPI I/O + to satisfy read requests in the noncollective data-access + routines. + + .. note:: A buffer size smaller than the distance (in bytes) in a + UNIX file between the first byte and the last byte of the + access request causes MPI I/O to iterate and perform + multiple UNIX ``read()`` or ``write()`` calls. If the + request includes multiple noncontiguous chunks of data, + and the buffer size is greater than the size of those + chunks, then the UNIX ``read()`` or ``write()`` (made at + the MPI I/O level) will access data not requested by this + process in order to reduce the total number of ``write()`` + calls made. If this is not desirable behavior, you should + reduce this buffer size to equal the size of the + contiguous chunks within the aggregate request. + +* ``noncoll_write_bufsize``: Maximum size of the buffer used by MPI + I/O to satisfy write requests in the noncollective data-access + routines. + + See the above note in ``noncoll_read_bufsize``. 
+ +* ``coll_read_bufsize``: Maximum size of the buffer used by MPI I/O to + satisfy read requests in the collective data-access routines. + + See the above note in ``noncoll_read_bufsize``. + +* ``coll_write_bufsize``: Maximum size of the buffer used by MPI I/O + to satisfy write requests in the collective data-access + routines. + + See the above note in ``noncoll_read_bufsize``. + +* ``mpiio_concurrency``: (boolean) controls whether nonblocking + I/O routines can bind an extra thread to an LWP. + +* ``mpiio_coll_contiguous``: (boolean) controls whether subsequent + collective data accesses will request collectively contiguous + regions of the file. + +**NON-SETTABLE HINTS** + +* ``filename``: Access this hint to get the name of the file. + + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_get_position.3.rst b/docs/man-openmpi/man3/MPI_File_get_position.3.rst new file mode 100644 index 00000000000..c0338706ff2 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_get_position.3.rst @@ -0,0 +1,95 @@ +.. _mpi_file_get_position: + + +MPI_File_get_position +===================== + +.. include_body + +:ref:`MPI_File_get_position` - Returns the current position of the +individual file pointer. + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_get_position(MPI_File fh, MPI_Offset *offset) + + +Fortran Syntax (see FORTRAN 77 NOTES) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FILE_GET_POSITION(FH, OFFSET, IERROR) + INTEGER FH, IERROR + INTEGER(KIND=MPI_OFFSET_KIND) OFFSET + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_get_position(fh, offset, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + INTEGER(KIND=MPI_OFFSET_KIND), INTENT(OUT) :: offset + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETER +--------------- +* ``fh``: File handle (handle). + +OUTPUT PARAMETERS +----------------- +* ``offset``: Offset of the individual file pointer (integer). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_File_get_position` returns, in *offset,* the current position of the +individual file pointer in *etype* units relative to the current +displacement and file type. + + +FORTRAN 77 NOTES +---------------- + +The MPI standard prescribes portable Fortran syntax for the *OFFSET* +argument only for Fortran 90. Sun FORTRAN 77 users may use the +non-portable syntax + +:: + + INTEGER*MPI_OFFSET_KIND OFFSET + +where MPI_OFFSET_KIND is a constant defined in mpif.h and gives the +length of the declared integer in bytes. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. 
diff --git a/docs/man-openmpi/man3/MPI_File_get_position_shared.3.rst b/docs/man-openmpi/man3/MPI_File_get_position_shared.3.rst new file mode 100644 index 00000000000..7f4578401b6 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_get_position_shared.3.rst @@ -0,0 +1,95 @@ +.. _mpi_file_get_position_shared: + + +MPI_File_get_position_shared +============================ + +.. include_body + +:ref:`MPI_File_get_position_shared` - Returns the current position of the +shared file pointer. + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_get_position_shared(MPI_File fh, MPI_Offset *offset) + + +Fortran Syntax (see FORTRAN 77 NOTES) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FILE_GET_POSITION_SHARED(FH, OFFSET, IERROR) + INTEGER FH, IERROR + INTEGER(KIND=MPI_OFFSET_KIND) OFFSET + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_get_position_shared(fh, offset, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + INTEGER(KIND=MPI_OFFSET_KIND), INTENT(OUT) :: offset + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETER +--------------- +* ``fh``: File handle (handle). + +OUTPUT PARAMETERS +----------------- +* ``offset``: Offset of the shared file pointer (integer). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_File_get_position_shared` returns, in *offset,* the current position +of the shared file pointer in *etype* units relative to the current +displacement and file type. + + +FORTRAN 77 NOTES +---------------- + +The MPI standard prescribes portable Fortran syntax for the *OFFSET* +argument only for Fortran 90. Sun FORTRAN 77 users may use the +non-portable syntax + +:: + + INTEGER*MPI_OFFSET_KIND OFFSET + +where MPI_OFFSET_KIND is a constant defined in mpif.h and gives the +length of the declared integer in bytes. 
+ + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_get_size.3.rst b/docs/man-openmpi/man3/MPI_File_get_size.3.rst new file mode 100644 index 00000000000..90117a28e11 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_get_size.3.rst @@ -0,0 +1,100 @@ +.. _mpi_file_get_size: + + +MPI_File_get_size +================= + +.. include_body + +:ref:`MPI_File_get_size` - Returns the current size of the file. + + +SYNTAX
------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_get_size(MPI_File fh, MPI_Offset *size) + + +Fortran Syntax (see FORTRAN 77 NOTES) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FILE_GET_SIZE(FH, SIZE, IERROR) + INTEGER FH, IERROR + INTEGER(KIND=MPI_OFFSET_KIND) SIZE + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_get_size(fh, size, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + INTEGER(KIND=MPI_OFFSET_KIND), INTENT(OUT) :: size + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETER +--------------- +* ``fh``: File handle (handle). + +OUTPUT PARAMETERS +----------------- +* ``size``: Size of the file in bytes (integer). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_File_get_size` returns, in *size*, the current size in bytes of the +file associated with the file handle *fh*. 
Note that the file size +returned by Solaris may not represent the number of bytes physically +allocated for the file in those cases where all bytes in this file have +not been written at least once. + + +FORTRAN 77 NOTES +---------------- + +The MPI standard prescribes portable Fortran syntax for the *SIZE* +argument only for Fortran 90. Sun FORTRAN 77 users may use the +non-portable syntax + +:: + + INTEGER*MPI_OFFSET_KIND SIZE + +where MPI_OFFSET_KIND is a constant defined in mpif.h and gives the +length of the declared integer in bytes. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_File_preallocate` diff --git a/docs/man-openmpi/man3/MPI_File_get_type_extent.3.rst b/docs/man-openmpi/man3/MPI_File_get_type_extent.3.rst new file mode 100644 index 00000000000..713566eab64 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_get_type_extent.3.rst @@ -0,0 +1,118 @@ +.. _mpi_file_get_type_extent: + + +MPI_File_get_type_extent +======================== + +.. include_body + +:ref:`MPI_File_get_type_extent` - Returns the extent of the data type in a +file. + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_get_type_extent(MPI_File fh, MPI_Datatype + datatype, MPI_Aint *extent) + + +Fortran Syntax (see FORTRAN 77 NOTES) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! 
or the older form: INCLUDE 'mpif.h' + MPI_FILE_GET_TYPE_EXTENT(FH, DATATYPE, EXTENT, IERROR) + INTEGER FH, DATATYPE, IERROR + INTEGER(KIND=MPI_ADDRESS_KIND) EXTENT + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_get_type_extent(fh, datatype, extent, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + TYPE(MPI_Datatype), INTENT(IN) :: datatype + INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(OUT) :: extent + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``fh``: File handle (handle). +* ``datatype``: Data type (handle). + +OUTPUT PARAMETERS +----------------- +* ``extent``: Data type extent (integer). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_File_get_type_extent` can be used to calculate *extent* for +*datatype* in the file. The extent is the same for all processes +accessing the file associated with *fh*. If the current view uses a +user-defined data representation, :ref:`MPI_File_get_type_extent` uses the +*dtype_file_extent_fn* callback to calculate the extent. + + +FORTRAN 77 NOTES +---------------- + +The MPI standard prescribes portable Fortran syntax for the *EXTENT* +argument only for Fortran 90. FORTRAN 77 users may use the non-portable +syntax + +:: + + INTEGER*MPI_ADDRESS_KIND EXTENT + +where MPI_ADDRESS_KIND is a constant defined in mpif.h and gives the +length of the declared integer in bytes. + + +NOTES +----- + +If the file data representation is other than "native," care must be +taken in constructing etypes and file types. Any of the data-type +constructor functions may be used; however, for those functions that +accept displacements in bytes, the displacements must be specified in +terms of their values in the file for the file data representation being +used. MPI will interpret these byte displacements as is; no scaling will +be done. 
The function :ref:`MPI_File_get_type_extent` can be used to calculate +the extents of data types in the file. For etypes and file types that +are portable data types, MPI will scale any displacements in the data +types to match the file data representation. Data types passed as +arguments to read/write routines specify the data layout in memory; +therefore, they must always be constructed using displacements +corresponding to displacements in memory. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_get_view.3.rst b/docs/man-openmpi/man3/MPI_File_get_view.3.rst new file mode 100644 index 00000000000..e852416546d --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_get_view.3.rst @@ -0,0 +1,106 @@ +.. _mpi_file_get_view: + + +MPI_File_get_view +================= + +.. include_body + +:ref:`MPI_File_get_view` - Returns the process's view of data in the file. + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_get_view(MPI_File fh, MPI_Offset *disp, + MPI_Datatype *etype, MPI_Datatype *filetype, + char *datarep) + + +Fortran Syntax (see FORTRAN 77 NOTES) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! 
or the older form: INCLUDE 'mpif.h' + MPI_FILE_GET_VIEW(FH, DISP, ETYPE, + FILETYPE, DATAREP, IERROR) + INTEGER FH, ETYPE, FILETYPE, IERROR + CHARACTER*(*) DATAREP + INTEGER(KIND=MPI_OFFSET_KIND) DISP + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_get_view(fh, disp, etype, filetype, datarep, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + INTEGER(KIND=MPI_OFFSET_KIND), INTENT(OUT) :: disp + TYPE(MPI_Datatype), INTENT(OUT) :: etype, filetype + CHARACTER(LEN=*), INTENT(OUT) :: datarep + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETER +--------------- +* ``fh``: File handle (handle). + +OUTPUT PARAMETERS +----------------- +* ``disp``: Displacement (integer). +* ``etype``: Elementary data type (handle). +* ``filetype``: File type (handle). See Restrictions, below. +* ``datarep``: Data representation (string). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +The :ref:`MPI_File_get_view` routine returns the process's view of the data in +the file. The current values of the displacement, etype, and filetype +are returned in *disp,* *etype,* and *filetype,* respectively. + +The :ref:`MPI_File_get_view` interface allows the user to pass a +data-representation string via the *datarep* argument. + + +FORTRAN 77 NOTES +---------------- + +The MPI standard prescribes portable Fortran syntax for the *DISP* +argument only for Fortran 90. FORTRAN 77 users may use the non-portable +syntax. + +:: + + INTEGER*MPI_OFFSET_KIND DISP + +where MPI_OFFSET_KIND is a constant defined in mpif.h and gives the +length of the declared integer in bytes. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. 
The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_iread.3.rst b/docs/man-openmpi/man3/MPI_File_iread.3.rst new file mode 100644 index 00000000000..c434ffb4d54 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_iread.3.rst @@ -0,0 +1,96 @@ +.. _mpi_file_iread: + + +MPI_File_iread +============== + +.. include_body + +:ref:`MPI_File_iread` - Reads a file starting at the location specified by +the individual file pointer (nonblocking, noncollective). + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_iread(MPI_File fh, void *buf, int count, + MPI_Datatype datatype, MPI_Request *request) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FILE_IREAD(FH, BUF, COUNT, DATATYPE, REQUEST, IERROR) + BUF(*) + INTEGER FH, COUNT, DATATYPE, REQUEST, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_iread(fh, buf, count, datatype, request, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: buf + INTEGER, INTENT(IN) :: count + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT/OUTPUT PARAMETER +---------------------- +* ``fh``: File handle (handle). + +INPUT PARAMETERS +---------------- +* ``count``: Number of elements in the buffer (integer). +* ``datatype``: Data type of each buffer element (handle). + +OUTPUT PARAMETERS +----------------- +* ``buf``: Initial address of buffer (choice). +* ``request``: Request object (handle). +* ``IERROR``: Fortran only: Error status (integer). 
+ +DESCRIPTION +----------- + +:ref:`MPI_File_iread` is a nonblocking version of :ref:`MPI_File_read`. It attempts to +read from the file associated with *fh* at the current individual file +pointer position maintained by the system in which a total number of +*count* data items having *datatype* type are read into the user's +buffer *buf.* The data is taken out of those parts of the file specified +by the current view. :ref:`MPI_File_iread` stores the number of data-type +elements actually read in *status.* All other fields of *status* are +undefined. It is erroneous to call this function if MPI_MODE_SEQUENTIAL +mode was specified when the file was opened. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_iread_all.3.rst b/docs/man-openmpi/man3/MPI_File_iread_all.3.rst new file mode 100644 index 00000000000..a592df4a8e4 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_iread_all.3.rst @@ -0,0 +1,96 @@ +.. _mpi_file_iread_all: + + +MPI_File_iread_all +================== + +.. include_body + +:ref:`MPI_File_iread_all` - Reads a file starting at the location specified +by the individual file pointer (nonblocking, collective). + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_iread_all(MPI_File fh, void *buf, int count, + MPI_Datatype datatype, MPI_Request *request) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! 
or the older form: INCLUDE 'mpif.h' + MPI_FILE_IREAD_ALL(FH, BUF, COUNT, DATATYPE, REQUEST, IERROR) + BUF(*) + INTEGER FH, COUNT, DATATYPE, REQUEST, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_iread_all(fh, buf, count, datatype, request, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + TYPE(*), DIMENSION(..) :: buf + INTEGER, INTENT(IN) :: count + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT/OUTPUT PARAMETER +---------------------- +* ``fh``: File handle (handle). + +INPUT PARAMETERS +---------------- +* ``count``: Number of elements in the buffer (integer). +* ``datatype``: Data type of each buffer element (handle). + +OUTPUT PARAMETERS +----------------- +* ``buf``: Initial address of buffer (choice). +* ``request``: Request object (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_File_iread_all` is a nonblocking version of :ref:`MPI_File_read_all`. It +attempts to read from the file associated with *fh* at the current +individual file pointer position maintained by the system in which a +total number of *count* data items having *datatype* type are read into +the user's buffer *buf.* The data is taken out of those parts of the +file specified by the current view. :ref:`MPI_File_iread_all` stores the number +of data-type elements actually read in *status.* All other fields of +*status* are undefined. It is erroneous to call this function if +MPI_MODE_SEQUENTIAL mode was specified when the file was opened. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. 
The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_iread_at.3.rst b/docs/man-openmpi/man3/MPI_File_iread_at.3.rst new file mode 100644 index 00000000000..12e8a17b811 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_iread_at.3.rst @@ -0,0 +1,113 @@ +.. _mpi_file_iread_at: + + +MPI_File_iread_at +================= + +.. include_body + +:ref:`MPI_File_iread_at` - Reads a file at an explicitly specified offset +(nonblocking, noncollective). + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_iread_at(MPI_File fh, MPI_Offset offset, + void *buf, int count, MPI_Datatype datatype, + MPI_Request *request) + + +Fortran Syntax (see FORTRAN 77 NOTES) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FILE_IREAD_AT(FH, OFFSET, BUF, COUNT, DATATYPE, REQUEST, IERROR) + BUF(*) + INTEGER FH, COUNT, DATATYPE, REQUEST, IERROR + INTEGER(KIND=MPI_OFFSET_KIND) OFFSET + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_iread_at(fh, offset, buf, count, datatype, request, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + INTEGER(KIND=MPI_OFFSET_KIND), INTENT(IN) :: offset + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: buf + INTEGER, INTENT(IN) :: count + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``fh``: File handle (handle). +* ``offset``: File offset (integer). +* ``count``: Number of elements in the buffer (integer). +* ``datatype``: Data type of each buffer element (handle). + +OUTPUT PARAMETERS +----------------- +* ``buf``: Initial address of the buffer (choice). 
+* ``request``: Request object (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_File_iread_at` is the nonblocking version of :ref:`MPI_File_read_at`. + +:ref:`MPI_File_iread_at` is a nonblocking routine that attempts to read from +the file associated with *fh* at the *offset* position a total number of +*count* data items having *datatype* type into the user's buffer *buf.* +The *offset* is in etype units relative to the current view. That is, +holes are not counted when locating an offset. The data is taken out of +those parts of the file specified by the current view. :ref:`MPI_File_iread_at` +stores the number of *datatype* elements actually read in *status.* All +other fields of *status* are undefined. + + +FORTRAN 77 NOTES +---------------- + +The MPI standard prescribes portable Fortran syntax for the *OFFSET* +argument only for Fortran 90. Sun FORTRAN 77 users may use the +non-portable syntax + +:: + + INTEGER*MPI_OFFSET_KIND OFFSET + +where MPI_OFFSET_KIND is a constant defined in mpif.h and gives the +length of the declared integer in bytes. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_iread_at_all.3.rst b/docs/man-openmpi/man3/MPI_File_iread_at_all.3.rst new file mode 100644 index 00000000000..9c77ef7edcc --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_iread_at_all.3.rst @@ -0,0 +1,114 @@ +.. 
_mpi_file_iread_at_all: + + +MPI_File_iread_at_all +===================== + +.. include_body + +:ref:`MPI_File_iread_at_all` - Reads a file at an explicitly specified +offset (nonblocking, collective). + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_iread_at_all(MPI_File fh, MPI_Offset offset, + void *buf, int count, MPI_Datatype datatype, + MPI_Request *request) + + +Fortran Syntax (see FORTRAN 77 NOTES) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FILE_IREAD_AT_ALL(FH, OFFSET, BUF, COUNT, DATATYPE, REQUEST, IERROR) + BUF(*) + INTEGER FH, COUNT, DATATYPE, REQUEST, IERROR + INTEGER(KIND=MPI_OFFSET_KIND) OFFSET + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_iread_at_all(fh, offset, buf, count, datatype, request, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + INTEGER(KIND=MPI_OFFSET_KIND), INTENT(IN) :: offset + TYPE(*), DIMENSION(..) :: buf + INTEGER, INTENT(IN) :: count + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``fh``: File handle (handle). +* ``offset``: File offset (integer). +* ``count``: Number of elements in the buffer (integer). +* ``datatype``: Data type of each buffer element (handle). + +OUTPUT PARAMETERS +----------------- +* ``buf``: Initial address of the buffer (choice). +* ``request``: Request object (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_File_iread_at_all` is the nonblocking version of +:ref:`MPI_File_read_at_all`. 
+ +:ref:`MPI_File_iread_at_all` is a nonblocking routine that attempts to read +from the file associated with *fh* at the *offset* position a total +number of *count* data items having *datatype* type into the user's +buffer *buf.* The *offset* is in etype units relative to the current +view. That is, holes are not counted when locating an offset. The data +is taken out of those parts of the file specified by the current view. +:ref:`MPI_File_iread_at_all` stores the number of *datatype* elements actually +read in *status.* All other fields of *status* are undefined. + + +FORTRAN 77 NOTES +---------------- + +The MPI standard prescribes portable Fortran syntax for the *OFFSET* +argument only for Fortran 90. Sun FORTRAN 77 users may use the +non-portable syntax + +:: + + INTEGER*MPI_OFFSET_KIND OFFSET + +where MPI_OFFSET_KIND is a constant defined in mpif.h and gives the +length of the declared integer in bytes. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_iread_shared.3.rst b/docs/man-openmpi/man3/MPI_File_iread_shared.3.rst new file mode 100644 index 00000000000..f0a3b68bbe8 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_iread_shared.3.rst @@ -0,0 +1,93 @@ +.. _mpi_file_iread_shared: + + +MPI_File_iread_shared +===================== + +.. include_body + +:ref:`MPI_File_iread_shared` - Reads a file using the shared file pointer +(nonblocking, noncollective). + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. 
code-block:: c + + #include + + int MPI_File_iread_shared(MPI_File fh, void *buf, int count, + MPI_Datatype datatype, MPI_Request *request) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FILE_IREAD_SHARED(FH, BUF, COUNT, DATATYPE, REQUEST, IERROR) + BUF(*) + INTEGER FH, COUNT, DATATYPE, REQUEST, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_iread_shared(fh, buf, count, datatype, request, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: buf + INTEGER, INTENT(IN) :: count + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT/OUTPUT PARAMETER +---------------------- +* ``fh``: File handle (handle). + +INPUT PARAMETERS +---------------- +* ``count``: Number of elements in buffer (integer). +* ``datatype``: Data type of each buffer element (handle). + +OUTPUT PARAMETERS +----------------- +* ``buf``: Initial address of buffer (choice). +* ``request``: Request object (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_File_iread_shared` is a nonblocking version of the +:ref:`MPI_File_read_shared` interface. It uses the shared file pointer to read +files. The order of serialization among the processors is not +deterministic for this noncollective routine, so you need to use other +methods of synchronization to impose a particular order among +processors. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. 
The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_iwrite.3.rst b/docs/man-openmpi/man3/MPI_File_iwrite.3.rst new file mode 100644 index 00000000000..9d5a516d94a --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_iwrite.3.rst @@ -0,0 +1,98 @@ +.. _mpi_file_iwrite: + + +MPI_File_iwrite +=============== + +.. include_body + +:ref:`MPI_File_iwrite` - Writes a file starting at the location specified +by the individual file pointer (nonblocking, noncollective). + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_iwrite(MPI_File fh, const void *buf, int count, + MPI_Datatype datatype, MPI_Request *request) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FILE_IWRITE(FH, BUF, COUNT, DATATYPE, REQUEST, IERROR) + BUF(*) + INTEGER FH, COUNT, DATATYPE, REQUEST, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_iwrite(fh, buf, count, datatype, request, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: buf + INTEGER, INTENT(IN) :: count + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT/OUTPUT PARAMETER +---------------------- +* ``fh``: File handle (handle). + +INPUT PARAMETERS +---------------- +* ``buf``: Initial address of buffer (choice). +* ``count``: Number of elements in buffer (integer). +* ``datatype``: Data type of each buffer element (handle). + +OUTPUT PARAMETER +---------------- +* ``request``: Request object (handle). +* ``IERROR``: Fortran only: Error status (integer). 
+ +DESCRIPTION +----------- + +:ref:`MPI_File_iwrite` is a nonblocking version of the :ref:`MPI_File_write` +interface. It attempts to write into the file associated with *fh* (at +the current individual file pointer position maintained by the system) a +total number of *count* data items having *datatype* type from the +user's buffer *buf.* The data is written into those parts of the file +specified by the current view. :ref:`MPI_File_iwrite` stores the number of +*datatype* elements actually written in *status.* All other fields of +*status* are undefined. + +It is erroneous to call this function if MPI_MODE_SEQUENTIAL mode was +specified when the file was open. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_iwrite_all.3.rst b/docs/man-openmpi/man3/MPI_File_iwrite_all.3.rst new file mode 100644 index 00000000000..0b33cabc323 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_iwrite_all.3.rst @@ -0,0 +1,98 @@ +.. _mpi_file_iwrite_all: + + +MPI_File_iwrite_all +=================== + +.. include_body + +:ref:`MPI_File_iwrite_all` - Writes a file starting at the location +specified by the individual file pointer (nonblocking, collective). + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_iwrite_all(MPI_File fh, const void *buf, int count, + MPI_Datatype datatype, MPI_Request *request) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! 
or the older form: INCLUDE 'mpif.h' + MPI_FILE_IWRITE_ALL(FH, BUF, COUNT, DATATYPE, REQUEST, IERROR) + BUF(*) + INTEGER FH, COUNT, DATATYPE, REQUEST, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_iwrite_all(fh, buf, count, datatype, request, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + TYPE(*), DIMENSION(..) :: buf + INTEGER, INTENT(IN) :: count + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT/OUTPUT PARAMETER +---------------------- +* ``fh``: File handle (handle). + +INPUT PARAMETERS +---------------- +* ``buf``: Initial address of buffer (choice). +* ``count``: Number of elements in buffer (integer). +* ``datatype``: Data type of each buffer element (handle). + +OUTPUT PARAMETER +---------------- +* ``request``: Request object (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_File_iwrite_all` is a nonblocking version of the :ref:`MPI_File_write_all` +interface. It attempts to write into the file associated with *fh* (at +the current individual file pointer position maintained by the system) a +total number of *count* data items having *datatype* type from the +user's buffer *buf.* The data is written into those parts of the file +specified by the current view. :ref:`MPI_File_iwrite_all` stores the number of +*datatype* elements actually written in *status.* All other fields of +*status* are undefined. + +It is erroneous to call this function if MPI_MODE_SEQUENTIAL mode was +specified when the file was open. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. 
The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_iwrite_at.3.rst b/docs/man-openmpi/man3/MPI_File_iwrite_at.3.rst new file mode 100644 index 00000000000..836986672fd --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_iwrite_at.3.rst @@ -0,0 +1,118 @@ +.. _mpi_file_iwrite_at: + + +MPI_File_iwrite_at +================== + +.. include_body + +:ref:`MPI_File_iwrite_at` - Writes a file at an explicitly specified offset +(nonblocking, noncollective). + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_iwrite_at(MPI_File fh, MPI_Offset offset, + const void *buf, int count, MPI_Datatype datatype, MPI_Request *request) + + +Fortran Syntax (see FORTRAN 77 NOTES) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FILE_IWRITE_AT(FH, OFFSET, BUF, COUNT, DATATYPE, REQUEST, IERROR) + BUF(*) + INTEGER FH, COUNT, DATATYPE, REQUEST, IERROR + INTEGER(KIND=MPI_OFFSET_KIND) OFFSET + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_iwrite_at(fh, offset, buf, count, datatype, request, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + INTEGER(KIND=MPI_OFFSET_KIND), INTENT(IN) :: offset + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: buf + INTEGER, INTENT(IN) :: count + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT/OUTPUT PARAMETER +---------------------- +* ``fh``: File handle (handle). + +INPUT PARAMETERS +---------------- +* ``offset``: File offset (integer). +* ``buf``: Initial address of buffer (choice). +* ``count``: Number of elements in buffer (integer). 
+* ``datatype``: Data type of each buffer element (handle). + +OUTPUT PARAMETERS +----------------- +* ``request``: Request object (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_File_iwrite_at` is a nonblocking version of :ref:`MPI_File_write_at`. It +attempts to write into the file associated with *fh* (at the *offset* +position) a total number of *count* data items having *datatype* type +from the user's buffer *buf.* The offset is in *etype* units relative to +the current view. That is, holes are not counted when locating an +offset. The data is written into those parts of the file specified by +the current view. :ref:`MPI_File_iwrite_at` stores the number of *datatype* +elements actually written in *status.* All other fields of *status* are +undefined. The request structure can be passed to :ref:`MPI_Wait` or :ref:`MPI_Test`, +which will return a status with the number of bytes actually accessed. + +It is erroneous to call this function if MPI_MODE_SEQUENTIAL mode was +specified when the file was open. + + +FORTRAN 77 NOTES +---------------- + +The MPI standard prescribes portable Fortran syntax for the *OFFSET* +argument only for Fortran 90. FORTRAN 77 users may use the non-portable +syntax + +:: + + INTEGER*MPI_OFFSET_KIND OFFSET + +where MPI_OFFSET_KIND is a constant defined in mpif.h and gives the +length of the declared integer in bytes. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. 
diff --git a/docs/man-openmpi/man3/MPI_File_iwrite_at_all.3.rst b/docs/man-openmpi/man3/MPI_File_iwrite_at_all.3.rst new file mode 100644 index 00000000000..c62a640d591 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_iwrite_at_all.3.rst @@ -0,0 +1,119 @@ +.. _mpi_file_iwrite_at_all: + + +MPI_File_iwrite_at_all +====================== + +.. include_body + +:ref:`MPI_File_iwrite_at_all` - Writes a file at an explicitly specified +offset (nonblocking, collective). + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_iwrite_at_all(MPI_File fh, MPI_Offset offset, + const void *buf, int count, MPI_Datatype datatype, MPI_Request *request) + + +Fortran Syntax (see FORTRAN 77 NOTES) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FILE_IWRITE_AT_ALL(FH, OFFSET, BUF, COUNT, DATATYPE, REQUEST, IERROR) + BUF(*) + INTEGER FH, COUNT, DATATYPE, REQUEST, IERROR + INTEGER(KIND=MPI_OFFSET_KIND) OFFSET + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_iwrite_at_all(fh, offset, buf, count, datatype, request, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + INTEGER(KIND=MPI_OFFSET_KIND), INTENT(IN) :: offset + TYPE(*), DIMENSION(..) :: buf + INTEGER, INTENT(IN) :: count + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT/OUTPUT PARAMETER +---------------------- +* ``fh``: File handle (handle). + +INPUT PARAMETERS +---------------- +* ``offset``: File offset (integer). +* ``buf``: Initial address of buffer (choice). +* ``count``: Number of elements in buffer (integer). +* ``datatype``: Data type of each buffer element (handle). + +OUTPUT PARAMETERS +----------------- +* ``request``: Request object (handle). +* ``IERROR``: Fortran only: Error status (integer). 
+ +DESCRIPTION +----------- + +:ref:`MPI_File_iwrite_at_all` is a nonblocking version of +:ref:`MPI_File_write_at_all`. It attempts to write into the file associated +with *fh* (at the *offset* position) a total number of *count* data +items having *datatype* type from the user's buffer *buf.* The offset is +in *etype* units relative to the current view. That is, holes are not +counted when locating an offset. The data is written into those parts of +the file specified by the current view. :ref:`MPI_File_iwrite_at_all` stores +the number of *datatype* elements actually written in *status.* All +other fields of *status* are undefined. The request structure can be +passed to :ref:`MPI_Wait` or :ref:`MPI_Test`, which will return a status with the +number of bytes actually accessed. + +It is erroneous to call this function if MPI_MODE_SEQUENTIAL mode was +specified when the file was open. + + +FORTRAN 77 NOTES +---------------- + +The MPI standard prescribes portable Fortran syntax for the *OFFSET* +argument only for Fortran 90. FORTRAN 77 users may use the non-portable +syntax + +:: + + INTEGER*MPI_OFFSET_KIND OFFSET + +where MPI_OFFSET_KIND is a constant defined in mpif.h and gives the +length of the declared integer in bytes. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. 
diff --git a/docs/man-openmpi/man3/MPI_File_iwrite_shared.3.rst b/docs/man-openmpi/man3/MPI_File_iwrite_shared.3.rst new file mode 100644 index 00000000000..be31be9acb8 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_iwrite_shared.3.rst @@ -0,0 +1,91 @@ +.. _mpi_file_iwrite_shared: + + +MPI_File_iwrite_shared +====================== + +.. include_body + +:ref:`MPI_File_iwrite_shared` - Writes a file using the shared file pointer +(nonblocking, noncollective). + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_iwrite_shared(MPI_File fh, const void *buf, int count, MPI_Datatype + datatype, MPI_Request *request) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FILE_IWRITE_SHARED(FH, BUF, COUNT, DATATYPE, REQUEST, IERROR) + BUF(*) + INTEGER FH, COUNT, DATATYPE, REQUEST, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_iwrite_shared(fh, buf, count, datatype, request, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: buf + INTEGER, INTENT(IN) :: count + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT/OUTPUT PARAMETER +---------------------- +* ``fh``: File handle (handle). + +INPUT PARAMETERS +---------------- +* ``count``: Number of elements in buffer (integer). +* ``datatype``: Data type of each buffer element (handle). + +OUTPUT PARAMETERS +----------------- +* ``buf``: Initial address of buffer (choice). +* ``request``: Request object (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_File_iwrite_shared` is a nonblocking routine that uses the shared +file pointer to write files. 
The order of serialization is not +deterministic for this noncollective routine, so you need to use other +methods of synchronization to impose a particular order. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_open.3.rst b/docs/man-openmpi/man3/MPI_File_open.3.rst new file mode 100644 index 00000000000..a9bebd729f1 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_open.3.rst @@ -0,0 +1,201 @@ +.. _mpi_file_open: + + +MPI_File_open +============= + +.. include_body + +:ref:`MPI_File_open` - Opens a file (collective). + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_open(MPI_Comm comm, const char *filename, + int amode, MPI_Info info, + MPI_File *fh) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FILE_OPEN(COMM, FILENAME, AMODE, INFO, FH, IERROR) + CHARACTER*(*) FILENAME + INTEGER COMM, AMODE, INFO, FH, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_open(comm, filename, amode, info, fh, ierror) + TYPE(MPI_Comm), INTENT(IN) :: comm + CHARACTER(LEN=*), INTENT(IN) :: filename + INTEGER, INTENT(IN) :: amode + TYPE(MPI_Info), INTENT(IN) :: info + TYPE(MPI_File), INTENT(OUT) :: fh + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``comm``: Communicator (handle). +* ``filename``: Name of file to open (string). 
+* ``amode``: File access mode (integer). +* ``info``: Info object (handle). + +OUTPUT PARAMETERS +----------------- +* ``fh``: New file handle (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_File_open` opens the file identified by the filename +*filename* on all processes in the *comm* communicator +group. :ref:`MPI_File_open` is a collective routine; all processes +must provide the same value for *amode,* and all processes must +provide filenames that reference the same file which are textually +identical (note: Open MPI I/O plugins may have restrictions on +characters that can be used in filenames. For example, the ROMIO +plugin may disallow the colon (":") character from appearing in a +filename). A process can open a file independently of other processes +by using the MPI_COMM_SELF communicator. The file handle returned, +*fh,* can be subsequently used to access the file until the file is +closed using :ref:`MPI_File_close`. Before calling +:ref:`MPI_Finalize`, the user is required to close (via +MPI_File_close) all files that were opened with +:ref:`MPI_File_open`. Note that the communicator *comm* is unaffected +by :ref:`MPI_File_open` and continues to be usable in all MPI +routines. Furthermore, use of *comm* will not interfere with I/O +behavior. + +Initially, all processes view the file as a linear byte stream; that is, +the *etype* and *filetype* are both MPI_BYTE. The file view can be +changed via the :ref:`MPI_File_set_view` routine. + +The following access modes are supported (specified in amode, in a +bit-vector OR in one of the following integer constants): + +* ``MPI_MODE_APPEND`` +* ``MPI_MODE_CREATE``: Create the file if it does not exist. +* ``MPI_MODE_DELETE_ON_CLOSE`` +* ``MPI_MODE_EXCL``: Error creating a file that already exists. +* ``MPI_MODE_RDONLY``: Read only. +* ``MPI_MODE_RDWR``: Reading and writing. +* ``MPI_MODE_SEQUENTIAL`` +* ``MPI_MODE_WRONLY``: Write only. 
+* ``MPI_MODE_UNIQUE_OPEN`` + +The modes ``MPI_MODE_RDONLY``, ``MPI_MODE_RDWR``, ``MPI_MODE_WRONLY``, +and ``MPI_MODE_CREATE`` have identical semantics to their POSIX +counterparts. It is erroneous to specify ``MPI_MODE_CREATE`` in +conjunction with ``MPI_MODE_RDONLY``. Errors related to the access +mode are raised in the class ``MPI_ERR_AMODE``. + +On single-node clusters, files are opened by default using nonatomic +mode file consistency semantics. The more stringent atomic-mode +consistency semantics, required for atomicity of overlapping accesses, +are the default when processors in a communicator group reside on more +than one node. This setting can be changed using :ref:`MPI_File_set_atomicity`. + +The :ref:`MPI_File_open` interface allows the user to pass information +via the *info* argument. It can be set to ``MPI_INFO_NULL``. See the +:ref:`HINTS section <man-openmpi-mpi-file-open-hints>` for a list of +hints that can be set. + +.. _man-openmpi-mpi-file-open-hints: + +HINTS +----- + +The following hints can be used as values for the *info* argument. + +**SETTABLE HINTS** + +* ``MPI_INFO_NULL`` + +* ``shared_file_timeout``: Amount of time (in seconds) to wait for + access to the shared file pointer before exiting with + ``MPI_ERR_TIMEDOUT``. + +* ``rwlock_timeout``: Amount of time (in seconds) to wait for + obtaining a read or write lock on a contiguous chunk of a UNIX file + before exiting with ``MPI_ERR_TIMEDOUT``. + +* ``noncoll_read_bufsize``: Maximum size of the buffer used by MPI I/O + to satisfy multiple noncontiguous read requests in the noncollective + data-access routines. + + .. note:: A buffer size smaller than the distance (in bytes) in a + UNIX file between the first byte and the last byte of the + access request causes MPI I/O to iterate and perform + multiple UNIX `read()` or `write()` calls. 
If the request + includes multiple noncontiguous chunks of data, and the + buffer size is greater than the size of those chunks, then + the UNIX `read()` or `write()` (made at the MPI I/O level) + will access data not requested by this process in order to + reduce the total number of `write()` calls made. If this + is not desirable behavior, you should reduce this buffer + size to equal the size of the contiguous chunks within the + aggregate request. + +* ``noncoll_write_bufsize``: Maximum size of the buffer used by MPI + I/O to satisfy multiple noncontiguous write requests in the + noncollective data-access routines. + + See the above note in ``noncoll_read_bufsize``. + +* ``coll_read_bufsize``: Maximum size of the buffer used by MPI I/O to + satisfy multiple noncontiguous read requests in the collective + data-access routines. + + See the above note in ``noncoll_read_bufsize``. + +* ``coll_write_bufsize``: Maximum size of the buffer used by MPI I/O + to satisfy multiple noncontiguous write requests in the collective + data-access routines. + + See the above note in ``noncoll_read_bufsize``. + +* ``mpiio_concurrency``: (boolean) controls whether nonblocking I/O + routines can bind an extra thread to an LWP. + +* ``mpiio_coll_contiguous``: (boolean) controls whether subsequent + collective data accesses will request collectively contiguous + regions of the file. + +**NON-SETTABLE HINTS** + +* ``filename``: Access this hint to get the name of the file. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. 
Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_preallocate.3.rst b/docs/man-openmpi/man3/MPI_File_preallocate.3.rst new file mode 100644 index 00000000000..386a6106c6c --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_preallocate.3.rst @@ -0,0 +1,127 @@ +.. _mpi_file_preallocate: + + +MPI_File_preallocate +==================== + +.. include_body + +:ref:`MPI_File_preallocate` - Preallocates a specified amount of storage +space at the beginning of a file (collective). + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_preallocate(MPI_File fh, MPI_Offset size) + + +Fortran Syntax (see FORTRAN 77 NOTES) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FILE_PREALLOCATE(FH, SIZE, IERROR) + INTEGER FH, IERROR + INTEGER(KIND=MPI_OFFSET_KIND) SIZE + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_preallocate(fh, size, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + INTEGER(KIND=MPI_OFFSET_KIND), INTENT(IN) :: size + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT/OUTPUT PARAMETER +---------------------- +* ``fh``: File handle (handle). + +INPUT PARAMETER +--------------- +* ``size``: Size to preallocate file, in bytes (integer). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_File_preallocate` ensures that storage space is allocated for the +first *size* bytes of the file associated with *fh*. +:ref:`MPI_File_preallocate` can be a very time-consuming operation. + +:ref:`MPI_File_preallocate` is collective; all processes in the group must pass +identical values for *size*. Regions of the file that have previously +been written are unaffected. 
For newly allocated regions of the file, +:ref:`MPI_File_preallocate` has the same effect as writing undefined data. If +size is larger than the current file size, the file size increases to +*size*. If *size* is less than or equal to the current file size, the +file size is unchanged. + +The treatment of file pointers, pending nonblocking accesses, and file +consistency is the same as with :ref:`MPI_File_set_size`. If +MPI_MODE_SEQUENTIAL mode was specified when the file was opened, it is +erroneous to call this routine. + + +FORTRAN 77 NOTES +---------------- + +The MPI standard prescribes portable Fortran syntax for the *SIZE* +argument only for Fortran 90. FORTRAN 77 users may use the non-portable +syntax + +:: + + INTEGER*MPI_OFFSET_KIND SIZE + +where MPI_OFFSET_KIND is a constant defined in mpif.h and gives the +length of the declared integer in bytes. + + +NOTES +----- + +When using the collective routine :ref:`MPI_File_set_size` on a UNIX file, if +the size that is set is smaller than the current file size, the file is +truncated at the position defined by size. If the size is set to be +larger than the current file size, the file size becomes the set size. +When the file size is increased this way with :ref:`MPI_File_set_size`, new +regions are created in the file with displacements between the old file +size and the larger, newly set file size. + +Sun MPI I/O does not necessarily allocate file space for such new +regions. You may reserve file space either by using :ref:`MPI_File_preallocate` +or by performing a read or write to certain bytes. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. 
The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_read.3.rst b/docs/man-openmpi/man3/MPI_File_read.3.rst new file mode 100644 index 00000000000..97f5936b56c --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_read.3.rst @@ -0,0 +1,95 @@ +.. _mpi_file_read: + + +MPI_File_read +============= + +.. include_body + +:ref:`MPI_File_read` - Reads a file starting at the location specified by +the individual file pointer (blocking, noncollective). + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_read(MPI_File fh, void *buf, + int count, MPI_Datatype datatype, MPI_Status *status) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FILE_READ(FH, BUF, COUNT, + DATATYPE, STATUS, IERROR) + BUF(*) + INTEGER FH, COUNT, DATATYPE, STATUS(MPI_STATUS_SIZE),IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_read(fh, buf, count, datatype, status, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + TYPE(*), DIMENSION(..) :: buf + INTEGER, INTENT(IN) :: count + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Status) :: status + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``fh``: File handle (handle). +* ``count``: Number of elements in buffer (integer). +* ``datatype``: Data type of each buffer element (handle). + +OUTPUT PARAMETERS +----------------- +* ``buf``: Initial address of buffer (choice). +* ``status``: Status object (status). +* ``IERROR``: Fortran only: Error status (integer). 
+ +DESCRIPTION +----------- + +:ref:`MPI_File_read` attempts to read from the file associated with *fh* (at +the current individual file pointer position maintained by the system) a +total number of *count* data items having *datatype* type into the +user's buffer *buf.* The data is taken out of those parts of the file +specified by the current view. :ref:`MPI_File_read` stores the number of +data-type elements actually read in *status.* All other fields of +*status* are undefined. + +It is erroneous to call this function if MPI_MODE_SEQUENTIAL mode was +specified when the file was opened. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_read_all.3.rst b/docs/man-openmpi/man3/MPI_File_read_all.3.rst new file mode 100644 index 00000000000..f03525060b8 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_read_all.3.rst @@ -0,0 +1,95 @@ +.. _mpi_file_read_all: + + +MPI_File_read_all +================= + +.. include_body + +:ref:`MPI_File_read_all` - Reads a file starting at the locations specified +by individual file pointers (blocking, collective). + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_read_all(MPI_File fh, void *buf, + int count, MPI_Datatype datatype, MPI_Status *status) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! 
or the older form: INCLUDE 'mpif.h' + MPI_FILE_READ_ALL(FH, BUF, COUNT, + DATATYPE, STATUS, IERROR) + BUF(*) + INTEGER FH, COUNT, DATATYPE, STATUS(MPI_STATUS_SIZE),IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_read_all(fh, buf, count, datatype, status, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + TYPE(*), DIMENSION(..) :: buf + INTEGER, INTENT(IN) :: count + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Status) :: status + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``fh``: File handle (handle). +* ``count``: Number of elements in buffer (integer). +* ``datatype``: Data type of each buffer element (handle). + +OUTPUT PARAMETERS +----------------- +* ``buf``: Initial address of buffer (choice). +* ``status``: Status object (status). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_File_read_all` is a collective routine that attempts to read from the +file associated with *fh* (at the current individual file pointer +position maintained by the system) a total number of *count* data items +having *datatype* type into the user's buffer *buf.* The data is taken +out of those parts of the file specified by the current view. +:ref:`MPI_File_read_all` stores the number of data-type elements actually read +in *status.* All other fields of *status* are undefined. + +It is erroneous to call this function if MPI_MODE_SEQUENTIAL mode was +specified when the file was opened. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. 
The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_read_all_begin.3.rst b/docs/man-openmpi/man3/MPI_File_read_all_begin.3.rst new file mode 100644 index 00000000000..924ce738e8d --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_read_all_begin.3.rst @@ -0,0 +1,101 @@ +.. _mpi_file_read_all_begin: + + +MPI_File_read_all_begin +======================= + +.. include_body + +:ref:`MPI_File_read_all_begin` - Reads a file starting at the locations +specified by individual file pointers; beginning part of a split +collective routine (nonblocking). + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_read_all_begin(MPI_File fh, void *buf, + int count, MPI_Datatype datatype) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FILE_READ_ALL_BEGIN(FH, BUF, COUNT, DATATYPE, IERROR) + BUF(*) + INTEGER FH, COUNT, DATATYPE, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_read_all_begin(fh, buf, count, datatype, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: buf + INTEGER, INTENT(IN) :: count + TYPE(MPI_Datatype), INTENT(IN) :: datatype + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT/OUTPUT PARAMETER +---------------------- +* ``fh``: File handle (handle). + +INPUT PARAMETERS +---------------- +* ``count``: Number of elements in buffer (integer). +* ``datatype``: Data type of each buffer element (handle). + +OUTPUT PARAMETERS +----------------- +* ``buf``: Initial address of buffer (choice). +* ``IERROR``: Fortran only: Error status (integer). 
+ +DESCRIPTION +----------- + +:ref:`MPI_File_read_all_begin` is the beginning part of a split collective +operation that attempts to read from the file associated with *fh* (at +the current individual file pointer position maintained by the system) a +total number of *count* data items having *datatype* type into the +user's buffer *buf.* The data is taken out of those parts of the file +specified by the current view. + + +NOTES +----- + +All the nonblocking collective routines for data access are "split" into +two routines, each with \_begin or \_end as a suffix. These split +collective routines are subject to the semantic rules described in +Section 9.4.5 of the MPI-2 standard. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_read_all_end.3.rst b/docs/man-openmpi/man3/MPI_File_read_all_end.3.rst new file mode 100644 index 00000000000..77fa8022433 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_read_all_end.3.rst @@ -0,0 +1,96 @@ +.. _mpi_file_read_all_end: + + +MPI_File_read_all_end +===================== + +.. include_body + +:ref:`MPI_File_read_all_end` - Reads a file starting at the locations +specified by individual file pointers; ending part of a split collective +routine (blocking). + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_read_all_end(MPI_File fh, void *buf, + MPI_Status *status) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FILE_READ_ALL_END(FH, BUF, STATUS, IERROR) + BUF(*) + INTEGER FH, STATUS(MPI_STATUS_SIZE), IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_read_all_end(fh, buf, status, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: buf + TYPE(MPI_Status) :: status + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT/OUTPUT PARAMETER +---------------------- +* ``fh``: File handle (handle). + +OUTPUT PARAMETERS +----------------- +* ``buf``: Initial address of buffer (choice). +* ``status``: Status object (status). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_File_read_all_end` is the ending part of a split collective operation +that stores the number of elements actually read from the file +associated with *fh* (at the current individual file pointer position +maintained by the system) into the user's buffer *buf* in *status.* The +data is taken out of those parts of the file specified by the current +view. All other fields of *status* are undefined. + + +NOTES +----- + +All the nonblocking collective routines for data access are "split" into +two routines, each with \_begin or \_end as a suffix. These split +collective routines are subject to the semantic rules described in +Section 9.4.5 of the MPI-2 standard. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. 
Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_read_at.3.rst b/docs/man-openmpi/man3/MPI_File_read_at.3.rst new file mode 100644 index 00000000000..5eb63ae3368 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_read_at.3.rst @@ -0,0 +1,113 @@ +.. _mpi_file_read_at: + + +MPI_File_read_at +================ + +.. include_body + +:ref:`MPI_File_read_at` - Reads a file at an explicitly specified offset +(blocking, noncollective). + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_read_at(MPI_File fh, MPI_Offset offset, + void *buf, int count, MPI_Datatype datatype, + MPI_Status *status) + + +Fortran Syntax (see FORTRAN 77 NOTES) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FILE_READ_AT(FH, OFFSET, BUF, COUNT, + DATATYPE, STATUS, IERROR) + BUF(*) + INTEGER FH, COUNT, DATATYPE, STATUS(MPI_STATUS_SIZE), IERROR + INTEGER(KIND=MPI_OFFSET_KIND) OFFSET + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_read_at(fh, offset, buf, count, datatype, status, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + INTEGER(KIND=MPI_OFFSET_KIND), INTENT(IN) :: offset + TYPE(*), DIMENSION(..) :: buf + INTEGER, INTENT(IN) :: count + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Status) :: status + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``fh``: File handle (handle). +* ``offset``: File offset (integer). +* ``count``: Number of elements in buffer (integer). +* ``datatype``: Data type of each buffer element (handle). + +OUTPUT PARAMETERS +----------------- +* ``buf``: Initial address of buffer (choice). +* ``status``: Status object (status). +* ``IERROR``: Fortran only: Error status (integer). 
+ +DESCRIPTION +----------- + +:ref:`MPI_File_read_at` attempts to read from the file associated with *fh* (at +the *offset* position) a total number of *count* data items having +*datatype* type into the user's buffer *buf.* The *offset* is in *etype* +units relative to the current view. That is, holes are not counted when +locating an offset. The data is taken out of those parts of the file +specified by the current view. :ref:`MPI_File_read_at` stores the number of +*datatype* elements actually read in *status.* All other fields of +*status* are undefined. It is erroneous to call this function if +MPI_MODE_SEQUENTIAL mode was specified when the file was opened. + + +FORTRAN 77 NOTES +---------------- + +The MPI standard prescribes portable Fortran syntax for the *OFFSET* +argument only for Fortran 90. FORTRAN 77 users may use the non-portable +syntax + +:: + + INTEGER*MPI_OFFSET_KIND OFFSET + +where MPI_OFFSET_KIND is a constant defined in mpif.h and gives the +length of the declared integer in bytes. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_read_at_all.3.rst b/docs/man-openmpi/man3/MPI_File_read_at_all.3.rst new file mode 100644 index 00000000000..ffd38c825cf --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_read_at_all.3.rst @@ -0,0 +1,114 @@ +.. _mpi_file_read_at_all: + + +MPI_File_read_at_all +==================== + +.. 
include_body + +:ref:`MPI_File_read_at_all` - Reads a file at explicitly specified offsets +(blocking, collective). + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_read_at_all(MPI_File fh, MPI_Offset offset, + void *buf, int count, MPI_Datatype datatype, + MPI_Status *status) + + +Fortran Syntax (see FORTRAN 77 NOTES) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FILE_READ_AT_ALL(FH, OFFSET, BUF, COUNT, + DATATYPE, STATUS, IERROR) + BUF(*) + INTEGER FH, COUNT, DATATYPE, STATUS(MPI_STATUS_SIZE), IERROR + INTEGER(KIND=MPI_OFFSET_KIND) OFFSET + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_read_at_all(fh, offset, buf, count, datatype, status, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + INTEGER(KIND=MPI_OFFSET_KIND), INTENT(IN) :: offset + TYPE(*), DIMENSION(..) :: buf + INTEGER, INTENT(IN) :: count + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Status) :: status + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``fh``: File handle (handle). +* ``offset``: File offset (integer). +* ``count``: Number of elements in buffer (integer). +* ``datatype``: Data type of each buffer element (handle). + +OUTPUT PARAMETERS +----------------- +* ``buf``: Initial address of buffer (choice). +* ``status``: Status object (status). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_File_read_at_all` is a collective routine that attempts to read from +the file associated with *fh* (at the *offset* position) a total number +of *count* data items having *datatype* type into the user's buffer +*buf.* The *offset* is in etype units relative to the current view. That +is, holes are not counted when locating an offset. The data is taken out +of those parts of the file specified by the current view. 
+:ref:`MPI_File_read_at_all` stores the number of *datatype* elements actually +read in *status.* All other fields of *status* are undefined. It is +erroneous to call this function if MPI_MODE_SEQUENTIAL mode was +specified when the file was opened. + + +FORTRAN 77 NOTES +---------------- + +The MPI standard prescribes portable Fortran syntax for the *OFFSET* +argument only for Fortran 90. FORTRAN 77 users may use the non-portable +syntax + +:: + + INTEGER*MPI_OFFSET_KIND OFFSET + +where MPI_OFFSET_KIND is a constant defined in mpif.h and gives the +length of the declared integer in bytes. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_read_at_all_begin.3.rst b/docs/man-openmpi/man3/MPI_File_read_at_all_begin.3.rst new file mode 100644 index 00000000000..b6c2cbf7dfe --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_read_at_all_begin.3.rst @@ -0,0 +1,118 @@ +.. _mpi_file_read_at_all_begin: + + +MPI_File_read_at_all_begin +========================== + +.. include_body + +:ref:`MPI_File_read_at_all_begin` - Reads a file at explicitly specified +offsets; beginning part of a split collective routine (nonblocking). + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_read_at_all_begin(MPI_File fh, MPI_Offset + offset, void *buf, int count, MPI_Datatype + datatype) + + +Fortran Syntax (see FORTRAN 77 NOTES) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FILE_READ_AT_ALL_BEGIN(FH, OFFSET, BUF, + COUNT, DATATYPE, IERROR) + BUF(*) + INTEGER FH, COUNT, DATATYPE, IERROR + INTEGER(KIND=MPI_OFFSET_KIND) OFFSET + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_read_at_all_begin(fh, offset, buf, count, datatype, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + INTEGER(KIND=MPI_OFFSET_KIND), INTENT(IN) :: offset + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: buf + INTEGER, INTENT(IN) :: count + TYPE(MPI_Datatype), INTENT(IN) :: datatype + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``fh``: File handle (handle). +* ``offset``: File offset (integer). +* ``count``: Number of elements in buffer (integer). +* ``datatype``: Data type of each buffer element. + +OUTPUT PARAMETERS +----------------- +* ``buf``: Initial address of buffer (choice). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_File_read_at_all_begin` is the beginning part of a split collective +routine that attempts to read from the file associated with *fh* (at the +*offset* position) a total number of *count* data items having +*datatype* type into the user's buffer *buf.* The *offset* is in etype +units relative to the current view. That is, holes are not counted when +locating an offset. The data is taken out of those parts of the file +specified by the current view. + + +FORTRAN 77 NOTES +---------------- + +The MPI standard prescribes portable Fortran syntax for the *OFFSET* +argument only for Fortran 90. FORTRAN 77 users may use the non-portable +syntax + +:: + + INTEGER*MPI_OFFSET_KIND OFFSET + +where MPI_OFFSET_KIND is a constant defined in mpif.h and gives the +length of the declared integer in bytes. + + +NOTES +----- + +All the nonblocking collective routines for data access are "split" into +two routines, each with \_begin or \_end as a suffix. 
These split +collective routines are subject to the semantic rules described in +Section 9.4.5 of the MPI-2 standard. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_read_at_all_end.3.rst b/docs/man-openmpi/man3/MPI_File_read_at_all_end.3.rst new file mode 100644 index 00000000000..d5ded8f06c5 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_read_at_all_end.3.rst @@ -0,0 +1,95 @@ +.. _mpi_file_read_at_all_end: + + +MPI_File_read_at_all_end +======================== + +.. include_body + +:ref:`MPI_File_read_at_all_end` - Reads a file at explicitly specified +offsets; ending part of a split collective routine (blocking). + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_read_at_all_end(MPI_File fh, void *buf, + MPI_Status *status) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FILE_READ_AT_ALL_END(FH, BUF, STATUS, IERROR) + BUF(*) + INTEGER FH, STATUS(MPI_STATUS_SIZE), IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_read_at_all_end(fh, buf, status, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: buf + TYPE(MPI_Status) :: status + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETER +--------------- +* ``fh``: File handle (handle). 
+ +OUTPUT PARAMETERS +----------------- +* ``buf``: Initial address of buffer (choice). +* ``status``: Status object (status). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_File_read_at_all_end` is a split collective routine that stores the +number of elements actually read from the file associated with *fh* in +*status.* :ref:`MPI_File_read_at_all_end` blocks until the operation initiated +by :ref:`MPI_File_read_at_all_begin` completes. The data is taken out of those +parts of the file specified by the current view. All other fields of +*status* are undefined. + + +NOTES +----- + +All the nonblocking collective routines for data access are "split" into +two routines, each with \_begin or \_end as a suffix. These split +collective routines are subject to the semantic rules described in +Section 9.4.5 of the MPI-2 standard. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_read_ordered.3.rst b/docs/man-openmpi/man3/MPI_File_read_ordered.3.rst new file mode 100644 index 00000000000..745bb165e79 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_read_ordered.3.rst @@ -0,0 +1,97 @@ +.. _mpi_file_read_ordered: + + +MPI_File_read_ordered +===================== + +.. include_body + +:ref:`MPI_File_read_ordered` - Reads a file at a location specified by a +shared file pointer (blocking, collective). + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. 
code-block:: c + + #include + + int MPI_File_read_ordered(MPI_File fh, void *buf, + int count, MPI_Datatype datatype, + MPI_Status *status) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FILE_READ_ORDERED(FH, BUF, COUNT, DATATYPE, + STATUS, IERROR) + BUF(*) + INTEGER FH, COUNT, DATATYPE, STATUS(MPI_STATUS_SIZE), IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_read_ordered(fh, buf, count, datatype, status, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + TYPE(*), DIMENSION(..) :: buf + INTEGER, INTENT(IN) :: count + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Status) :: status + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``fh``: File handle (handle). +* ``count``: Number of elements in buffer (integer). +* ``datatype``: Data type of each buffer element (handle). + +OUTPUT PARAMETERS +----------------- +* ``buf``: Initial address of buffer (choice). +* ``status``: Status object (Status). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_File_read_ordered` is a collective routine. This routine must be +called by all processes in the communicator group associated with the +file handle *fh.* Each process may pass different argument values for +the *datatype* and *count* arguments. Each process attempts to read, +from the file associated with *fh,* a total number of *count* data items +having *datatype* type into the user's buffer *buf.* For each process, +the location in the file at which data is read is the position at which +the shared file pointer would be after all processes whose ranks within +the group are less than that of this process had read their data. 
+:ref:`MPI_File_read_ordered` returns the actual number of *datatype* elements +read in *status.* The shared file pointer is updated by the amounts of +data requested by all processes of the group. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_read_ordered_begin.3.rst b/docs/man-openmpi/man3/MPI_File_read_ordered_begin.3.rst new file mode 100644 index 00000000000..097e701933b --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_read_ordered_begin.3.rst @@ -0,0 +1,105 @@ +.. _mpi_file_read_ordered_begin: + + +MPI_File_read_ordered_begin +=========================== + +.. include_body + +:ref:`MPI_File_read_ordered_begin` - Reads a file at a location specified +by a shared file pointer; beginning part of a split collective routine +(nonblocking). + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_read_ordered_begin(MPI_File fh, void *buf, + int count, MPI_Datatype datatype) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FILE_READ_ORDERED_BEGIN(FH, BUF, COUNT, DATATYPE, IERROR) + BUF(*) + INTEGER FH, COUNT, DATATYPE, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE mpi_f08 + MPI_File_read_ordered_begin(fh, buf, count, datatype, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: buf + INTEGER, INTENT(IN) :: count + TYPE(MPI_Datatype), INTENT(IN) :: datatype + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT/OUTPUT PARAMETER +---------------------- +* ``fh``: File handle (handle). + +INPUT PARAMETERS +---------------- +* ``count``: Number of elements in buffer (integer). +* ``datatype``: Data type of each buffer element (handle). + +OUTPUT PARAMETERS +----------------- +* ``buf``: Initial address of buffer (choice). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_File_read_ordered_begin` is the beginning part of a split collective, +nonblocking routine that must be called by all processes in the +communicator group associated with the file handle *fh.* Each process +may pass different argument values for the *datatype* and *count* +arguments. Each process attempts to read, from the file associated with +*fh,* a total number of *count* data items having *datatype* type into +the user's buffer *buf.* For each process, the location in the file at +which data is read is the position at which the shared file pointer +would be after all processes whose ranks within the group are less than +that of this process had read their data. + + +NOTES +----- + +All the nonblocking collective routines for data access are "split" into +two routines, each with \_begin or \_end as a suffix. These split +collective routines are subject to the semantic rules described in +Section 9.4.5 of the MPI-2 standard. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. 
The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_read_ordered_end.3.rst b/docs/man-openmpi/man3/MPI_File_read_ordered_end.3.rst new file mode 100644 index 00000000000..1713fc9b72e --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_read_ordered_end.3.rst @@ -0,0 +1,100 @@ +.. _mpi_file_read_ordered_end: + + +MPI_File_read_ordered_end +========================= + +.. include_body + +:ref:`MPI_File_read_ordered_end` - Reads a file at a location specified by +a shared file pointer; ending part of a split collective routine +(blocking). + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_read_ordered_end(MPI_File fh, void *buf, + MPI_Status *status) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FILE_READ_ORDERED_END(FH, BUF, STATUS, IERROR) + BUF(*) + INTEGER FH, STATUS(MPI_STATUS_SIZE), IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_read_ordered_end(fh, buf, status, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: buf + TYPE(MPI_Status) :: status + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT/OUTPUT PARAMETER +---------------------- +* ``fh``: File handle (handle). + +OUTPUT PARAMETERS +----------------- +* ``buf``: Initial address of buffer (choice). +* ``status``: Status object (status). +* ``IERROR``: Fortran only: Error status (integer). 
+ +DESCRIPTION +----------- + +:ref:`MPI_File_read_ordered_end` is the ending part of a split collective +routine that must be called by all processes in the communicator group +associated with the file handle *fh.* :ref:`MPI_File_read_ordered_end` blocks +until the operation initiated by :ref:`MPI_File_read_ordered_begin` completes. +It attempts to read the file associated with *fh* into the user's buffer +*buf.* The shared file pointer is updated by the amounts of data +requested by all processes of the group. For each process, the location +in the file at which data is read is the position at which the shared +file pointer would be after all processes whose ranks within the group +are less than that of this process had read their data. + + +NOTES +----- + +All the nonblocking collective routines for data access are "split" into +two routines, each with \_begin or \_end as a suffix. These split +collective routines are subject to the semantic rules described in +Section 9.4.5 of the MPI-2 standard. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_read_shared.3.rst b/docs/man-openmpi/man3/MPI_File_read_shared.3.rst new file mode 100644 index 00000000000..3f74db534d4 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_read_shared.3.rst @@ -0,0 +1,91 @@ +.. _mpi_file_read_shared: + + +MPI_File_read_shared +==================== + +..
include_body + +:ref:`MPI_File_read_shared` - Reads a file using the shared file pointer +(blocking, noncollective). + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include <mpi.h> + + int MPI_File_read_shared(MPI_File fh, void *buf, int count, + MPI_Datatype datatype, MPI_Status *status) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FILE_READ_SHARED(FH, BUF, COUNT, DATATYPE, STATUS, + IERROR) + BUF(*) + INTEGER FH, COUNT, DATATYPE, STATUS(MPI_STATUS_SIZE), IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_read_shared(fh, buf, count, datatype, status, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + TYPE(*), DIMENSION(..) :: buf + INTEGER, INTENT(IN) :: count + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Status) :: status + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT/OUTPUT PARAMETER +---------------------- +* ``fh``: File handle (handle). + +INPUT PARAMETERS +---------------- +* ``count``: Number of elements in buffer (integer). +* ``datatype``: Data type of each buffer element (handle). + +OUTPUT PARAMETERS +----------------- +* ``buf``: Initial address of buffer (choice). +* ``status``: Status object (status). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_File_read_shared` is a blocking routine that uses the shared file +pointer to read files. The order of serialization is not deterministic +for this noncollective routine. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN.
The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_seek.3.rst b/docs/man-openmpi/man3/MPI_File_seek.3.rst new file mode 100644 index 00000000000..eaf3605adee --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_seek.3.rst @@ -0,0 +1,108 @@ +.. _mpi_file_seek: + + +MPI_File_seek +============= + +.. include_body + +:ref:`MPI_File_seek` - Updates individual file pointers (noncollective). + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_seek(MPI_File fh, MPI_Offset offset, + int whence) + + +Fortran Syntax (see FORTRAN 77 NOTES) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FILE_SEEK(FH, OFFSET, WHENCE, IERROR) + INTEGER FH, WHENCE, IERROR + INTEGER(KIND=MPI_OFFSET_KIND) OFFSET + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_seek(fh, offset, whence, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + INTEGER(KIND=MPI_OFFSET_KIND), INTENT(IN) :: offset + INTEGER, INTENT(IN) :: whence + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``fh``: File handle (handle). +* ``offset``: File offset (integer). +* ``whence``: Update mode (integer). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). 
+ +DESCRIPTION +----------- + +:ref:`MPI_File_seek` updates the individual file pointer according to *whence,* +which could have the following possible values: + + o MPI_SEEK_SET - The pointer is set to *offset.* + + o MPI_SEEK_CUR - The pointer is set to the current pointer position plus *offset.* + + o MPI_SEEK_END - The pointer is set to the end of the file plus *offset.* + +The *offset* can be negative, which allows seeking backwards. It is +erroneous to seek to a negative position in the file. The end of the +file is defined to be the location of the next elementary data item +immediately after the last accessed data item, even if that location is +a hole. + + +FORTRAN 77 NOTES +---------------- + +The MPI standard prescribes portable Fortran syntax for the *OFFSET* +argument only for Fortran 90. FORTRAN 77 users may use the non-portable +syntax + +:: + + INTEGER*MPI_OFFSET_KIND OFFSET + +where MPI_OFFSET_KIND is a constant defined in mpif.h and gives the +length of the declared integer in bytes. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_seek_shared.3.rst b/docs/man-openmpi/man3/MPI_File_seek_shared.3.rst new file mode 100644 index 00000000000..24d1ca916a0 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_seek_shared.3.rst @@ -0,0 +1,115 @@ +.. _mpi_file_seek_shared: + + +MPI_File_seek_shared +==================== + +.. 
include_body + +:ref:`MPI_File_seek_shared` - Updates the global shared file pointer +(collective). + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_seek_shared(MPI_File fh, MPI_Offset offset, + int whence) + + +Fortran Syntax (see FORTRAN 77 NOTES) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FILE_SEEK_SHARED(FH, OFFSET, WHENCE, IERROR) + INTEGER FH, WHENCE, IERROR + INTEGER(KIND=MPI_OFFSET_KIND) OFFSET + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_seek_shared(fh, offset, whence, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + INTEGER(KIND=MPI_OFFSET_KIND), INTENT(IN) :: offset + INTEGER, INTENT(IN) :: whence + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``fh``: File handle (handle). +* ``offset``: File offset (integer). +* ``whence``: Update mode (integer). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_File_seek_shared` updates the shared file pointer according to +*whence,* which could have the following possible values: + + o MPI_SEEK_SET - The pointer is set to *offset.* + + o MPI_SEEK_CUR - The pointer is set to the current pointer position plus *offset.* + + o MPI_SEEK_END - The pointer is set to the end of the file plus *offset.* + +:ref:`MPI_File_seek_shared` is collective; all the processes in the +communicator group associated with the file handle *fh* must call +:ref:`MPI_File_seek_shared` with the same *offset* and *whence.* All processes +in the communicator group are synchronized before the shared file +pointer is updated. + +The *offset* can be negative, which allows seeking backwards. It is +erroneous to seek to a negative position in the view. 
The end of the +view is defined to be the position of the next elementary data item, +relative to the current view, following the last whole elementary data +item accessible. + + +FORTRAN 77 NOTES +---------------- + +The MPI standard prescribes portable Fortran syntax for the *OFFSET* +argument only for Fortran 90. FORTRAN 77 users may use the non-portable +syntax + +:: + + INTEGER*MPI_OFFSET_KIND OFFSET + +where MPI_OFFSET_KIND is a constant defined in mpif.h and gives the +length of the declared integer in bytes. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_set_atomicity.3.rst b/docs/man-openmpi/man3/MPI_File_set_atomicity.3.rst new file mode 100644 index 00000000000..74b8d652a06 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_set_atomicity.3.rst @@ -0,0 +1,85 @@ +.. _mpi_file_set_atomicity: + + +MPI_File_set_atomicity +====================== + +.. include_body + +:ref:`MPI_File_set_atomicity` - Sets consistency semantics for data-access +operations (collective). + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_set_atomicity(MPI_File fh, int flag) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FILE_SET_ATOMICITY(FH, FLAG, IERROR) + INTEGER FH, FLAG, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE mpi_f08 + MPI_File_set_atomicity(fh, flag, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + LOGICAL, INTENT(IN) :: flag + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``fh``: File handle (handle). +* ``flag``: **true** to enable atomic mode, **false** to enable nonatomic mode (boolean). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +The consistency semantics for data-access operations using the set of +file handles created by one collective :ref:`MPI_File_open` is set by +collectively calling :ref:`MPI_File_set_atomicity`. All processes in the group +must pass identical values for *fh* and *flag.* If *flag* is *true,* +atomic mode is set; if *flag* is *false,* nonatomic mode is set. + +The default value on a call to :ref:`MPI_File_open` in Open MPI is *true* for +jobs running on more than one node, *false* for jobs running on a single +SMP. For more information, see the MPI-2 standard. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_set_errhandler.3.rst b/docs/man-openmpi/man3/MPI_File_set_errhandler.3.rst new file mode 100644 index 00000000000..4a20f34ad36 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_set_errhandler.3.rst @@ -0,0 +1,81 @@ +.. _mpi_file_set_errhandler: + + +MPI_File_set_errhandler +======================= + +.. 
include_body + +:ref:`MPI_File_set_errhandler` - Sets the error handler for a file. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_set_errhandler(MPI_File file, MPI_Errhandler + errhandler) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FILE_SET_ERRHANDLER(FILE, ERRHANDLER, IERROR) + INTEGER FILE, ERRHANDLER, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_set_errhandler(file, errhandler, ierror) + TYPE(MPI_File), INTENT(IN) :: file + TYPE(MPI_Errhandler), INTENT(IN) :: errhandler + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT/OUTPUT PARAMETER +---------------------- +* ``file``: File (handle). + +INPUT PARAMETER +--------------- +* ``errhandler``: New error handler for file (handle). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +Attaches a new error handler to a file. The error handler must be either +a predefined error handler or an error handler created by a call to +:ref:`MPI_File_create_errhandler`. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_set_info.3.rst b/docs/man-openmpi/man3/MPI_File_set_info.3.rst new file mode 100644 index 00000000000..d532ba20b99 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_set_info.3.rst @@ -0,0 +1,151 @@ +.. 
_mpi_file_set_info: + + +MPI_File_set_info +================= + +.. include_body + +:ref:`MPI_File_set_info` - Sets new values for hints (collective). + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_set_info(MPI_File fh, MPI_Info info) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FILE_SET_INFO(FH, INFO, IERROR) + INTEGER FH, INFO, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_set_info(fh, info, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + TYPE(MPI_Info), INTENT(IN) :: info + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT/OUTPUT PARAMETER +---------------------- +* ``fh``: File handle (handle). + +INPUT PARAMETER +--------------- +* ``info``: Info object (handle). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_File_set_info` is a collective routine that sets new values +for the hints of the file associated with *fh*. These hints are set +for each file, using the :ref:`MPI_File_open`, :ref:`MPI_File_delete`, +:ref:`MPI_File_set_view`, and :ref:`MPI_File_set_info` routines. The +opaque *info* object, which allows you to provide hints for +optimization of your code, may be different on each process, but some +*info* entries are required to be the same on all processes: In these +cases, they must appear with the same value in each process's info +object. See the :ref:`HINTS section ` +for a list of hints that can be set. + +.. _man-openmpi-mpi-file-set-info: + +HINTS +----- + +The following hints can be used as values for the *info* argument. + +**SETTABLE HINTS** + +* ``shared_file_timeout``: Amount of time (in seconds) to wait for + access to the shared file pointer before exiting with + ``MPI_ERR_TIMEDOUT``. 
+ +* ``rwlock_timeout``: Amount of time (in seconds) to wait for + obtaining a read or write lock on a contiguous chunk of a UNIX file + before exiting with ``MPI_ERR_TIMEDOUT``. + +* ``noncoll_read_bufsize``: Maximum size of the buffer used by MPI I/O + to satisfy read requests in the noncollective data-access + routines. + + .. note:: A buffer size smaller than the distance (in bytes) in a + UNIX file between the first byte and the last byte of the + access request causes MPI I/O to iterate and perform + multiple UNIX ``read()`` or ``write()`` calls. If the request + includes multiple noncontiguous chunks of data, and the + buffer size is greater than the size of those chunks, then + the UNIX ``read()`` or ``write()`` (made at the MPI I/O level) + will access data not requested by this process in order to + reduce the total number of ``write()`` calls made. If this is + not desirable behavior, you should reduce this buffer size + to equal the size of the contiguous chunks within the + aggregate request. + +* ``noncoll_write_bufsize``: Maximum size of the buffer used by MPI + I/O to satisfy write requests in the noncollective data-access + routines. + + See the above note in ``noncoll_read_bufsize``. + +* ``coll_read_bufsize``: Maximum size of the buffer used by MPI I/O to + satisfy read requests in the collective data-access routines. + + See the above note in ``noncoll_read_bufsize``. + +* ``coll_write_bufsize``: Maximum size of the buffer used by MPI I/O + to satisfy write requests in the collective data-access + routines. + + See the above note in ``noncoll_read_bufsize``. + +* ``mpiio_concurrency``: (boolean) controls whether nonblocking I/O + routines can bind an extra thread to an LWP. + +* ``mpiio_coll_contiguous``: (boolean) controls whether subsequent + collective data accesses will request collectively contiguous + regions of the file. + + + **NON-SETTABLE HINTS** + +* ``filename``: Access this hint to get the name of the file. 
+ + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_set_size.3.rst b/docs/man-openmpi/man3/MPI_File_set_size.3.rst new file mode 100644 index 00000000000..2642898dc34 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_set_size.3.rst @@ -0,0 +1,110 @@ +.. _mpi_file_set_size: + + +MPI_File_set_size +================= + +.. include_body + +:ref:`MPI_File_set_size` - Resizes a file (collective). + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_set_size(MPI_File fh, MPI_Offset size) + + +Fortran Syntax (see FORTRAN 77 NOTES) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FILE_SET_SIZE(FH, SIZE, IERROR) + INTEGER FH, IERROR + INTEGER(KIND=MPI_OFFSET_KIND) SIZE + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_set_size(fh, size, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + INTEGER(KIND=MPI_OFFSET_KIND), INTENT(IN) :: size + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``fh``: File handle (handle). +* ``size``: Size to truncate or expand file (integer). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_File_set_size` resizes the file associated with the file handle *fh,* +truncating UNIX files as necessary. 
:ref:`MPI_File_set_size` is collective; all +processes in the group must pass identical values for size. + +When using :ref:`MPI_File_set_size` on a UNIX file, if *size* is larger than +the current file size, the file size becomes *size*. If *size* is +smaller than the current file size, the file is truncated at the +position defined by *size* (from the beginning of the file and measured +in bytes). Regions of the file which have been previously written are +unaffected. + +:ref:`MPI_File_set_size` does not affect the individual file pointers or the +shared file pointer. + +Note that the actual amount of storage space cannot be allocated by +:ref:`MPI_File_set_size`. Use :ref:`MPI_File_preallocate` to accomplish this. + +It is erroneous to call this function if MPI_MODE_SEQUENTIAL mode was +specified when the file was opened. + + +FORTRAN 77 NOTES +---------------- + +The MPI standard prescribes portable Fortran syntax for the *SIZE* +argument only for Fortran 90. FORTRAN 77 users may use the non-portable +syntax + +:: + + INTEGER*MPI_OFFSET_KIND SIZE + +where MPI_OFFSET_KIND is a constant defined in mpif.h and gives the +length of the declared integer in bytes. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_set_view.3.rst b/docs/man-openmpi/man3/MPI_File_set_view.3.rst new file mode 100644 index 00000000000..b2ff3e67008 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_set_view.3.rst @@ -0,0 +1,191 @@ +.. 
_mpi_file_set_view: + + +MPI_File_set_view +================= + +.. include_body + +:ref:`MPI_File_set_view` - Changes process's view of data in file +(collective). + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_set_view(MPI_File fh, MPI_Offset disp, + MPI_Datatype etype, MPI_Datatype filetype, + const char *datarep, MPI_Info info) + + +Fortran Syntax (see FORTRAN 77 NOTES) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FILE_SET_VIEW(FH, DISP, ETYPE, + FILETYPE, DATAREP, INFO, IERROR) + INTEGER FH, ETYPE, FILETYPE, INFO, IERROR + CHARACTER*(*) DATAREP + INTEGER(KIND=MPI_OFFSET_KIND) DISP + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_set_view(fh, disp, etype, filetype, datarep, info, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + INTEGER(KIND=MPI_OFFSET_KIND), INTENT(IN) :: disp + TYPE(MPI_Datatype), INTENT(IN) :: etype, filetype + CHARACTER(LEN=*), INTENT(IN) :: datarep + TYPE(MPI_Info), INTENT(IN) :: info + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT/OUTPUT PARAMETER +---------------------- +* ``fh``: File handle (handle). + +INPUT PARAMETERS +---------------- +* ``disp``: Displacement (integer). +* ``etype``: Elementary data type (handle). +* ``filetype``: File type (handle). See Restrictions, below. +* ``datarep``: Data representation (string). +* ``info``: Info object (handle). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). 
+ +DESCRIPTION +----------- + +The :ref:`MPI_File_set_view` routine changes the process's view of the data in +the file -- the beginning of the data accessible in the file through +that view is set to *disp;* the type of data is set to *etype;* and the +distribution of data to processes is set to *filetype.* In addition, +:ref:`MPI_File_set_view` resets the independent file pointers and the shared +file pointer to zero. :ref:`MPI_File_set_view` is collective across the *fh*; +all processes in the group must pass identical values for *datarep* and +provide an *etype* with an identical extent. The values for *disp*, +*filetype*, and *info* may vary. It is erroneous to use the shared file +pointer data-access routines unless identical values for *disp* and +*filetype* are also given. The data types passed in *etype* and +*filetype* must be committed. + +The *disp* displacement argument specifies the position (absolute offset +in bytes from the beginning of the file) where the view begins. + +The :ref:`MPI_File_set_view` interface allows the user to pass a +data-representation string to MPI I/O via the *datarep* argument. To +obtain the default value pass the value "native". The user can also +pass information via the *info* argument. See the :ref:`HINTS section +` for a list of hints that can be +set. + +.. _man-openmpi-mpi-file-set-view: + +HINTS +----- + +The following hints can be used as values for the *info* argument. + +**SETTABLE HINTS** + +* ``MPI_INFO_NULL`` + +* ``shared_file_timeout``: Amount of time (in seconds) to wait for + access to the shared file pointer before exiting with + ``MPI_ERR_TIMEDOUT``. + +* ``rwlock_timeout``: Amount of time (in seconds) to wait for + obtaining a read or write lock on a contiguous chunk of a UNIX file + before exiting with ``MPI_ERR_TIMEDOUT``. + +* ``noncoll_read_bufsize``: Maximum size of the buffer used by MPI I/O + to satisfy read requests in the noncollective data-access routines. + + .. 
note:: A buffer size smaller than the distance (in bytes) in a + UNIX file between the first byte and the last byte of the + access request causes MPI I/O to iterate and perform + multiple UNIX ``read()`` or ``write()`` calls. If the + request includes multiple noncontiguous chunks of data, + and the buffer size is greater than the size of those + chunks, then the UNIX ``read()`` or ``write()`` (made at + the MPI I/O level) will access data not requested by this + process in order to reduce the total number of ``write()`` + calls made. If this is not desirable behavior, you should + reduce this buffer size to equal the size of the + contiguous chunks within the aggregate request. + +* ``noncoll_write_bufsize``: Maximum size of the buffer used by MPI + I/O to satisfy write requests in the noncollective data-access + routines. + + See the above note in ``noncoll_read_bufsize``. + +* ``coll_read_bufsize``: Maximum size of the buffer used by MPI I/O to + satisfy read requests in the collective data-access routines. + + See the above note in ``noncoll_read_bufsize``. + +* ``coll_write_bufsize``: Maximum size of the buffer used by MPI I/O + to satisfy write requests in the collective data-access routines. + + See the above note in ``noncoll_read_bufsize``. + +* ``mpiio_concurrency``: (boolean) controls whether nonblocking I/O + routines can bind an extra thread to an LWP. + +* ``mpiio_coll_contiguous``: (boolean) controls whether subsequent + collective data accesses will request collectively contiguous + regions of the file. + +**NON-SETTABLE HINTS** + +* ``filename``: Access this hint to get the name of the file. + + + +FORTRAN 77 NOTES +---------------- + +The MPI standard prescribes portable Fortran syntax for the *DISP* +argument only for Fortran 90. FORTRAN 77 users may use the non-portable +syntax + +:: + + INTEGER*MPI_OFFSET_KIND DISP + +where MPI_OFFSET_KIND is a constant defined in mpif.h and gives the +length of the declared integer in bytes. 
+ + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_sync.3.rst b/docs/man-openmpi/man3/MPI_File_sync.3.rst new file mode 100644 index 00000000000..dcca6543919 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_sync.3.rst @@ -0,0 +1,83 @@ +.. _mpi_file_sync: + + +MPI_File_sync +============= + +.. include_body + +:ref:`MPI_File_sync` - Makes semantics consistent for data-access +operations (collective). + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_sync(MPI_File fh) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FILE_SYNC(FH, IERROR) + INTEGER FH, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_sync(fh, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETER +--------------- +* ``fh``: File handle (handle). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +Calling :ref:`MPI_File_sync` with *fh* causes all previous writes to *fh* by +the calling process to be written to permanent storage. If other +processes have made updates to permanent storage, then all such updates +become visible to subsequent reads of *fh* by the calling process. + +:ref:`MPI_File_sync` is a collective operation. 
The user is responsible for +ensuring that all nonblocking requests on *fh* have been completed +before calling :ref:`MPI_File_sync`. Otherwise, the call to :ref:`MPI_File_sync` is +erroneous. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_write.3.rst b/docs/man-openmpi/man3/MPI_File_write.3.rst new file mode 100644 index 00000000000..81775780f04 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_write.3.rst @@ -0,0 +1,99 @@ +.. _mpi_file_write: + + +MPI_File_write +============== + +.. include_body + +:ref:`MPI_File_write` - Writes a file starting at the location specified by +the individual file pointer (blocking, noncollective). + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_write(MPI_File fh, const void *buf, + int count, MPI_Datatype datatype, + MPI_Status *status) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FILE_WRITE(FH, BUF, COUNT, + DATATYPE, STATUS, IERROR) + BUF(*) + INTEGER FH, COUNT, DATATYPE, STATUS(MPI_STATUS_SIZE), IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE mpi_f08 + MPI_File_write(fh, buf, count, datatype, status, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + TYPE(*), DIMENSION(..), INTENT(IN) :: buf + INTEGER, INTENT(IN) :: count + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Status) :: status + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT/OUTPUT PARAMETER +---------------------- +* ``fh``: File handle (handle). + +INPUT PARAMETERS +---------------- +* ``buf``: Initial address of buffer (choice). +* ``count``: Number of elements in buffer (integer). +* ``datatype``: Data type of each buffer element (handle). + +OUTPUT PARAMETERS +----------------- +* ``status``: Status object (status). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_File_write` attempts to write into the file associated with *fh* (at +the current individual file pointer position maintained by the system) a +total number of *count* data items having *datatype* type from the +user's buffer *buf.* The data is written into those parts of the file +specified by the current view. :ref:`MPI_File_write` stores the number of +*datatype* elements actually written in *status.* All other fields of +*status* are undefined. + +It is erroneous to call this function if MPI_MODE_SEQUENTIAL mode was +specified when the file was opened. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. 
diff --git a/docs/man-openmpi/man3/MPI_File_write_all.3.rst b/docs/man-openmpi/man3/MPI_File_write_all.3.rst new file mode 100644 index 00000000000..806eb29e5c6 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_write_all.3.rst @@ -0,0 +1,95 @@ +.. _mpi_file_write_all: + + +MPI_File_write_all +================== + +.. include_body + +:ref:`MPI_File_write_all` - Writes a file starting at the locations +specified by individual file pointers (blocking, collective). + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_write_all(MPI_File fh, const void *buf, + int count, MPI_Datatype datatype, MPI_Status *status) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FILE_WRITE_ALL(FH, BUF, COUNT, + DATATYPE, STATUS, IERROR) + BUF(*) + INTEGER FH, COUNT, DATATYPE, STATUS(MPI_STATUS_SIZE), IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_write_all(fh, buf, count, datatype, status, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + TYPE(*), DIMENSION(..), INTENT(IN) :: buf + INTEGER, INTENT(IN) :: count + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Status) :: status + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``fh``: File handle (handle). +* ``buf``: Initial address of buffer (choice). +* ``count``: Number of elements in buffer (integer). +* ``datatype``: Data type of each buffer element (handle). + +OUTPUT PARAMETERS +----------------- +* ``status``: Status object (status). +* ``IERROR``: Fortran only: Error status (integer). 
+ +DESCRIPTION +----------- + +:ref:`MPI_File_write_all` is a collective routine that attempts to write into +the file associated with *fh* (at the current individual file pointer +position maintained by the system) a total number of *count* data items +having *datatype* type from the user's buffer *buf.* The data is written +into those parts of the file specified by the current view. +:ref:`MPI_File_write_all` stores the number of *datatype* elements actually +written in *status.* All other fields of *status* are undefined. + +It is erroneous to call this function if MPI_MODE_SEQUENTIAL mode was +specified when the file was opened. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_write_all_begin.3.rst b/docs/man-openmpi/man3/MPI_File_write_all_begin.3.rst new file mode 100644 index 00000000000..d7595b867ef --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_write_all_begin.3.rst @@ -0,0 +1,101 @@ +.. _mpi_file_write_all_begin: + + +MPI_File_write_all_begin +======================== + +.. include_body + +:ref:`MPI_File_write_all_begin` - Writes a file starting at the locations +specified by individual file pointers; beginning part of a split +collective routine (nonblocking). + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_write_all_begin(MPI_File fh, const void *buf, + int count, MPI_Datatype datatype) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FILE_WRITE_ALL_BEGIN(FH, BUF, COUNT, DATATYPE, IERROR) + BUF(*) + INTEGER FH, COUNT, DATATYPE, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_write_all_begin(fh, buf, count, datatype, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: buf + INTEGER, INTENT(IN) :: count + TYPE(MPI_Datatype), INTENT(IN) :: datatype + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT/OUTPUT PARAMETER +---------------------- +* ``fh``: File handle (handle). + +INPUT PARAMETERS +---------------- +* ``buf``: Initial address of buffer (choice). +* ``count``: Number of elements in buffer (integer). +* ``datatype``: Data type of each buffer element (handle). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_File_write_all_begin` is the beginning part of a split collective, +nonblocking routine that attempts to write into the file associated with +*fh* (at the current individual file pointer position maintained by the +system) a total number of *count* data items having *datatype* type from +the user's buffer *buf.* The data is written into those parts of the +file specified by the current view. + + +NOTES +----- + +All the nonblocking collective routines for data access are "split" into +two routines, each with \_begin or \_end as a suffix. These split +collective routines are subject to the semantic rules described in +Section 9.4.5 of the MPI-2 standard. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. 
The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_write_all_end.3.rst b/docs/man-openmpi/man3/MPI_File_write_all_end.3.rst new file mode 100644 index 00000000000..d0126a55364 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_write_all_end.3.rst @@ -0,0 +1,99 @@ +.. _mpi_file_write_all_end: + + +MPI_File_write_all_end +====================== + +.. include_body + +:ref:`MPI_File_write_all_end` - Writes a file starting at the locations +specified by individual file pointers; ending part of a split collective +routine (blocking). + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_write_all_end(MPI_File fh, const void *buf, MPI_Status *status) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FILE_WRITE_ALL_END(FH, BUF, STATUS, IERROR) + BUF(*) + INTEGER FH, STATUS, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_write_all_end(fh, buf, status, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: buf + TYPE(MPI_Status) :: status + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT/OUTPUT PARAMETER +---------------------- +* ``fh``: File handle (handle). + +INPUT PARAMETER +--------------- +* ``buf``: Initial address of buffer (choice). + +OUTPUT PARAMETERS +----------------- +* ``status``: Status object (status). +* ``IERROR``: Fortran only: Error status (integer). 
+ +DESCRIPTION +----------- + +:ref:`MPI_File_write_all_end` is the ending part of a split collective routine +that stores the number of elements actually written into the file +associated with *fh* from the user's buffer *buf* in *status.* +:ref:`MPI_File_write_all_end` blocks until the operation initiated by +:ref:`MPI_File_write_all_begin` completes. The data is written into those parts +of the file specified by the current view. All other fields of *status* +are undefined. + + +NOTES +----- + +All the nonblocking collective routines for data access are "split" into +two routines, each with \_begin or \_end as a suffix. These split +collective routines are subject to the semantic rules described in +Section 9.4.5 of the MPI-2 standard. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_write_at.3.rst b/docs/man-openmpi/man3/MPI_File_write_at.3.rst new file mode 100644 index 00000000000..2592bd35bc3 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_write_at.3.rst @@ -0,0 +1,119 @@ +.. _mpi_file_write_at: + + +MPI_File_write_at +================= + +.. include_body + +:ref:`MPI_File_write_at` - Writes a file at an explicitly specified offset +(blocking, noncollective). + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. 
code-block:: c + + #include + + int MPI_File_write_at(MPI_File fh, MPI_Offset offset, const void *buf, + int count, MPI_Datatype datatype, MPI_Status *status) + + +Fortran Syntax (see FORTRAN 77 NOTES) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FILE_WRITE_AT(FH, OFFSET, BUF, COUNT, + DATATYPE, STATUS, IERROR) + BUF(*) + INTEGER FH, COUNT, DATATYPE, STATUS(MPI_STATUS_SIZE), IERROR + INTEGER(KIND=MPI_OFFSET_KIND) OFFSET + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_write_at(fh, offset, buf, count, datatype, status, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + INTEGER(KIND=MPI_OFFSET_KIND), INTENT(IN) :: offset + TYPE(*), DIMENSION(..), INTENT(IN) :: buf + INTEGER, INTENT(IN) :: count + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Status) :: status + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``fh``: File handle (handle). +* ``offset``: File offset (integer). +* ``buf``: Initial address of buffer (choice). +* ``count``: Number of elements in buffer (integer). +* ``datatype``: Data type of each buffer element (handle). + +OUTPUT PARAMETERS +----------------- +* ``status``: Status object (status). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_File_write_at` attempts to write into the file associated with *fh* +(at the *offset* position) a total number of *count* data items having +*datatype* type from the user's buffer *buf.* The offset is in *etype* +units relative to the current view. That is, holes are not counted when +locating an offset. The data is written into those parts of the file +specified by the current view. :ref:`MPI_File_write_at` stores the number of +*datatype* elements actually written in *status.* All other fields of +*status* are undefined. 
+ +It is erroneous to call this function if MPI_MODE_SEQUENTIAL mode was +specified when the file was opened. + + +FORTRAN 77 NOTES +---------------- + +The MPI standard prescribes portable Fortran syntax for the *OFFSET* +argument only for Fortran 90. FORTRAN 77 users may use the non-portable +syntax + +:: + + INTEGER*MPI_OFFSET_KIND OFFSET + +where MPI_OFFSET_KIND is a constant defined in mpif.h and gives the +length of the declared integer in bytes. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_File_iwrite_at` :ref:`MPI_File_write_at_all` :ref:`MPI_File_write_at_all_begin` + :ref:`MPI_File_write_at_all_end` diff --git a/docs/man-openmpi/man3/MPI_File_write_at_all.3.rst b/docs/man-openmpi/man3/MPI_File_write_at_all.3.rst new file mode 100644 index 00000000000..56315e534ba --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_write_at_all.3.rst @@ -0,0 +1,114 @@ +.. _mpi_file_write_at_all: + + +MPI_File_write_at_all +===================== + +.. include_body + +:ref:`MPI_File_write_at_all` - Writes a file at explicitly specified +offsets (blocking, collective). + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_write_at_all(MPI_File fh, MPI_Offset offset, const void *buf, + int count, MPI_Datatype datatype, MPI_Status *status) + + +Fortran Syntax (see FORTRAN 77 NOTES) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! 
or the older form: INCLUDE 'mpif.h' + MPI_FILE_WRITE_AT_ALL(FH, OFFSET, BUF, COUNT, + DATATYPE, STATUS, IERROR) + BUF(*) + INTEGER FH, COUNT, DATATYPE, STATUS(MPI_STATUS_SIZE), IERROR + INTEGER(KIND=MPI_OFFSET_KIND) OFFSET + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_write_at_all(fh, offset, buf, count, datatype, status, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + INTEGER(KIND=MPI_OFFSET_KIND), INTENT(IN) :: offset + TYPE(*), DIMENSION(..), INTENT(IN) :: buf + INTEGER, INTENT(IN) :: count + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Status) :: status + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``fh``: File handle (handle). +* ``offset``: File offset (integer). +* ``buf``: Initial address of buffer (choice). +* ``count``: Number of elements in buffer (integer). +* ``datatype``: Data type of each buffer element (handle). + +OUTPUT PARAMETERS +----------------- +* ``status``: Status object (status). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_File_write_at_all` is a collective routine that attempts to write +into the file associated with *fh* (at the *offset* position) a total +number of *count* data items having *datatype* type from the user's +buffer *buf.* The offset is in etype units relative to the current view. +That is, holes are not counted when locating an offset. The data is +written into those parts of the file specified by the current view. +:ref:`MPI_File_write_at_all` stores the number of *datatype* elements actually +written in *status.* All other fields of *status* are undefined. + +It is erroneous to call this function if MPI_MODE_SEQUENTIAL mode was +specified when the file was opened. + + +FORTRAN 77 NOTES +---------------- + +The MPI standard prescribes portable Fortran syntax for the *OFFSET* +argument only for Fortran 90. 
FORTRAN 77 users may use the non-portable +syntax + +:: + + INTEGER*MPI_OFFSET_KIND OFFSET + +where MPI_OFFSET_KIND is a constant defined in mpif.h and gives the +length of the declared integer in bytes. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_write_at_all_begin.3.rst b/docs/man-openmpi/man3/MPI_File_write_at_all_begin.3.rst new file mode 100644 index 00000000000..62e3734e8f8 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_write_at_all_begin.3.rst @@ -0,0 +1,119 @@ +.. _mpi_file_write_at_all_begin: + + +MPI_File_write_at_all_begin +=========================== + +.. include_body + +:ref:`MPI_File_write_at_all_begin` - Writes a file at explicitly specified +offsets; beginning part of a split collective routine (nonblocking). + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_write_at_all_begin(MPI_File fh, MPI_Offset offset, + const void *buf, int count, MPI_Datatype datatype) + + +Fortran Syntax (see FORTRAN 77 NOTES) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FILE_WRITE_AT_ALL_BEGIN(FH, OFFSET, BUF, COUNT, DATATYPE, IERROR) + BUF(*) + INTEGER FH, COUNT, DATATYPE, IERROR + INTEGER(KIND=MPI_OFFSET_KIND) OFFSET + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE mpi_f08 + MPI_File_write_at_all_begin(fh, offset, buf, count, datatype, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + INTEGER(KIND=MPI_OFFSET_KIND), INTENT(IN) :: offset + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: buf + INTEGER, INTENT(IN) :: count + TYPE(MPI_Datatype), INTENT(IN) :: datatype + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT/OUTPUT PARAMETER +---------------------- +* ``fh``: File handle (handle). + +INPUT PARAMETERS +---------------- +* ``offset``: File offset (handle). +* ``buf``: Initial address of buffer (choice). +* ``count``: Number of elements in buffer (integer). +* ``datatype``: Data type of each buffer element (handle). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_File_write_at_all_begin` is the beginning part of a split collective, +that is, a nonblocking routine that attempts to write into the file +associated with *fh* (at the *offset* position) a total number of +*count* data items having *datatype* type from the user's buffer *buf.* +The offset is in etype units relative to the current view. That is, +holes are not counted when locating an offset. The data is written into +those parts of the file specified by the current view. + + +FORTRAN 77 NOTES +---------------- + +The MPI standard prescribes portable Fortran syntax for the *OFFSET* +argument only for Fortran 90. FORTRAN 77 users may use the non-portable +syntax + +:: + + INTEGER*MPI_OFFSET_KIND OFFSET + +where MPI_OFFSET_KIND is a constant defined in mpif.h and gives the +length of the declared integer in bytes. + + +NOTES +----- + +All the nonblocking collective routines for data access are "split" into +two routines, each with \_begin or \_end as a suffix. These split +collective routines are subject to the semantic rules described in +Section 9.4.5 of the MPI-2 standard. 
+ + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_write_at_all_end.3.rst b/docs/man-openmpi/man3/MPI_File_write_at_all_end.3.rst new file mode 100644 index 00000000000..fc8bb431c1c --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_write_at_all_end.3.rst @@ -0,0 +1,97 @@ +.. _mpi_file_write_at_all_end: + + +MPI_File_write_at_all_end +========================= + +.. include_body + +:ref:`MPI_File_write_at_all_end` - Writes a file at explicitly specified +offsets; ending part of a split collective routine (blocking). + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_File_write_at_all_end(MPI_File fh, const void *buf, + MPI_Status *status) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_FILE_WRITE_AT_ALL_END(FH, BUF, STATUS, IERROR) + BUF(*) + INTEGER FH, STATUS(MPI_STATUS_SIZE), IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_File_write_at_all_end(fh, buf, status, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: buf + TYPE(MPI_Status) :: status + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT/OUTPUT PARAMETER +---------------------- +* ``fh``: File handle (handle). + +INPUT PARAMETER +--------------- +* ``buf``: Initial address of buffer (choice). 
+ +OUTPUT PARAMETERS +----------------- +* ``status``: Status object (status). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_File_write_at_all_end` is the ending part of a split collective +routine that stores the number of elements actually written into the +file associated with *fh* in *status.* The data is written into those +parts of the file specified by the current view. All other fields of +*status* are undefined. + + +NOTES +----- + +All the nonblocking collective routines for data access are "split" into +two routines, each with \_begin or \_end as a suffix. These split +collective routines are subject to the semantic rules described in +Section 9.4.5 of the MPI-2 standard. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_write_ordered.3.rst b/docs/man-openmpi/man3/MPI_File_write_ordered.3.rst new file mode 100644 index 00000000000..02130e44788 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_write_ordered.3.rst @@ -0,0 +1,95 @@ +.. _mpi_file_write_ordered: + +MPI_File_write_ordered +====================== + +.. include_body + +:ref:`MPI_File_write_ordered` - Writes a file at a location specified by a +shared file pointer (blocking, collective). + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. 
code:: c + + #include + + int MPI_File_write_ordered(MPI_File fh, const void *buf, + int count, MPI_Datatype datatype, + MPI_Status *status) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_FILE_WRITE_ORDERED(FH, BUF, COUNT, DATATYPE, + STATUS, IERROR) + BUF(*) + INTEGER FH, COUNT, DATATYPE, STATUS(MPI_STATUS_SIZE), IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code:: fortran + + USE mpi_f08 + + MPI_File_write_ordered(fh, buf, count, datatype, status, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + TYPE(*), DIMENSION(..), INTENT(IN) :: buf + INTEGER, INTENT(IN) :: count + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Status) :: status + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input Parameters +---------------- + +- ``fh`` : File handle (handle). +- ``buf`` : Initial address of buffer (choice). +- ``count`` : Number of elements in buffer (integer). +- ``datatype`` : Data type of each buffer element (handle). + +Output Parameters +----------------- + +- ``status`` : Status object (Status). +- ``IERROR`` : Fortran only: Error status (integer). + +Description +----------- + +:ref:`MPI_File_write_ordered` is a collective routine. This routine must be +called by all processes in the communicator group associated with the +file handle ``fh``. Each process may pass different argument values for +the ``datatype`` and ``count`` arguments. Each process attempts to +write, into the file associated with ``fh``, a total number of ``count`` +data items having ``datatype`` type contained in the user's buffer +``buf``. For each process, the location in the file at which data is +written is the position at which the shared file pointer would be after +all processes whose ranks within the group are less than that of this +process had written their data. :ref:`MPI_File_write_ordered` returns the +number of ``datatype`` elements written in ``status``. 
The shared file +pointer is updated by the amounts of data requested by all processes of +the group. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that +MPI does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_write_ordered_begin.3.rst b/docs/man-openmpi/man3/MPI_File_write_ordered_begin.3.rst new file mode 100644 index 00000000000..58c249269a7 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_write_ordered_begin.3.rst @@ -0,0 +1,103 @@ +.. _mpi_file_write_ordered_begin: + +MPI_File_write_ordered_begin +============================ + +.. include_body + +:ref:`MPI_File_write_ordered_begin` - Writes a file at a location specified +by a shared file pointer; beginning part of a split collective routine +(nonblocking). + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. code:: c + + #include + + int MPI_File_write_ordered_begin(MPI_File fh, const void *buf, + int count, MPI_Datatype datatype) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_FILE_WRITE_ORDERED_BEGIN(FH, BUF, COUNT, DATATYPE, IERROR) + BUF(*) + INTEGER FH, COUNT, DATATYPE, IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code:: fortran + + USE mpi_f08 + + MPI_File_write_ordered_begin(fh, buf, count, datatype, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: buf + INTEGER, INTENT(IN) :: count + TYPE(MPI_Datatype), INTENT(IN) :: datatype + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input/Output Parameter +---------------------- + +- ``fh`` : File handle (handle). + +Input Parameters +---------------- + +- ``buf`` : Initial address of buffer (choice). +- ``count`` : Number of elements in buffer (integer). +- ``datatype`` : Data type of each buffer element (handle). + +Output Parameter +---------------- + +- ``IERROR`` : Fortran only: Error status (integer). + +Description +----------- + +:ref:`MPI_File_write_ordered_begin` is the beginning part of a split +collective, nonblocking routine that must be called by all processes in +the communicator group associated with the file handle ``fh``. Each +process may pass different argument values for the ``datatype`` and +``count`` arguments. After all processes of the group have issued their +respective calls, each process attempts to write, into the file +associated with ``fh``, a total number of ``count`` data items having +``datatype`` type contained in the user's buffer ``buf``. For each +process, the location in the file at which data is written is the +position at which the shared file pointer would be after all processes +whose ranks within the group are less than that of this process had +written their data. + +Notes +----- + +All the nonblocking collective routines for data access are "split" into +two routines, each with \_begin or \_end as a suffix. These split +collective routines are subject to the semantic rules described in +Section 9.4.5 of the MPI-2 standard. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. 
+ +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that +MPI does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_write_ordered_end.3.rst b/docs/man-openmpi/man3/MPI_File_write_ordered_end.3.rst new file mode 100644 index 00000000000..28b33aeb7e6 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_write_ordered_end.3.rst @@ -0,0 +1,94 @@ +.. _mpi_file_write_ordered_end: + +MPI_File_write_ordered_end +========================== + +.. include_body + +:ref:`MPI_File_write_ordered_end` - Writes a file at a location specified +by a shared file pointer; ending part of a split collective routine +(blocking). + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. code:: c + + #include + + int MPI_File_write_ordered_end(MPI_File fh, const void *buf, + MPI_Status *status) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_FILE_WRITE_ORDERED_END(FH, BUF, STATUS, IERROR) + BUF(*) + INTEGER FH, STATUS(MPI_STATUS_SIZE), IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code:: fortran + + USE mpi_f08 + + MPI_File_write_ordered_end(fh, buf, status, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: buf + TYPE(MPI_Status) :: status + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input/Output Parameter +---------------------- + +- ``fh`` : File handle (handle). + +Input Parameter +--------------- + +- ``buf`` : Initial address of buffer (choice). + +Output Parameters +----------------- + +- ``status`` : Status object (status). +- ``IERROR`` : Fortran only: Error status (integer). 
+ +Description +----------- + +:ref:`MPI_File_write_ordered_end` is the ending part of a split collective +routine that must be called by all processes in the communicator group +associated with the file handle ``fh``. :ref:`MPI_File_write_ordered_end` +returns the number of elements written into the file associated with +``fh`` in ``status``. + +Notes +----- + +All the nonblocking collective routines for data access are "split" into +two routines, each with \_begin or \_end as a suffix. These split +collective routines are subject to the semantic rules described in +Section 9.4.5 of the MPI-2 standard. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that +MPI does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_File_write_shared.3.rst b/docs/man-openmpi/man3/MPI_File_write_shared.3.rst new file mode 100644 index 00000000000..cdea8ed02db --- /dev/null +++ b/docs/man-openmpi/man3/MPI_File_write_shared.3.rst @@ -0,0 +1,87 @@ +.. _mpi_file_write_shared: + +MPI_File_write_shared +===================== + +.. include_body + +:ref:`MPI_File_write_shared` - Writes a file using the shared file pointer +(blocking, noncollective). + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. code:: c + + #include + + int MPI_File_write_shared(MPI_File fh, const void *buf, int count, + MPI_Datatype datatype, MPI_Status *status) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: fortran + + USE MPI + ! 
or the older form: INCLUDE 'mpif.h' + + MPI_FILE_WRITE_SHARED(FH, BUF, COUNT, DATATYPE, STATUS, IERROR) + BUF(*) + INTEGER FH, COUNT, DATATYPE, STATUS(MPI_STATUS_SIZE), IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code:: fortran + + USE mpi_f08 + + MPI_File_write_shared(fh, buf, count, datatype, status, ierror) + TYPE(MPI_File), INTENT(IN) :: fh + TYPE(*), DIMENSION(..), INTENT(IN) :: buf + INTEGER, INTENT(IN) :: count + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Status) :: status + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input/Output Parameter +---------------------- + +- ``fh`` : File handle (handle). + +Input Parameters +---------------- + +- ``buf`` : Initial address of buffer (choice). +- ``count`` : Number of elements in buffer (integer). +- ``datatype`` : Data type of each buffer element (handle). + +Output Parameters +----------------- + +- ``status`` : Status object (status). +- ``IERROR`` : Fortran only: Error status (integer). + +Description +----------- + +:ref:`MPI_File_write_shared` is a blocking routine that uses the shared +file pointer to write files. The order of serialization is not +deterministic for this noncollective routine. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that +MPI does not guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_Finalize.3.rst b/docs/man-openmpi/man3/MPI_Finalize.3.rst new file mode 100644 index 00000000000..32c28ee047d --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Finalize.3.rst @@ -0,0 +1,112 @@ +.. 
_mpi_finalize: + +MPI_Finalize +============ + +.. include_body + +:ref:`MPI_Finalize` - Terminates MPI execution environment. + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. code:: c + + #include + + int MPI_Finalize() + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_FINALIZE(IERROR) + INTEGER IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code:: fortran + + USE mpi_f08 + + MPI_Finalize(ierror) + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Output Parameter +---------------- + +- IERROR : Fortran only: Error status (integer). + +Description +----------- + +This routine cleans up all MPI states. Once this routine is called, no +MPI routine (not even MPI_Init) may be called, except for +:ref:`MPI_Get_version`, :ref:`MPI_Initialized`, and :ref:`MPI_Finalized`. Unless there has +been a call to :ref:`MPI_Abort`, you must ensure that all pending +communications involving a process are complete before the process calls +:ref:`MPI_Finalize`. If the call returns, each process may either continue +local computations or exit without participating in further +communication with other processes. At the moment when the last process +calls :ref:`MPI_Finalize`, all pending sends must be matched by a receive, and +all pending receives must be matched by a send. + +:ref:`MPI_Finalize` is collective over all connected processes. If no processes +were spawned, accepted, or connected, then this means it is collective +over MPI_COMM_WORLD. Otherwise, it is collective over the union of all +processes that have been and continue to be connected. + +Notes +----- + +All processes must call this routine before exiting. All processes will +still exist but may not make any further MPI calls. :ref:`MPI_Finalize` +guarantees that all local actions required by communications the user +has completed will, in fact, occur before it returns. 
However, +:ref:`MPI_Finalize` guarantees nothing about pending communications that have +not been completed; completion is ensured only by :ref:`MPI_Wait`, :ref:`MPI_Test`, or +:ref:`MPI_Request_free` combined with some other verification of completion. + +For example, a successful return from a blocking communication operation +or from :ref:`MPI_Wait` or :ref:`MPI_Test` means that the communication is completed +by the user and the buffer can be reused, but does not guarantee that +the local process has no more work to do. Similarly, a successful return +from :ref:`MPI_Request_free` with a request handle generated by an :ref:`MPI_Isend` +nullifies the handle but does not guarantee that the operation has +completed. The :ref:`MPI_Isend` is complete only when a matching receive has +completed. + +If you would like to cause actions to happen when a process finishes, +attach an attribute to MPI_COMM_SELF with a callback function. Then, +when :ref:`MPI_Finalize` is called, it will first execute the equivalent of an +:ref:`MPI_Comm_free` on MPI_COMM_SELF. This will cause the delete callback +function to be executed on all keys associated with MPI_COMM_SELF in an +arbitrary order. If no key has been attached to MPI_COMM_SELF, then no +callback is invoked. This freeing of MPI_COMM_SELF happens before any +other parts of MPI are affected. Calling :ref:`MPI_Finalized` will thus return +"false" in any of these callback functions. Once you have done this with +MPI_COMM_SELF, the results of :ref:`MPI_Finalize` are not specified. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. 
The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: :ref:`MPI_Init` diff --git a/docs/man-openmpi/man3/MPI_Finalized.3.rst b/docs/man-openmpi/man3/MPI_Finalized.3.rst new file mode 100644 index 00000000000..39fb61f0e74 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Finalized.3.rst @@ -0,0 +1,72 @@ +.. _mpi_finalized: + +MPI_Finalized +============= + +.. include_body + +:ref:`MPI_Finalized` - Checks whether MPI has been finalized + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. code:: c + + #include + + int MPI_Finalized(int *flag) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_FINALIZED(FLAG, IERROR) + LOGICAL FLAG + INTEGER IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code:: fortran + + USE mpi_f08 + + MPI_Finalized(flag, ierror) + LOGICAL, INTENT(OUT) :: flag + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Output Parameter +---------------- + +- flag : True if MPI was finalized, and false otherwise (logical). +- IERROR : Fortran only: Error status (integer). + +Description +----------- + +This routine may be used to determine whether MPI has been finalized. It +is one of a small number of routines that may be called before MPI is +initialized and after MPI has been finalized (:ref:`MPI_Initialized` is +another). + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. Before the +error value is returned, the current MPI error handler is called. By +default, this error handler aborts the MPI job, except for I/O function +errors. 
The error handler may be changed with :ref:`MPI_Comm_set_errhandler`; +the predefined error handler MPI_ERRORS_RETURN may be used to cause +error values to be returned. Note that MPI does not guarantee that an +MPI program can continue past an error. + + +.. seealso:: :ref:`MPI_Init` diff --git a/docs/man-openmpi/man3/MPI_Free_mem.3.rst b/docs/man-openmpi/man3/MPI_Free_mem.3.rst new file mode 100644 index 00000000000..a186cb9d89f --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Free_mem.3.rst @@ -0,0 +1,75 @@ +.. _mpi_free_mem: + +MPI_Free_mem +============ + +.. include_body + +:ref:`MPI_Free_mem` - Frees memory that has been allocated using :ref:`MPI_Alloc_mem`. + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. code:: c + + #include + + int MPI_Free_mem(void *base) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_FREE_MEM(BASE, IERROR) + BASE(*) + INTEGER IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code:: fortran + + USE mpi_f08 + + MPI_Free_mem(base, ierror) + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: base + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input Parameter +--------------- + +- base : Initial address of memory segment allocated by :ref:`MPI_Alloc_mem` + (choice). + +Output Parameter +---------------- + +- IERROR : Fortran only: Error status (integer). + +Description +----------- + +:ref:`MPI_Free_mem` frees memory that has been allocated by :ref:`MPI_Alloc_mem`. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. 
Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: :ref:`MPI_Alloc_mem` diff --git a/docs/man-openmpi/man3/MPI_Gather.3.rst b/docs/man-openmpi/man3/MPI_Gather.3.rst new file mode 100644 index 00000000000..d47d0dae6bb --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Gather.3.rst @@ -0,0 +1,233 @@ +.. _mpi_gather: + +MPI_Gather +========== + +.. include_body + +:ref:`MPI_Gather`, :ref:`MPI_Igather`, :ref:`MPI_Gather_init` - Gathers values from a group +of processes. + +Synopsis +-------- + +C Syntax +^^^^^^^^ + +.. code:: c + + #include + + int MPI_Gather(const void *sendbuf, int sendcount, MPI_Datatype sendtype, + void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, + MPI_Comm comm) + + int MPI_Igather(const void *sendbuf, int sendcount, MPI_Datatype sendtype, + void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, + MPI_Comm comm, MPI_Request *request) + + int MPI_Gather_init(const void *sendbuf, int sendcount, MPI_Datatype sendtype, + void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, + MPI_Comm comm, MPI_Info info, MPI_Request *request) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_GATHER(SENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNT, + RECVTYPE, ROOT, COMM, IERROR) + SENDBUF(*), RECVBUF(*) + INTEGER SENDCOUNT, SENDTYPE, RECVCOUNT, RECVTYPE, ROOT + INTEGER COMM, IERROR + + MPI_IGATHER(SENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNT, + RECVTYPE, ROOT, COMM, REQUEST, IERROR) + SENDBUF(*), RECVBUF(*) + INTEGER SENDCOUNT, SENDTYPE, RECVCOUNT, RECVTYPE, ROOT + INTEGER COMM, REQUEST, IERROR + + MPI_GATHER_INIT(SENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNT, + RECVTYPE, ROOT, COMM, INFO, REQUEST, IERROR) + SENDBUF(*), RECVBUF(*) + INTEGER SENDCOUNT, SENDTYPE, RECVCOUNT, RECVTYPE, ROOT + INTEGER COMM, INFO, REQUEST, IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code:: fortran + + USE mpi_f08 + + MPI_Gather(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, + root, comm, ierror) + TYPE(*), DIMENSION(..), INTENT(IN) :: sendbuf + TYPE(*), DIMENSION(..) :: recvbuf + INTEGER, INTENT(IN) :: sendcount, recvcount, root + TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Igather(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, + root, comm, request, ierror) + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: sendbuf + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: recvbuf + INTEGER, INTENT(IN) :: sendcount, recvcount, root + TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Gather_init(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, + root, comm, info, request, ierror) + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: sendbuf + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: recvbuf + INTEGER, INTENT(IN) :: sendcount, recvcount, root + TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Info), INTENT(IN) :: info + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input Parameters +---------------- + +- sendbuf : Starting address of send buffer (choice). +- sendcount : Number of elements in send buffer (integer). +- sendtype : Datatype of send buffer elements (handle). +- recvcount : Number of elements for any single receive (integer, + significant only at root). +- recvtype : Datatype of recvbuffer elements (handle, significant only + at root). +- root : Rank of receiving process (integer). +- comm : Communicator (handle). +- info : Info (handle, persistent only). + +Output Parameters +----------------- + +- recvbuf : Address of receive buffer (choice, significant only at + root). 
+
+- request : Request (handle, non-blocking only).
+- IERROR : Fortran only: Error status (integer).
+
+Description
+-----------
+
+Each process (root process included) sends the contents of its send
+buffer to the root process. The root process receives the messages and
+stores them in rank order. The outcome is as if each of the n processes
+in the group (including the root process) had executed a call to
+
+c MPI_Send(sendbuf, sendcount, sendtype, root, ...)
+
+and the root had executed n calls to
+
+c MPI_Recv(recvbuf + i \* recvcount \* extent(recvtype), recvcount,
+recvtype, i, ...)
+
+where extent(recvtype) is the type extent obtained from a call to
+MPI_Type_extent().
+
+An alternative description is that the n messages sent by the processes
+in the group are concatenated in rank order, and the resulting message
+is received by the root as if by a call to MPI_RECV(recvbuf, recvcount\*
+n, recvtype, ... ).
+
+The receive buffer is ignored for all nonroot processes.
+
+General, derived datatypes are allowed for both sendtype and recvtype.
+The type signature of sendcount, sendtype on process i must be equal to
+the type signature of recvcount, recvtype at the root. This implies that
+the amount of data sent must be equal to the amount of data received,
+pairwise between each process and the root. Distinct type maps between
+sender and receiver are still allowed.
+
+All arguments to the function are significant on process root, while on
+other processes, only arguments sendbuf, sendcount, sendtype, root, comm
+are significant. The arguments root and comm must have identical values
+on all processes.
+
+The specification of counts and types should not cause any location on
+the root to be written more than once. Such a call is erroneous. Note
+that the recvcount argument at the root indicates the number of items it
+receives from each process, not the total number of items it receives.
+
+Example 1: Gather 100 ints from every process in group to root.
+
+c MPI_Comm comm; int gsize,sendarray[100]; int root, \*rbuf; //...
+
+MPI_Comm_size( comm, &gsize); rbuf = (int
+*)malloc(gsize*\ 100*sizeof(int));
+
+MPI_Gather( sendarray, 100, MPI_INT, rbuf, 100, MPI_INT, root, comm);
+
+Example 2: Previous example modified -- only the root allocates memory
+for the receive buffer.
+
+c MPI_Comm comm; int gsize,sendarray[100]; int root, myrank, \*rbuf;
+//...
+
+MPI_Comm_rank( comm, &myrank); if ( myrank == root) { MPI_Comm_size(
+comm, &gsize); rbuf = (int *)malloc(gsize*\ 100*sizeof(int)); }
+MPI_Gather( sendarray, 100, MPI_INT, rbuf, 100, MPI_INT, root, comm);
+
+Example 3: Do the same as the previous example, but use a derived
+datatype. Note that the type cannot be the entire set of gsize \* 100
+ints since type matching is defined pairwise between the root and each
+process in the gather.
+
+c MPI_Comm comm; int gsize,sendarray[100]; int root, \*rbuf;
+MPI_Datatype rtype; //...
+
+MPI_Comm_size( comm, &gsize); MPI_Type_contiguous( 100, MPI_INT, &rtype
+); MPI_Type_commit( &rtype ); rbuf = (int
+*)malloc(gsize*\ 100*sizeof(int)); MPI_Gather( sendarray, 100, MPI_INT,
+rbuf, 1, rtype, root, comm);
+
+Use Of In-Place Option
+----------------------
+
+When the communicator is an intracommunicator, you can perform a gather
+operation in-place (the output buffer is used as the input buffer). Use
+the variable MPI_IN_PLACE as the value of the root process sendbuf. In
+this case, sendcount and sendtype are ignored, and the contribution of
+the root process to the gathered vector is assumed to already be in the
+correct place in the receive buffer. Note that MPI_IN_PLACE is a special
+kind of value; it has the same restrictions on its use as MPI_BOTTOM.
+Because the in-place option converts the receive buffer into a
+send-and-receive buffer, a Fortran binding that includes INTENT must
+mark these as INOUT, not OUT.
+ +When Communicator Is An Inter-Communicator +------------------------------------------ + +When the communicator is an inter-communicator, the root process in the +first group gathers data from all the processes in the second group. The +first group defines the root process. That process uses MPI_ROOT as the +value of its root argument. The remaining processes use MPI_PROC_NULL as +the value of their root argument. All processes in the second group use +the rank of that root process in the first group as the value of their +root argument. The send buffer argument of the processes in the first +group must be consistent with the receive buffer argument of the root +process in the second group. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. Before the +error value is returned, the current MPI error handler is called. By +default, this error handler aborts the MPI job, except for I/O function +errors. The error handler may be changed with :ref:`MPI_Comm_set_errhandler`; +the predefined error handler MPI_ERRORS_RETURN may be used to cause +error values to be returned. Note that MPI does not guarantee that an +MPI program can continue past an error. See the MPI man page for a full +list of MPI error codes. + + +.. seealso:: :ref:`MPI_Gatherv` diff --git a/docs/man-openmpi/man3/MPI_Gather_init.3.rst b/docs/man-openmpi/man3/MPI_Gather_init.3.rst new file mode 100644 index 00000000000..851647e6898 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Gather_init.3.rst @@ -0,0 +1,9 @@ +.. _mpi_gather_init: + +MPI_Gather_init +=============== + .. include_body + +.. include:: ../man3/MPI_Gather.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Gatherv.3.rst b/docs/man-openmpi/man3/MPI_Gatherv.3.rst new file mode 100644 index 00000000000..a10629291a7 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Gatherv.3.rst @@ -0,0 +1,315 @@ +.. 
_mpi_gatherv: + +MPI_Gatherv +=========== + +.. include_body + +:ref:`MPI_Gatherv`, :ref:`MPI_Igatherv`, :ref:`MPI_Gatherv_init` - Gathers varying amounts of +data from all processes to the root process + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. code:: c + + #include + + int MPI_Gatherv(const void *sendbuf, int sendcount, MPI_Datatype sendtype, + void *recvbuf, const int recvcounts[], const int displs[], MPI_Datatype recvtype, + int root, MPI_Comm comm) + + int MPI_Igatherv(const void *sendbuf, int sendcount, MPI_Datatype sendtype, + void *recvbuf, const int recvcounts[], const int displs[], MPI_Datatype recvtype, + int root, MPI_Comm comm, MPI_Request *request) + + int MPI_Gatherv_init(const void *sendbuf, int sendcount, MPI_Datatype sendtype, + void *recvbuf, const int recvcounts[], const int displs[], MPI_Datatype recvtype, + int root, MPI_Comm comm, MPI_Info info, MPI_Request *request) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_GATHERV(SENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNTS, + DISPLS, RECVTYPE, ROOT, COMM, IERROR) + SENDBUF(*), RECVBUF(*) + INTEGER SENDCOUNT, SENDTYPE, RECVCOUNTS(*), DISPLS(*) + INTEGER RECVTYPE, ROOT, COMM, IERROR + + MPI_IGATHERV(SENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNTS, + DISPLS, RECVTYPE, ROOT, COMM, REQUEST, IERROR) + SENDBUF(*), RECVBUF(*) + INTEGER SENDCOUNT, SENDTYPE, RECVCOUNTS(*), DISPLS(*) + INTEGER RECVTYPE, ROOT, COMM, REQUEST, IERROR + + MPI_GATHERV_INIT(SENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNTS, + DISPLS, RECVTYPE, ROOT, COMM, INFO, REQUEST, IERROR) + SENDBUF(*), RECVBUF(*) + INTEGER SENDCOUNT, SENDTYPE, RECVCOUNTS(*), DISPLS(*) + INTEGER RECVTYPE, ROOT, COMM, INFO, REQUEST, IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code:: fortran + + USE mpi_f08 + + MPI_Gatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, + recvtype, root, comm, ierror) + TYPE(*), DIMENSION(..), INTENT(IN) :: sendbuf + TYPE(*), DIMENSION(..) :: recvbuf + INTEGER, INTENT(IN) :: sendcount, recvcounts(*), displs(*), root + TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Igatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, + recvtype, root, comm, request, ierror) + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: sendbuf + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: recvbuf + INTEGER, INTENT(IN) :: sendcount, root + INTEGER, INTENT(IN), ASYNCHRONOUS :: recvcounts(*), displs(*) + TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Gatherv_init(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, + recvtype, root, comm, info, request, ierror) + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: sendbuf + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: recvbuf + INTEGER, INTENT(IN) :: sendcount, root + INTEGER, INTENT(IN), ASYNCHRONOUS :: recvcounts(*), displs(*) + TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Info), INTENT(IN) :: info + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input Parameters +---------------- + +- sendbuf : Starting address of send buffer (choice). +- sendcount : Number of elements in send buffer (integer). +- sendtype : Datatype of send buffer elements (handle). +- recvcounts : Integer array (of length group size) containing the + number of elements that are received from each process (significant + only at root). +- displs : Integer array (of length group size). 
Entry i specifies the + displacement relative to recvbuf at which to place the incoming data + from process i (significant only at root). +- recvtype : Datatype of recv buffer elements (significant only at + root) (handle). +- root : Rank of receiving process (integer). +- comm : Communicator (handle). +- info : Info (handle, persistent only). + +Output Parameters +----------------- + +- recvbuf : Address of receive buffer (choice, significant only at + root). +- request : Request (handle, non-blocking only). +- IERROR : Fortran only: Error status (integer). + +Description +----------- + +:ref:`MPI_Gatherv` extends the functionality of :ref:`MPI_Gather` by allowing a +varying count of data from each process, since recvcounts is now an +array. It also allows more flexibility as to where the data is placed on +the root, by providing the new argument, displs. + +The outcome is as if each process, including the root process, sends a +message to the root, + +c MPI_Send(sendbuf, sendcount, sendtype, root, ...) + +and the root executes n receives, + +c MPI_Recv(recvbuf + disp[i] \* extent(recvtype), recvcounts[i], +recvtype, i, ...) + +Messages are placed in the receive buffer of the root process in rank +order, that is, the data sent from process j is placed in the jth +portion of the receive buffer recvbuf on process root. The jth portion +of recvbuf begins at offset displs[j] elements (in terms of recvtype) +into recvbuf. + +The receive buffer is ignored for all nonroot processes. + +The type signature implied by sendcount, sendtype on process i must be +equal to the type signature implied by recvcounts[i], recvtype at the +root. This implies that the amount of data sent must be equal to the +amount of data received, pairwise between each process and the root. +Distinct type maps between sender and receiver are still allowed, as +illustrated in Example 2, below. 
+ +All arguments to the function are significant on process root, while on +other processes, only arguments sendbuf, sendcount, sendtype, root, comm +are significant. The arguments root and comm must have identical values +on all processes. + +The specification of counts, types, and displacements should not cause +any location on the root to be written more than once. Such a call is +erroneous. + +Example 1: Now have each process send 100 ints to root, but place each +set (of 100) stride ints apart at receiving end. Use :ref:`MPI_Gatherv` and the +displs argument to achieve this effect. Assume stride >= 100. + +c MPI_Comm comm; int gsize,sendarray[100]; int root, \*rbuf, stride; int +*displs,i,\ rcounts; // ... MPI_Comm_size(comm, &gsize); rbuf = (int +)malloc(gsize\ stride\ sizeof(int)); displs = (int +)malloc(gsize\ sizeof(int)); rcounts = (int )malloc(gsize\ sizeof(int)); +for (i=0; i + + MPI_Get(void *origin_addr, int origin_count, MPI_Datatype + origin_datatype, int target_rank, MPI_Aint target_disp, + int target_count, MPI_Datatype target_datatype, MPI_Win win) + + MPI_Rget(void *origin_addr, int origin_count, MPI_Datatype + origin_datatype, int target_rank, MPI_Aint target_disp, + int target_count, MPI_Datatype target_datatype, MPI_Win win, + MPI_Request *request) + +Fortran Syntax (See Fortran 77 Notes) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code:: fortran + + USE MPI + ! 
or the older form: INCLUDE 'mpif.h' + + MPI_GET(ORIGIN_ADDR, ORIGIN_COUNT, ORIGIN_DATATYPE, TARGET_RANK, + TARGET_DISP, TARGET_COUNT, TARGET_DATATYPE, WIN, IERROR) + ORIGIN_ADDR(*) + INTEGER(KIND=MPI_ADDRESS_KIND) TARGET_DISP + INTEGER ORIGIN_COUNT, ORIGIN_DATATYPE, TARGET_RANK, + TARGET_COUNT, TARGET_DATATYPE, WIN, IERROR + + MPI_RGET(ORIGIN_ADDR, ORIGIN_COUNT, ORIGIN_DATATYPE, TARGET_RANK, + TARGET_DISP, TARGET_COUNT, TARGET_DATATYPE, WIN, REQUEST, IERROR) + ORIGIN_ADDR(*) + INTEGER(KIND=MPI_ADDRESS_KIND) TARGET_DISP + INTEGER ORIGIN_COUNT, ORIGIN_DATATYPE, TARGET_RANK, + TARGET_COUNT, TARGET_DATATYPE, WIN, REQUEST, IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code:: fortran + + USE mpi_f08 + + MPI_Get(origin_addr, origin_count, origin_datatype, target_rank, + target_disp, target_count, target_datatype, win, ierror) + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: origin_addr + INTEGER, INTENT(IN) :: origin_count, target_rank, target_count + TYPE(MPI_Datatype), INTENT(IN) :: origin_datatype, target_datatype + INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: target_disp + TYPE(MPI_Win), INTENT(IN) :: win + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Rget(origin_addr, origin_count, origin_datatype, target_rank, + target_disp, target_count, target_datatype, win, request, + ierror) + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: origin_addr + INTEGER, INTENT(IN) :: origin_count, target_rank, target_count + TYPE(MPI_Datatype), INTENT(IN) :: origin_datatype, target_datatype + INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: target_disp + TYPE(MPI_Win), INTENT(IN) :: win + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input Parameters +---------------- + +- origin_addr : Initial address of origin buffer (choice). +- origin_count : Number of entries in origin buffer (nonnegative + integer). +- origin_datatype : Data type of each entry in origin buffer (handle). +- target_rank : Rank of target (nonnegative integer). 
+- target_disp : Displacement from window start to the beginning of the + target buffer (nonnegative integer). +- target_count : Number of entries in target buffer (nonnegative + integer). +- target datatype : datatype of each entry in target buffer (handle) +- win : window object used for communication (handle) + +Output Parameter +---------------- + +- request : :ref:`MPI_Rget`: RMA request +- IERROR : Fortran only: Error status (integer). + +Description +----------- + +:ref:`MPI_Get` copies data from the target memory to the origin, similar to +:ref:`MPI_Put`, except that the direction of data transfer is reversed. The +origin_datatype may not specify overlapping entries in the origin +buffer. The target buffer must be contained within the target window, +and the copied data must fit, without truncation, in the origin buffer. +Only processes within the same node can access the target window. + +:ref:`MPI_Rget` is similar to :ref:`MPI_Get`, except that it allocates a communication +request object and associates it with the request handle (the argument +request) that can be used to wait or test for completion. The completion +of an :ref:`MPI_Rget` operation indicates that the data is available in the +origin buffer. If origin_addr points to memory attached to a window, +then the data becomes available in the private copy of this window. + +Fortran 77 Notes +---------------- + +The MPI standard prescribes portable Fortran syntax for the TARGET_DISP +argument only for Fortran 90. FORTRAN 77 users may use the non-portable +syntax + +fortran INTEGERMPI_ADDRESS_KIND TARGET_DISP + +where MPI_ADDRESS_KIND is a constant defined in mpif.h and gives the +length of the declared integer in bytes. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. 
By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: :ref:`MPI_Put` diff --git a/docs/man-openmpi/man3/MPI_Get_accumulate.3.rst b/docs/man-openmpi/man3/MPI_Get_accumulate.3.rst new file mode 100644 index 00000000000..4d43a7284e2 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Get_accumulate.3.rst @@ -0,0 +1,202 @@ +.. _mpi_get_accumulate: + +MPI_Get_accumulate +================== + +.. include_body + +:ref:`MPI_Get_accumulate`, :ref:`MPI_Rget_accumulate` - Combines the contents of the +origin buffer with that of a target buffer and returns the target buffer +value. + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. code:: c + + #include + + int MPI_Get_accumulate(const void *origin_addr, int origin_count, + MPI_Datatype origin_datatype, void *result_addr, + int result_count, MPI_Datatype result_datatype, + int target_rank, MPI_Aint target_disp, int target_count, + MPI_Datatype target_datatype, MPI_Op op, MPI_Win win) + + int MPI_Rget_accumulate(const void *origin_addr, int origin_count, + MPI_Datatype origin_datatype, void *result_addr, + int result_count, MPI_Datatype result_datatype, + int target_rank, MPI_Aint target_disp, int target_count, + MPI_Datatype target_datatype, MPI_Op op, MPI_Win win, + MPI_Request *request) + +Fortran Syntax (See Fortran 77 Notes) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code:: fortran + + USE MPI + ! 
or the older form: INCLUDE 'mpif.h'
+
+ MPI_GET_ACCUMULATE(ORIGIN_ADDR, ORIGIN_COUNT, ORIGIN_DATATYPE, RESULT_ADDR,
+ RESULT_COUNT, RESULT_DATATYPE, TARGET_RANK, TARGET_DISP, TARGET_COUNT,
+ TARGET_DATATYPE, OP, WIN, IERROR)
+ ORIGIN_ADDR, RESULT_ADDR(*)
+ INTEGER(KIND=MPI_ADDRESS_KIND) TARGET_DISP
+ INTEGER ORIGIN_COUNT, ORIGIN_DATATYPE, RESULT_COUNT, RESULT_DATATYPE,
+ TARGET_RANK, TARGET_COUNT, TARGET_DATATYPE, OP, WIN, IERROR
+
+ MPI_RGET_ACCUMULATE(ORIGIN_ADDR, ORIGIN_COUNT, ORIGIN_DATATYPE, RESULT_ADDR,
+ RESULT_COUNT, RESULT_DATATYPE, TARGET_RANK, TARGET_DISP, TARGET_COUNT,
+ TARGET_DATATYPE, OP, WIN, REQUEST, IERROR)
+ ORIGIN_ADDR, RESULT_ADDR(*)
+ INTEGER(KIND=MPI_ADDRESS_KIND) TARGET_DISP
+ INTEGER ORIGIN_COUNT, ORIGIN_DATATYPE, RESULT_COUNT, RESULT_DATATYPE,
+ TARGET_RANK, TARGET_COUNT, TARGET_DATATYPE, OP, WIN, REQUEST, IERROR
+
+Fortran 2008 Syntax
+^^^^^^^^^^^^^^^^^^^
+
+.. code:: fortran
+
+ USE mpi_f08
+
+ MPI_Get_accumulate(origin_addr, origin_count, origin_datatype, result_addr,
+ result_count, result_datatype, target_rank, target_disp,
+ target_count, target_datatype, op, win, ierror)
+ TYPE(*), DIMENSION(..), INTENT(IN) :: origin_addr
+ TYPE(*), DIMENSION(..) :: result_addr
+ INTEGER, INTENT(IN) :: origin_count, result_count, target_rank, target_count
+ TYPE(MPI_Datatype), INTENT(IN) :: origin_datatype, target_datatype, result_datatype
+ INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: target_disp
+ TYPE(MPI_Op), INTENT(IN) :: op
+ TYPE(MPI_Win), INTENT(IN) :: win
+ INTEGER, OPTIONAL, INTENT(OUT) :: ierror
+
+ MPI_Rget_accumulate(origin_addr, origin_count, origin_datatype,
+ result_addr, result_count, result_datatype, target_rank,
+ target_disp, target_count, target_datatype, op, win, request,
+ ierror)
+ TYPE(*), DIMENSION(..), INTENT(IN) :: origin_addr
+ TYPE(*), DIMENSION(..)
:: result_addr + INTEGER, INTENT(IN) :: origin_count, result_count, target_rank, target_count + TYPE(MPI_Datatype), INTENT(IN) :: origin_datatype, target_datatype, result_datatype + INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: target_disp + TYPE(MPI_Op), INTENT(IN) :: op + TYPE(MPI_Win), INTENT(IN) :: win + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input Parameters +---------------- + +- origin_addr : Initial address of buffer (choice). +- origin_count : Number of entries in buffer (nonnegative integer). +- origin_datatype : Data type of each buffer entry (handle). +- result_addr : Initial address of result buffer (choice). +- result_count : Number of entries in result buffer (nonnegative + integer). +- result_datatype : Data type of each result buffer entry (handle). +- target_rank : Rank of target (nonnegative integer). +- target_disp : Displacement from start of window to beginning of + target buffer (nonnegative integer). +- target_count : Number of entries in target buffer (nonnegative + integer). +- target_datatype : Data type of each entry in target buffer (handle). +- op : Reduce operation (handle). +- win : Window object (handle). + +Output Parameter +---------------- + +- :ref:`MPI_Rget_accumulate`: RMA request +- IERROR : Fortran only: Error status (integer). + +Description +----------- + +:ref:`MPI_Get_accumulate` is a function used for one-sided MPI communication +that adds the contents of the origin buffer (as defined by origin_addr, +origin_count, and origin_datatype) to the buffer specified by the +arguments target_count and target_datatype, at offset target_disp, in +the target window specified by target_rank and win, using the operation +op. :ref:`MPI_Get_accumulate` returns in the result buffer result_addr the +contents of the target buffer before the accumulation. + +Any of the predefined operations for :ref:`MPI_Reduce`, as well as MPI_NO_OP, +can be used. User-defined functions cannot be used.
For example, if op +is MPI_SUM, each element of the origin buffer is added to the +corresponding element in the target, replacing the former value in the +target. + +Each datatype argument must be a predefined data type or a derived data +type, where all basic components are of the same predefined data type. +Both datatype arguments must be constructed from the same predefined +data type. The operation op applies to elements of that predefined type. +The target_datatype argument must not specify overlapping entries, and +the target buffer must fit in the target window. + +A new predefined operation, MPI_REPLACE, is defined. It corresponds to +the associative function f(a, b) = b; that is, the current value in the +target memory is replaced by the value supplied by the origin. + +A new predefined operation, MPI_NO_OP, is defined. It corresponds to the +associative function f(a, b) = a; that is, the current value in the +target memory is returned in the result buffer at the origin and no +operation is performed on the target buffer. + +:ref:`MPI_Rget_accumulate` is similar to :ref:`MPI_Get_accumulate`, except that it +allocates a communication request object and associates it with the +request handle (the argument request) that can be used to wait or test +for completion. The completion of an :ref:`MPI_Rget_accumulate` operation +indicates that the data is available in the result buffer and the origin +buffer is free to be updated. It does not indicate that the operation +has been completed at the target window. + +Fortran 77 Notes +---------------- + +The MPI standard prescribes portable Fortran syntax for the TARGET_DISP +argument only for Fortran 90. FORTRAN 77 users may use the non-portable +syntax + +``INTEGER*MPI_ADDRESS_KIND TARGET_DISP`` + +where MPI_ADDRESS_KIND is a constant defined in mpif.h and gives the +length of the declared integer in bytes.
+ +Notes +----- + +The generic functionality of :ref:`MPI_Get_accumulate` might limit the +performance of fetch-and-increment or fetch-and-add calls that might be +supported by special hardware operations. :ref:`MPI_Fetch_and_op` thus allows +for a fast implementation of a commonly used subset of the functionality +of :ref:`MPI_Get_accumulate`. + +:ref:`MPI_Get` is a special case of :ref:`MPI_Get_accumulate`, with the operation +MPI_NO_OP. Note, however, that :ref:`MPI_Get` and :ref:`MPI_Get_accumulate` have +different constraints on concurrent updates. + +It is the user's responsibility to guarantee that, when using the +accumulate functions, the target displacement argument is such that +accesses to the window are properly aligned according to the data type +arguments in the call to the :ref:`MPI_Get_accumulate` function. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: :ref:`MPI_Put` :ref:`MPI_Reduce` diff --git a/docs/man-openmpi/man3/MPI_Get_address.3.rst b/docs/man-openmpi/man3/MPI_Get_address.3.rst new file mode 100644 index 00000000000..f4f1e99b057 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Get_address.3.rst @@ -0,0 +1,99 @@ +.. _mpi_get_address: + +MPI_Get_address +=============== + +.. include_body + +:ref:`MPI_Get_address` - Gets the address of a location in memory. + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. 
code:: c + + #include <mpi.h> + + int MPI_Get_address(const void *location, MPI_Aint *address) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_GET_ADDRESS(LOCATION, ADDRESS, IERROR) + LOCATION(*) + INTEGER(KIND=MPI_ADDRESS_KIND) ADDRESS + INTEGER IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code:: fortran + + USE mpi_f08 + + MPI_Get_address(location, address, ierror) + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: location + INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(OUT) :: address + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input Parameters +---------------- + +- ``location`` : Location in caller memory (choice). + +Output Parameters +----------------- + +- ``address`` : Address of location (integer). +- ``IERROR`` : Fortran only: Error status (integer). + +Description +----------- + +:ref:`MPI_Get_address` returns the byte ``address`` of a location in +memory. + +Example: Using :ref:`MPI_Get_address` for an array. + +.. code:: fortran + + REAL A(100,100) + INTEGER I1, I2, DIFF + CALL MPI_GET_ADDRESS(A(1,1), I1, IERROR) + CALL MPI_GET_ADDRESS(A(10,10), I2, IERROR) + DIFF = I2 - I1 + ! The value of DIFF is 909*sizeofreal; the values of I1 and I2 are + ! implementation dependent. + +Notes +----- + +Current Fortran MPI codes will run unmodified and will port to any +system. However, they may fail if ``addresses`` larger than 2^32 - 1 are +used in the program. New codes should be written so that they use the +new functions. This provides compatibility with C and avoids errors on +64-bit architectures. However, such newly written codes may need to be +(slightly) rewritten to port to old Fortran 77 environments that do not +support KIND declarations. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called.
By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler +MPI_ERRORS_RETURN may be used to cause error values to be returned. +Note that MPI does not guarantee that an MPI program can continue past +an error. diff --git a/docs/man-openmpi/man3/MPI_Get_count.3.rst b/docs/man-openmpi/man3/MPI_Get_count.3.rst new file mode 100644 index 00000000000..eee531dd17e --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Get_count.3.rst @@ -0,0 +1,99 @@ +.. _mpi_get_count: + +MPI_Get_count +============= + +.. include_body + +:ref:`MPI_Get_count` - Gets the number of top-level elements received. + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. code:: c + + #include + + int MPI_Get_count(const MPI_Status *status, MPI_Datatype datatype, + int *count) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_GET_COUNT(STATUS, DATATYPE, COUNT, IERROR) + INTEGER STATUS(MPI_STATUS_SIZE), DATATYPE, COUNT, IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code:: fortran + + USE mpi_f08 + + MPI_Get_count(status, datatype, count, ierror) + TYPE(MPI_Status), INTENT(IN) :: status + TYPE(MPI_Datatype), INTENT(IN) :: datatype + INTEGER, INTENT(OUT) :: count + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input Parameters +---------------- + +- status : Return status of receive operation (status). +- datatype : Datatype of each receive buffer element (handle). + +Output Parameters +----------------- + +- count : Number of received elements (integer). +- IERROR : Fortran only: Error status (integer). + +Description +----------- + +Returns the number of entries received. (We count entries, each of type +datatype, not bytes.) The datatype argument should match the argument +provided by the receive call that set the status variable. 
(As explained +in Section 3.12.5 in the MPI-1 Standard, "Use of General Datatypes in +Communication," :ref:`MPI_Get_count` may, in certain situations, return the +value MPI_UNDEFINED.) + +The datatype argument is passed to :ref:`MPI_Get_count` to improve performance. +A message might be received without counting the number of elements it +contains, and the count value is often not needed. Also, this allows the +same function to be used after a call to :ref:`MPI_Probe`. + +Notes +----- + +If the size of the datatype is zero, this routine will return a count of +zero. If the amount of data in status is not an exact multiple of the +size of datatype (so that count would not be integral), a count of +MPI_UNDEFINED is returned instead. + +Errors +------ + +If the value to be returned is larger than can fit into the count +parameter, an MPI_ERR_TRUNCATE error is raised. + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: :ref:`MPI_Get_elements` diff --git a/docs/man-openmpi/man3/MPI_Get_elements.3.rst b/docs/man-openmpi/man3/MPI_Get_elements.3.rst new file mode 100644 index 00000000000..c536e55bbde --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Get_elements.3.rst @@ -0,0 +1,138 @@ +.. _mpi_get_elements: + +MPI_Get_elements +================ + +.. include_body + +:ref:`MPI_Get_elements`, :ref:`MPI_Get_elements_x` - Returns the number of basic +elements in a data type. + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. 
code:: c + + #include + + int MPI_Get_elements(const MPI_Status *status, MPI_Datatype datatype, + int *count) + + int MPI_Get_elements_x(const MPI_Status *status, MPI_Datatype datatype, + MPI_Count *count) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_GET_ELEMENTS(STATUS, DATATYPE, COUNT, IERROR) + INTEGER STATUS(MPI_STATUS_SIZE), DATATYPE, COUNT, IERROR + + MPI_GET_ELEMENTS_X(STATUS, DATATYPE, COUNT, IERROR) + INTEGER STATUS(MPI_STATUS_SIZE), DATATYPE + INTEGER(KIND=MPI_COUNT_KIND) COUNT + INTEGER IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code:: fortran + + USE mpi_f08 + + MPI_Get_elements(status, datatype, count, ierror) + TYPE(MPI_Status), INTENT(IN) :: status + TYPE(MPI_Datatype), INTENT(IN) :: datatype + INTEGER, INTENT(OUT) :: count + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Get_elements_x(status, datatype, count, ierror) + TYPE(MPI_Status), INTENT(IN) :: status + TYPE(MPI_Datatype), INTENT(IN) :: datatype + INTEGER(KIND = MPI_COUNT_KIND), INTENT(OUT) :: count + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input Parameters +---------------- + +- status : Return status of receive operation (status). +- datatype : Datatype used by receive operation (handle). + +Output Parameters +----------------- + +- IERROR : Fortran only: Error status (integer). + +Description +----------- + +:ref:`MPI_Get_elements` and :ref:`MPI_Get_elements_x` behave different from +:ref:`MPI_Get_count`, which returns the number of "top-level entries" received, +i.e., the number of "copies" of type datatype. :ref:`MPI_Get_count` may return +any integer value k, where 0 =< k =< count. If :ref:`MPI_Get_count` returns k, +then the number of basic elements received (and the value returned by +:ref:`MPI_Get_elements` and MPI_Get_elements_x) is n k, where n is the number +of basic elements in the type map of datatype. 
If the number of basic +elements received is not a multiple of n, that is, if the receive +operation has not received an integral number of datatype "copies," then +:ref:`MPI_Get_count` returns the value MPI_UNDEFINED. For both functions, if +the count parameter cannot express the value to be returned (e.g., if +the parameter is too small to hold the output value), it is set to +MPI_UNDEFINED. + +Example: Usage of :ref:`MPI_Get_count` and :ref:`MPI_Get_elements`: + +.. code:: fortran + + CALL MPI_TYPE_CONTIGUOUS(2, MPI_REAL, Type2, ierr) + CALL MPI_TYPE_COMMIT(Type2, ierr) + CALL MPI_COMM_RANK(comm, rank, ierr) + IF (rank.EQ.0) THEN + CALL MPI_SEND(a, 2, MPI_REAL, 1, 0, comm, ierr) + CALL MPI_SEND(a, 3, MPI_REAL, 1, 0, comm, ierr) + ELSE + CALL MPI_RECV(a, 2, Type2, 0, 0, comm, stat, ierr) + CALL MPI_GET_COUNT(stat, Type2, i, ierr) ! returns i=1 + CALL MPI_GET_ELEMENTS(stat, Type2, i, ierr) ! returns i=2 + CALL MPI_RECV(a, 2, Type2, 0, 0, comm, stat, ierr) + CALL MPI_GET_COUNT(stat, Type2, i, ierr) ! returns i=MPI_UNDEFINED + CALL MPI_GET_ELEMENTS(stat, Type2, i, ierr) ! returns i=3 + END IF + +The function :ref:`MPI_Get_elements` can also be used after a probe to find the +number of elements in the probed message. Note that the two functions +:ref:`MPI_Get_count` and :ref:`MPI_Get_elements` return the same values when they are +used with primitive data types. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error.
+ +Fortran 77 Notes +---------------- + +The MPI standard prescribes portable Fortran syntax for the COUNT +argument of :ref:`MPI_Get_elements_x` only for Fortran 90. FORTRAN 77 users may +use the non-portable syntax + +``INTEGER*MPI_COUNT_KIND COUNT`` + +where MPI_COUNT_KIND is a constant defined in mpif.h and gives the +length of the declared integer in bytes. + + +.. seealso:: :ref:`MPI_Get_count` diff --git a/docs/man-openmpi/man3/MPI_Get_elements_x.3.rst b/docs/man-openmpi/man3/MPI_Get_elements_x.3.rst new file mode 100644 index 00000000000..4b35b3fc64d --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Get_elements_x.3.rst @@ -0,0 +1,9 @@ +.. _mpi_get_elements_x: + +MPI_Get_elements_x +================== +.. include_body + +.. include:: ../man3/MPI_Get_elements.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Get_library_version.3.rst b/docs/man-openmpi/man3/MPI_Get_library_version.3.rst new file mode 100644 index 00000000000..c36a80b8594 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Get_library_version.3.rst @@ -0,0 +1,92 @@ +.. _mpi_get_library_version: + +MPI_Get_library_version +======================= + +.. include_body + +:ref:`MPI_Get_library_version` - Returns a string of the current Open MPI +version + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. code:: c + + #include <mpi.h> + + int MPI_Get_library_version(char *version, int *resultlen) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_GET_LIBRARY_VERSION(VERSION, RESULTLEN, IERROR) + CHARACTER*(*) VERSION + INTEGER RESULTLEN, IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code:: fortran + + USE mpi_f08 + + MPI_Get_library_version(version, resultlen, ierror) + CHARACTER(LEN=MPI_MAX_LIBRARY_VERSION_STRING), INTENT(OUT) :: version + INTEGER, INTENT(OUT) :: resultlen + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Output Parameters +----------------- + +- version : A string containing the Open MPI version (string).
+- resultlen : Length (in characters) of result returned in version + (integer). +- IERROR : Fortran only: Error status (integer). + +Description +----------- + +This routine returns a string representing the version of the MPI +library. The version argument is a character string for maximum +flexibility. + +The number of characters actually written is returned in the output +argument, resultlen. In C, a '0' character is additionally stored at +version[resultlen]. The resultlen cannot be larger than +(MPI_MAX_LIBRARY_VERSION_STRING - 1). In Fortran, version is padded on +the right with blank characters. The resultlen cannot be larger than +MPI_MAX_LIBRARY_VERSION_STRING. + +Note +---- + +The version string that is passed must be at least +MPI_MAX_LIBRARY_VERSION_STRING characters long. + +:ref:`MPI_Get_library_version` is one of the few functions that can be called +before :ref:`MPI_Init` and after :ref:`MPI_Finalize`. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: :ref:`MPI_Get_version` diff --git a/docs/man-openmpi/man3/MPI_Get_processor_name.3.rst b/docs/man-openmpi/man3/MPI_Get_processor_name.3.rst new file mode 100644 index 00000000000..6b3dce2ff34 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Get_processor_name.3.rst @@ -0,0 +1,86 @@ +.. _mpi_get_processor_name: + +MPI_Get_processor_name +====================== + +.. include_body + +:ref:`MPI_Get_processor_name` - Gets the name of the processor. 
+ +Syntax +------ + +C Syntax +^^^^^^^^ + +.. code:: c + + #include + + int MPI_Get_processor_name(char *name, int *resultlen) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_GET_PROCESSOR_NAME(NAME, RESULTLEN, IERROR) + CHARACTER*(*) NAME + INTEGER RESULTLEN, IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code:: fortran + + USE mpi_f08 + + MPI_Get_processor_name(name, resultlen, ierror) + CHARACTER(LEN=MPI_MAX_PROCESSOR_NAME), INTENT(OUT) :: name + INTEGER, INTENT(OUT) :: resultlen + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Output Parameters +----------------- + +- ``name`` : A unique specifier for the actual (as opposed to virtual) + node. +- ``resultlen`` : Length (in characters) of result returned in name. +- ``IERROR`` : Fortran only: Error status (integer). + +Description +----------- + +This routine returns the ``name`` of the processor on which it was +called at the moment of the call. The ``name`` is a character string for +maximum flexibility. From this value it must be possible to identify a +specific piece of hardware. The argument ``name`` must represent storage +that is at least MPI_MAX_PROCESSOR_NAME characters long. + +The number of characters actually written is returned in the output +argument, ``resultlen``. + +Notes +----- + +The user must provide at least MPI_MAX_PROCESSOR_NAME space to write +the processor ``name``; processor ``name``\ s can be this long. The user +should examine the output argument, ``resultlen``, to determine the +actual length of the ``name``. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. 
The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler +MPI_ERRORS_RETURN may be used to cause error values to be returned. +Note that MPI does not guarantee that an MPI program can continue past +an error. diff --git a/docs/man-openmpi/man3/MPI_Get_version.3.rst b/docs/man-openmpi/man3/MPI_Get_version.3.rst new file mode 100644 index 00000000000..db165aa471b --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Get_version.3.rst @@ -0,0 +1,78 @@ +.. _mpi_get_version: + +MPI_Get_version +=============== + +.. include_body + +:ref:`MPI_Get_version` - Returns the version of the standard corresponding +to the current implementation. + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. code:: c + + #include + + int MPI_Get_version(int *version, int *subversion) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_GET_VERSION(VERSION, SUBVERSION, IERROR) + INTEGER VERSION, SUBVERSION, IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code:: fortran + + USE mpi_f08 + + MPI_Get_version(version, subversion, ierror) + INTEGER, INTENT(OUT) :: version, subversion + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Output Parameters +----------------- + +- ``version`` : The major version number of the corresponding standard + (integer). +- ``subversion`` : The minor version number of the corresponding + standard (integer). +- ``IERROR`` : Fortran only: Error status (integer). + +Description +----------- + +Since Open MPI is MPI 3.1 compliant, this function will return a +``version`` value of 3 and a subversion value of 1 for this release. + +Note +---- + +:ref:`MPI_Get_version` is one of the few functions that can be called +before :ref:`MPI_Init` and after :ref:`MPI_Finalize`. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. 
+ +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler +MPI_ERRORS_RETURN may be used to cause error values to be returned. +Note that MPI does not guarantee that an MPI program can continue past +an error. diff --git a/docs/man-openmpi/man3/MPI_Graph_create.3.rst b/docs/man-openmpi/man3/MPI_Graph_create.3.rst new file mode 100644 index 00000000000..3638b2543ca --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Graph_create.3.rst @@ -0,0 +1,138 @@ +.. _mpi_graph_create: + +MPI_Graph_create +================ + +.. include_body + +:ref:`MPI_Graph_create` - Makes a new communicator to which topology +information has been attached. + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. code:: c + + #include + + int MPI_Graph_create(MPI_Comm comm_old, int nnodes, const int index[], + const int edges[], int reorder, MPI_Comm *comm_graph) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_GRAPH_CREATE(COMM_OLD, NNODES, INDEX, EDGES, REORDER, + COMM_GRAPH, IERROR) + INTEGER COMM_OLD, NNODES, INDEX(*), EDGES(*) + INTEGER COMM_GRAPH, IERROR + LOGICAL REORDER + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code:: fortran + + USE mpi_f08 + + MPI_Graph_create(comm_old, nnodes, index, edges, reorder, comm_graph, + ierror) + TYPE(MPI_Comm), INTENT(IN) :: comm_old + INTEGER, INTENT(IN) :: nnodes, index(nnodes), edges(*) + LOGICAL, INTENT(IN) :: reorder + TYPE(MPI_Comm), INTENT(OUT) :: comm_graph + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input Parameters +---------------- + +- comm_old : Input communicator without topology (handle). +- nnodes : Number of nodes in graph (integer). +- index : Array of integers describing node degrees (see below). +- edges : Array of integers describing graph edges (see below). 
+- reorder : Ranking may be reordered (true) or not (false) (logical). + +Output Parameters +----------------- + +- comm_graph : Communicator with graph topology added (handle). +- IERROR : Fortran only: Error status (integer). + +Description +----------- + +:ref:`MPI_Graph_create` returns a handle to a new communicator to which the +graph topology information is attached. If reorder = false then the rank +of each process in the new group is identical to its rank in the old +group. Otherwise, the function may reorder the processes. If the size, +nnodes, of the graph is smaller than the size of the group of comm_old, +then some processes are returned MPI_COMM_NULL, in analogy to +:ref:`MPI_Cart_create` and :ref:`MPI_Comm_split`. The call is erroneous if it +specifies a graph that is larger than the group size of the input +communicator. + +The three parameters nnodes, index, and edges define the graph +structure. nnodes is the number of nodes of the graph. The nodes are +numbered from 0 to nnodes-1. The ith entry of array index stores the +total number of neighbors of the first i graph nodes. The lists of +neighbors of nodes 0, 1, ..., nnodes-1 are stored in consecutive +locations in array edges. The array edges is a flattened representation +of the edge lists. The total number of entries in index is nnodes and +the total number of entries in edges is equal to the number of graph +edges. + +The definitions of the arguments nnodes, index, and edges are +illustrated with the following simple example. + +Example: Assume there are four processes 0, 1, 2, 3 with the following +adjacency matrix: + +------- --------- +Process Neighbors +------- --------- +0 1, 3 +1 0 +2 3 +3 0, 2 +------- --------- + +Then, the input arguments are: + +- nodes = 4 +- index = 2, 3, 4, 6 +- edges = 1, 3, 0, 3, 0, 2 + +Thus, in C, index[0] is the degree of node zero, and index[i] - +index[i-1] is the degree of node i, i=1, . . . 
, nnodes-1; the list of +neighbors of node zero is stored in edges[j], for 0 <= j <= index[0] - 1 +and the list of neighbors of node i, i > 0 , is stored in edges[j], +index[i-1] <= j <= index[i] - 1. + +In Fortran, index(1) is the degree of node zero, and index(i+1) - +index(i) is the degree of node i, i=1, . . . , nnodes-1; the list of +neighbors of node zero is stored in edges(j), for 1 <= j <= index(1) and +the list of neighbors of node i, i > 0, is stored in edges(j), index(i) ++ 1 <= j <= index(i + 1). + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: :ref:`MPI_Graph_get` diff --git a/docs/man-openmpi/man3/MPI_Graph_get.3.rst b/docs/man-openmpi/man3/MPI_Graph_get.3.rst new file mode 100644 index 00000000000..5f59c9d1dbc --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Graph_get.3.rst @@ -0,0 +1,88 @@ +.. _mpi_graph_get: + +MPI_Graph_get +============= + +.. include_body + +:ref:`MPI_Graph_get` - Retrieves graph topology information associated with a +communicator. + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. code:: c + + #include + + int MPI_Graph_get(MPI_Comm comm, int maxindex, int maxedges, + int index[], int edges[]) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_GRAPH_GET(COMM, MAXINDEX, MAXEDGES, INDEX, EDGES, IERROR) + INTEGER COMM, MAXINDEX, MAXEDGES, INDEX(*) + INTEGER EDGES(*), IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code:: fortran + + USE mpi_f08 + + MPI_Graph_get(comm, maxindex, maxedges, index, edges, ierror) + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, INTENT(IN) :: maxindex, maxedges + INTEGER, INTENT(OUT) :: index(maxindex), edges(maxedges) + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input Parameters +---------------- + +- comm : Communicator with graph structure (handle). +- maxindex : Length of vector index in the calling program (integer). +- maxedges : Length of vector edges in the calling program (integer). + +Output Parameters +----------------- + +- index : Array of integers containing the graph structure (for details + see the definition of MPI_Graph_create). +- edges : Array of integers containing the graph structure. +- IERROR : Fortran only: Error status (integer). + +Description +----------- + +Functions :ref:`MPI_Graphdims_get` and :ref:`MPI_Graph_get` retrieve the +graph-topology information that was associated with a communicator by +:ref:`MPI_Graph_create`. + +The information provided by :ref:`MPI_Graphdims_get` can be used to dimension +the vectors index and edges correctly for a call to :ref:`MPI_Graph_get`. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: :ref:`MPI_Graph_create` diff --git a/docs/man-openmpi/man3/MPI_Graph_map.3.rst b/docs/man-openmpi/man3/MPI_Graph_map.3.rst new file mode 100644 index 00000000000..4dfe79f49b9 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Graph_map.3.rst @@ -0,0 +1,85 @@ +.. 
_mpi_graph_map: + +MPI_Graph_map +============= + +.. include_body + +:ref:`MPI_Graph_map` - Maps process to graph topology information. + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. code:: c + + #include + + int MPI_Graph_map(MPI_Comm comm, int nnodes, const int index[], + const int edges[], int *newrank) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_GRAPH_MAP(COMM, NNODES, INDEX, EDGES, NEWRANK, IERROR) + INTEGER COMM, NNODES, INDEX(*), EDGES(*), NEWRANK, IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code:: fortran + + USE mpi_f08 + + MPI_Graph_map(comm, nnodes, index, edges, newrank, ierror) + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, INTENT(IN) :: nnodes, index(nnodes), edges(*) + INTEGER, INTENT(OUT) :: newrank + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input Parameters +---------------- + +- comm : Input communicator (handle). +- nnodes : Number of graph nodes (integer). +- index : Integer array specifying the graph structure, see + :ref:`MPI_Graph_create`. +- edges : Integer array specifying the graph structure. + +Output Parameters +----------------- + +- newrank : Reordered rank of the calling process; MPI_UNDEFINED if the + calling process does not belong to graph (integer). +- IERROR : Fortran only: Error status (integer). + +Description +----------- + +:ref:`MPI_Cart_map` and :ref:`MPI_Graph_map` can be used to implement all other +topology functions. In general they will not be called by the user +directly, unless he or she is creating additional virtual topology +capability other than that provided by MPI. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. 
The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: :ref:`MPI_Cart_map` diff --git a/docs/man-openmpi/man3/MPI_Graph_neighbors.3.rst b/docs/man-openmpi/man3/MPI_Graph_neighbors.3.rst new file mode 100644 index 00000000000..f03f5b276d2 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Graph_neighbors.3.rst @@ -0,0 +1,123 @@ +.. _mpi_graph_neighbors: + + +MPI_Graph_neighbors +=================== + +.. include_body + +:ref:`MPI_Graph_neighbors` - Returns the neighbors of a node associated +with a graph topology. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Graph_neighbors(MPI_Comm comm, int rank, int maxneighbors, + int neighbors[]) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_GRAPH_NEIGHBORS(COMM, RANK, MAXNEIGHBORS, NEIGHBORS, IERROR) + INTEGER COMM, RANK, MAXNEIGHBORS, NEIGHBORS(*), IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Graph_neighbors(comm, rank, maxneighbors, neighbors, ierror) + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, INTENT(IN) :: rank, maxneighbors + INTEGER, INTENT(OUT) :: neighbors(maxneighbors) + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``comm``: Communicator with graph topology (handle). +* ``rank``: Rank of process in group of comm (integer). +* ``maxneighbors``: Size of array neighbors (integer). + +OUTPUT PARAMETERS +----------------- +* ``neighbors``: Ranks of processes that are neighbors to specified process (array of integers). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +**Example:** Suppose that comm is a communicator with a shuffle-exchange +topology. 
The group has 2n members. Each process is labeled by a(1), +..., a(n) with a(i) E{0,1}, and has three neighbors: exchange (a(1), +..., a(n) = a(1), ..., a(n-1), a(n) (a = 1 - a), shuffle (a(1), ..., +a(n)) = a(2), ..., a(n), a(1), and unshuffle (a(1), ..., a(n)) = a(n), +a(1), ..., a(n-1). The graph adjacency list is illustrated below for +n=3. + +:: + + exchange shuffle unshuffle + node neighbors(1) neighbors(2) neighbors(3) + 0(000) 1 0 0 + 1(001) 0 2 4 + 2(010) 3 4 1 + 3(011) 2 6 5 + 4(100) 5 1 2 + 5(101) 4 3 6 + 6(110) 7 5 3 + 7(111) 6 7 7 + +Suppose that the communicator comm has this topology associated with it. +The following code fragment cycles through the three types of neighbors +and performs an appropriate permutation for each. + +:: + + C assume: each process has stored a real number A. + C extract neighborhood information + CALL MPI_COMM_RANK(comm, myrank, ierr) + CALL MPI_GRAPH_NEIGHBORS(comm, myrank, 3, neighbors, ierr) + C perform exchange permutation + CALL MPI_SENDRECV_REPLACE(A, 1, MPI_REAL, neighbors(1), 0, + + neighbors(1), 0, comm, status, ierr) + C perform shuffle permutation + CALL MPI_SENDRECV_REPLACE(A, 1, MPI_REAL, neighbors(2), 0, + + neighbors(3), 0, comm, status, ierr) + C perform unshuffle permutation + CALL MPI_SENDRECV_REPLACE(A, 1, MPI_REAL, neighbors(3), 0, + + neighbors(2), 0, comm, status, ierr) + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. 
seealso:: + :ref:`MPI_Graph_neighbors_count` diff --git a/docs/man-openmpi/man3/MPI_Graph_neighbors_count.3.rst b/docs/man-openmpi/man3/MPI_Graph_neighbors_count.3.rst new file mode 100644 index 00000000000..cdfc4d739f3 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Graph_neighbors_count.3.rst @@ -0,0 +1,81 @@ +.. _mpi_graph_neighbors_count: + +MPI_Graph_neighbors_count +========================= + +.. include_body + +:ref:`MPI_Graph_neighbors_count` - Returns the number of neighbors of a node +associated with a graph topology. + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. code:: c + + #include + + int MPI_Graph_neighbors_count(MPI_Comm comm, int rank, + int *nneighbors) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_GRAPH_NEIGHBORS_COUNT(COMM, RANK, NNEIGHBORS, IERROR) + INTEGER COMM, RANK, NNEIGHBORS, IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code:: fortran + + USE mpi_f08 + + MPI_Graph_neighbors_count(comm, rank, nneighbors, ierror) + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, INTENT(IN) :: rank + INTEGER, INTENT(OUT) :: nneighbors + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input Parameters +---------------- + +- comm : Communicator with graph topology (handle). +- rank : Rank of process in group of comm (integer). + +Output Parameters +----------------- + +- nneighbors : Number of neighbors of specified process (integer). +- IERROR : Fortran only: Error status (integer). + +Description +----------- + +:ref:`MPI_Graph_neighbors_count` and :ref:`MPI_Graph_neighbors` provide adjacency +information for a general, graph topology. :ref:`MPI_Graph_neighbors_count` +returns the number of neighbors for the process signified by rank. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. 
By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: :ref:`MPI_Graph_neighbors` diff --git a/docs/man-openmpi/man3/MPI_Graphdims_get.3.rst b/docs/man-openmpi/man3/MPI_Graphdims_get.3.rst new file mode 100644 index 00000000000..eb106210fd9 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Graphdims_get.3.rst @@ -0,0 +1,82 @@ +.. _mpi_graphdims_get: + +MPI_Graphdims_get +================= + +.. include_body + +:ref:`MPI_Graphdims_get` - Retrieves graph topology information associated with +a communicator. + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. code:: c + + #include + + int MPI_Graphdims_get(MPI_Comm comm, int *nnodes, int *nedges) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_GRAPHDIMS_GET(COMM, NNODES, NEDGES, IERROR) + INTEGER COMM, NNODES, NEDGES, IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code:: fortran + + USE mpi_f08 + + MPI_Graphdims_get(comm, nnodes, nedges, ierror) + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, INTENT(OUT) :: nnodes, nedges + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input Parameter +--------------- + +- comm : Communicator for group with graph structure (handle). + +Output Parameters +----------------- + +- nnodes : Number of nodes in graph (integer). +- nedges : Number of edges in graph (integer). +- IERROR : Fortran only: Error status (integer). + +Description +----------- + +Functions :ref:`MPI_Graphdims_get` and :ref:`MPI_Graph_get` retrieve the +graph-topology information that was associated with a communicator by +:ref:`MPI_Graph_create`. 
+ +The information provided by :ref:`MPI_Graphdims_get` can be used to dimension +the vectors index and edges correctly for a call to :ref:`MPI_Graph_get`. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: :ref:`MPI_Graph_create` diff --git a/docs/man-openmpi/man3/MPI_Grequest_complete.3.rst b/docs/man-openmpi/man3/MPI_Grequest_complete.3.rst new file mode 100644 index 00000000000..d6af9bf02b3 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Grequest_complete.3.rst @@ -0,0 +1,91 @@ +.. _mpi_grequest_complete: + +MPI_Grequest_complete +===================== + +.. include_body + +:ref:`MPI_Grequest_complete` - Reports that a generalized request is +complete. + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. code:: c + + #include + + int MPI_Grequest_complete(MPI_Request request) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_GREQUEST_COMPLETE(REQUEST, IERROR) + INTEGER REQUEST, IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code:: fortran + + USE mpi_f08 + + MPI_Grequest_complete(request, ierror) + TYPE(MPI_Request), INTENT(IN) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input/Output Parameter +---------------------- + +- ``request`` : Generalized request (handle). + +Output Parameter +---------------- + +- ``IERROR`` : Fortran only: Error status (integer). 
+ +Description +----------- + +:ref:`MPI_Grequest_complete` informs MPI that the operations represented by +the generalized request ``request`` are complete. A call to +MPI_Wait(request, status)`` will return, and a call to +MPI_Test(request, flag, status)`` will return flag=true only after a +call to :ref:`MPI_Grequest_complete` has declared that these operations are +complete. + +MPI imposes no restrictions on the code executed by the callback +functions. However, new nonblocking operations should be defined so that +the general semantic rules about MPI calls such as :ref:`MPI_Test`, +:ref:`MPI_Request_free`, or :ref:`MPI_Cancel` still hold. For example, all +these calls are supposed to be local and nonblocking. Therefore, the +callback functions ``query_fn``, ``free_fn``, or ``cancel_fn`` should +invoke blocking MPI communication calls only if the context is such that +these calls are guaranteed to return in finite time. Once :ref:`MPI_Cancel` +has been invoked, the canceled operation should complete in finite time, +regardless of the state of other processes (the operation has acquired +"local" semantics). It should either succeed or fail without +side-effects. The user should guarantee these same properties for newly +defined operations. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler +MPI_ERRORS_RETURN may be used to cause error values to be returned. +Note that MPI does not guarantee that an MPI program can continue past +an error. 
diff --git a/docs/man-openmpi/man3/MPI_Grequest_start.3.rst b/docs/man-openmpi/man3/MPI_Grequest_start.3.rst new file mode 100644 index 00000000000..85f3bb91c79 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Grequest_start.3.rst @@ -0,0 +1,223 @@ +.. _mpi_grequest_start: + +MPI_Grequest_start +================== + +.. include_body + +:ref:`MPI_Grequest_start` - Starts a generalized request and returns a +handle to it in ``request``. + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. code:: c + + #include + + int MPI_Grequest_start(MPI_Grequest_query_function *query_fn, + MPI_Grequest_free_function *free_fn, + MPI_Grequest_cancel_function *cancel_fn, void *extra_state, + MPI_Request *request) + +Fortran Syntax (See Fortran 77 Notes) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_GREQUEST_START(QUERY_FN, FREE_FN, CANCEL_FN, EXTRA_STATE, + REQUEST, IERROR) + INTEGER REQUEST, IERROR + EXTERNAL QUERY_FN, FREE_FN, CANCEL_FN + INTEGER(KIND=MPI_ADDRESS_KIND) EXTRA_STATE + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code:: fortran + + USE mpi_f08 + + MPI_Grequest_start(query_fn, free_fn, cancel_fn, extra_state, request, + ierror) + PROCEDURE(MPI_Grequest_query_function) :: query_fn + PROCEDURE(MPI_Grequest_free_function) :: free_fn + PROCEDURE(MPI_Grequest_cancel_function) :: cancel_fn + INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: extra_state + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input Parameters +---------------- + +- ``query_fn`` : Callback function invoked when request status is + queried (function). +- ``free_fn`` : Callback function invoked when request is freed + (function). +- ``cancel_fn`` : Callback function invoked when request is canceled + (function). +- ``extra_state`` : Extra state. + +Output Parameters +----------------- + +- ``request`` : Generalized request (handle). +- ``IERROR`` : Fortran only: Error status (integer). 
+ +Description +----------- + +:ref:`MPI_Grequest_start` starts a generalized ``request`` and returns a +handle to it in ``request``. + +The syntax and meaning of the callback functions are listed below. All +callback functions are passed the ``extra_state`` argument that was +associated with the ``request`` by the starting call +:ref:`MPI_Grequest_start`. This can be used to maintain user-defined state +for the ``request``. In C, the query function is + +.. code:: c + + typedef int MPI_Grequest_query_function(void *extra_state, + MPI_Status *status); + +In Fortran, it is + +.. code:: fortran + + SUBROUTINE GREQUEST_QUERY_FUNCTION(EXTRA_STATE, STATUS, IERROR) + INTEGER STATUS(MPI_STATUS_SIZE), IERROR + INTEGER(KIND=MPI_ADDRESS_KIND) EXTRA_STATE + +The ``query_fn`` function computes the status that should be returned +for the generalized request. The status also includes information about +successful/unsuccessful cancellation of the request (result to be +returned by ``MPI_Test_cancelled``). + +The ``query_fn`` function is invoked by the +``MPI_{Wait|Test}{any|some|all}`` call that completed the generalized +request associated with this callback. The callback function is also +invoked by calls to ``MPI_Request_get_status`` if the request is +complete when the call occurs. In both cases, the callback is passed a +reference to the corresponding status variable passed by the user to the +MPI call. If the user provided ``MPI_STATUS_IGNORE`` or +``MPI_STATUSES_IGNORE`` to the MPI function that causes ``query_fn`` to +be called, then MPI will pass a valid status object to ``query_fn``, and +this status will be ignored upon return of the callback function. Note +that ``query_fn`` is invoked only after ``MPI_Grequest_complete`` is +called on the request; it may be invoked several times for the same +generalized request. 
Note also that a call to +``MPI_{Wait|Test}{some|all}`` may cause multiple invocations of +``query_fn`` callback functions, one for each generalized request that +is completed by the MPI call. The order of these invocations is not +specified by MPI. + +In C, the free function is + +.. code:: c + + typedef int MPI_Grequest_free_function(void *extra_state); + +And in Fortran, it is + +.. code:: fortran + + SUBROUTINE GREQUEST_FREE_FUNCTION(EXTRA_STATE, IERROR) + INTEGER IERROR + INTEGER(KIND=MPI_ADDRESS_KIND) EXTRA_STATE + +The ``free_fn`` callback function is invoked to clean up user-allocated +resources when the generalized request is freed. + +The ``free_fn`` function is invoked by the +``MPI_{Wait|Test}{any|some|all}`` call that completed the generalized +request associated with this callback. ``free_fn`` is invoked after the +call to ``query_fn`` for the same request. However, if the MPI call +completed multiple generalized requests, the order in which ``free_fn`` +callback functions are invoked is not specified by MPI. + +The ``free_fn`` callback is also invoked for generalized requests that +are freed by a call to ``MPI_Request_free`` (no call to +``MPI_{Wait|Test}{any|some|all}`` will occur for such a request). In +this case, the callback function will be called either in the MPI call +``MPI_Request_free(request)`` or in the MPI call +``MPI_Grequest_complete(request)``, whichever happens last. In other +words, in this case the actual freeing code is executed as soon as both +calls (``MPI_Request_free`` and ``MPI_Grequest_complete``) have +occurred. The ``request`` is not deallocated until after ``free_fn`` +completes. Note that ``free_fn`` will be invoked only once per request +by a correct program. + +In C, the cancel function is + +.. code:: c + + typedef int MPI_Grequest_cancel_function(void *extra_state, int complete); + +In Fortran, the cancel function is + +.. 
code:: fortran
If one or more of the ``request``\ s +in a call to ``MPI_{Wait|Test}{some|all``} has failed, then the MPI call +will return MPI_ERR_IN_STATUS. In such a case, if the MPI call was +passed an array of statuses, then MPI will return in each of the +statuses that correspond to a completed generalized ``request`` the +error code returned by the corresponding invocation of its ``free_fn`` +callback function. However, if the MPI function was passed +MPI_STATUSES_IGNORE, then the individual error codes returned by +each callback function will be lost. + +See the MPI man page for a full list of MPI error codes. diff --git a/docs/man-openmpi/man3/MPI_Group_c2f.3.rst b/docs/man-openmpi/man3/MPI_Group_c2f.3.rst new file mode 100644 index 00000000000..f68e543ea4e --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Group_c2f.3.rst @@ -0,0 +1,9 @@ +.. _mpi_group_c2f: + +MPI_Group_c2f +============= + .. include_body + +.. include:: ../man3/MPI_Comm_f2c.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Group_compare.3.rst b/docs/man-openmpi/man3/MPI_Group_compare.3.rst new file mode 100644 index 00000000000..f84ca2690da --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Group_compare.3.rst @@ -0,0 +1,81 @@ +.. _mpi_group_compare: + +MPI_Group_compare +================= + +.. include_body + +:ref:`MPI_Group_compare` - Compares two groups. + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. code:: c + + #include + + int MPI_Group_compare(MPI_Group group1, MPI_Group group2, + int *result) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_GROUP_COMPARE(GROUP1, GROUP2, RESULT, IERROR) + INTEGER GROUP1, GROUP2, RESULT, IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code:: fortran + + USE mpi_f08 + + MPI_Group_compare(group1, group2, result, ierror) + TYPE(MPI_Group), INTENT(IN) :: group1, group2 + INTEGER, INTENT(OUT) :: result + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input Parameters +---------------- + +- ``group1`` : First group (handle). +- ``group2`` : Second group (handle). + +Output Parameters +----------------- + +- ``result`` : Integer which is MPI_IDENT if the order and members of + the two groups are the same, MPI_SIMILAR if only the members are the + same, and MPI_UNEQUAL otherwise. +- ``IERROR`` : Fortran only: Error status (integer). + +Description +----------- + +MPI_IDENT results if the group members and group order is exactly +the same in both groups. This happens for instance if ``group1`` and +``group2`` are the same handle. MPI_SIMILAR results if the group +members are the same but the order is different. MPI_UNEQUAL results +otherwise. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler +MPI_ERRORS_RETURN may be used to cause error values to be returned. +Note that MPI does not guarantee that an MPI program can continue past +an error. diff --git a/docs/man-openmpi/man3/MPI_Group_difference.3.rst b/docs/man-openmpi/man3/MPI_Group_difference.3.rst new file mode 100644 index 00000000000..9650acc3e3a --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Group_difference.3.rst @@ -0,0 +1,91 @@ +.. _mpi_group_difference: + +MPI_Group_difference +==================== + +.. include_body + +:ref:`MPI_Group_difference` - Makes a group from the difference of two groups. + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. 
code:: c + + #include + + int MPI_Group_difference(MPI_Group group1, MPI_Group group2, + MPI_Group *newgroup) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_GROUP_DIFFERENCE(GROUP1, GROUP2, NEWGROUP, IERROR) + INTEGER GROUP1, GROUP2, NEWGROUP, IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code:: fortran + + USE mpi_f08 + + MPI_Group_difference(group1, group2, newgroup, ierror) + TYPE(MPI_Group), INTENT(IN) :: group1, group2 + TYPE(MPI_Group), INTENT(OUT) :: newgroup + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input Parameters +---------------- + +- group1 : First group (handle). +- group2 : Second group (handle). + +Output Parameters +----------------- + +- newgroup : Difference group (handle). +- IERROR : Fortran only: Error status (integer). + +Description +----------- + +The set-like operations are defined as follows: + +- union -- All elements of the first group (group1), followed by all + elements of second group (group2) that are not in the first group +- intersect -- all elements of the first group that are also in the + second group, ordered as in first group +- difference -- all elements of the first group that are not in the + second group, ordered as in the first group + +Note that for these operations the order of processes in the output +group is determined primarily by order in the first group (if possible) +and then, if necessary, by order in the second group. Neither union nor +intersection are commutative, but both are associative. + +The new group can be empty, that is, equal to MPI_GROUP_EMPTY. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. 
The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: :ref:`MPI_Group_free` diff --git a/docs/man-openmpi/man3/MPI_Group_excl.3.rst b/docs/man-openmpi/man3/MPI_Group_excl.3.rst new file mode 100644 index 00000000000..ecbde59543a --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Group_excl.3.rst @@ -0,0 +1,93 @@ +.. _mpi_group_excl: + +MPI_Group_excl +============== + +.. include_body + +:ref:`MPI_Group_excl` - Produces a group by reordering an existing group and +taking only unlisted members. + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. code:: c + + #include + + int MPI_Group_excl(MPI_Group group, int n, const int ranks[], + MPI_Group *newgroup) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_GROUP_EXCL(GROUP, N, RANKS, NEWGROUP, IERROR) + INTEGER GROUP, N, RANKS(*), NEWGROUP, IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code:: fortran + + USE mpi_f08 + + MPI_Group_excl(group, n, ranks, newgroup, ierror) + TYPE(MPI_Group), INTENT(IN) :: group + INTEGER, INTENT(IN) :: n, ranks(n) + TYPE(MPI_Group), INTENT(OUT) :: newgroup + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input Parameters +---------------- + +- group : Group (handle). +- n : Number of elements in array ranks (integer). +- ranks : Array of integer ranks in group not to appear in newgroup. + +Output Parameters +----------------- + +- newgroup : New group derived from above, preserving the order defined + by group (handle). +- IERROR : Fortran only: Error status (integer). + +Description +----------- + +The function :ref:`MPI_Group_excl` creates a group of processes newgroup that +is obtained by deleting from group those processes with ranks ranks[0], +... ranks[n-1]. 
The ordering of processes in newgroup is identical to +the ordering in group. Each of the n elements of ranks must be a valid +rank in group and all elements must be distinct; otherwise, the call is +erroneous. If n = 0, then newgroup is identical to group. + +Note +---- + +Currently, each of the ranks to exclude must be a valid rank in the +group and all elements must be distinct or the function is erroneous. +This restriction is per the draft. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: :ref:`MPI_Group_range_excl` diff --git a/docs/man-openmpi/man3/MPI_Group_f2c.3.rst b/docs/man-openmpi/man3/MPI_Group_f2c.3.rst new file mode 100644 index 00000000000..871f8aa110d --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Group_f2c.3.rst @@ -0,0 +1,9 @@ +.. _mpi_group_f2c: + +MPI_Group_f2c +============= + .. include_body + +.. include:: ../man3/MPI_Comm_f2c.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Group_free.3.rst b/docs/man-openmpi/man3/MPI_Group_free.3.rst new file mode 100644 index 00000000000..20420e851cb --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Group_free.3.rst @@ -0,0 +1,78 @@ +.. _mpi_group_free: + +MPI_Group_free +============== + +.. include_body + +:ref:`MPI_Group_free` - Frees a group. + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. code:: c + + #include + + int MPI_Group_free(MPI_Group *group) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: fortran + + USE MPI + ! 
or the older form: INCLUDE 'mpif.h' + + MPI_GROUP_FREE(GROUP, IERROR) + INTEGER GROUP, IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code:: fortran + + USE mpi_f08 + + MPI_Group_free(group, ierror) + TYPE(MPI_Group), INTENT(INOUT) :: group + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input/Output Parameter +---------------------- + +- ``group`` : Group (handle). + +Output Parameter +---------------- + +- ``IERROR`` : Fortran only: Error status (integer). + +Description +----------- + +This operation marks a ``group`` object for deallocation. The handle +``group`` is set to MPI_GROUP_NULL by the call. Any ongoing +operation using this ``group`` will complete normally. + +Note +---- + +On return, ``group`` is set to MPI_GROUP_NULL. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler +MPI_ERRORS_RETURN may be used to cause error values to be returned. +Note that MPI does not guarantee that an MPI program can continue past +an error. diff --git a/docs/man-openmpi/man3/MPI_Group_from_session_pset.3.rst b/docs/man-openmpi/man3/MPI_Group_from_session_pset.3.rst new file mode 100644 index 00000000000..1e460f48763 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Group_from_session_pset.3.rst @@ -0,0 +1,92 @@ +.. _mpi_group_from_session_pset: + +MPI_Group_from_session_pset +=========================== + +.. include_body + +:ref:`MPI_Group_from_session_pset` - Creates a group using a provided session +handle and process set. + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. 
code:: c + + #include + + int MPI_Group_from_session_pset(MPI_Session session, const char *pset_name, MPI_Group *newgroup) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_GROUP_FROM_SESSION_PSET(SESSION, PSET_NAME, NEWGROUP, IERROR) + INTEGER SESSION, NEWGROUP, IERROR + CHARACTER*(*) PSET_NAME + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code:: fortran + + USE mpi_f08 + + MPI_Group_from_session_pset(session, pset_name, newgroup, ierror) + TYPE(MPI_Session), INTENT(IN) :: session + CHARACTER(LEN=*), INTENT(IN) :: pset_name + TYPE(MPI_Group), INTENT(OUT) :: newgroup + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input Parameters +---------------- + +- session : Session (handle). +- pset_name : name of process set to use to create the new group + (string) + +Output Parameters +----------------- + +- newgroup : New group derived from supplied session and process set + (handle). +- IERROR : Fortran only: Error status (integer). + +Description +----------- + +The function :ref:`MPI_Group_from_session_pset` creates a group newgroup using +the provided session handle and process set. The process set name must +be one returned from an invocation of :ref:`MPI_Session_get_nth_pset` using the +supplied session handle. If the pset_name does not exist, MPI_GROUP_NULL +will be returned in the newgroup argument. + +Note +---- + +As with other group constructors, :ref:`MPI_Group_from_session_pset` is a local +function. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +MPI_Session_set_errhandler; the predefined error handler +MPI_ERRORS_RETURN may be used to cause error values to be returned. 
Note +that MPI does not guarantee that an MPI program can continue past an +error. + + +.. seealso:: :ref:`MPI_Session_init` diff --git a/docs/man-openmpi/man3/MPI_Group_incl.3.rst b/docs/man-openmpi/man3/MPI_Group_incl.3.rst new file mode 100644 index 00000000000..789c773d062 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Group_incl.3.rst @@ -0,0 +1,95 @@ +.. _mpi_group_incl: + +MPI_Group_incl +============== + +.. include_body + +:ref:`MPI_Group_incl` - Produces a group by reordering an existing group and +taking only listed members. + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. code:: c + + #include + + int MPI_Group_incl(MPI_Group group, int n, const int ranks[], + MPI_Group *newgroup) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_GROUP_INCL(GROUP, N, RANKS, NEWGROUP, IERROR) + INTEGER GROUP, N, RANKS(*), NEWGROUP, IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code:: fortran + + USE mpi_f08 + + MPI_Group_incl(group, n, ranks, newgroup, ierror) + TYPE(MPI_Group), INTENT(IN) :: group + INTEGER, INTENT(IN) :: n, ranks(n) + TYPE(MPI_Group), INTENT(OUT) :: newgroup + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input Parameters +---------------- + +- group : Group (handle). +- n : Number of elements in array ranks (and size of + newgroup)(integer). +- ranks : Ranks of processes in group to appear in newgroup (array of + integers). + +Output Parameters +----------------- + +- newgroup : New group derived from above, in the order defined by + ranks (handle). +- IERROR : Fortran only: Error status (integer). + +Description +----------- + +The function :ref:`MPI_Group_incl` creates a group group_out that consists of +the n processes in group with ranks rank[0], ..., rank[n-1]; the process +with rank i in group_out is the process with rank ranks[i] in group. +Each of the n elements of ranks must be a valid rank in group and all +elements must be distinct, or else the program is erroneous. 
If n = 0, +then group_out is MPI_GROUP_EMPTY. This function can, for instance, be +used to reorder the elements of a group. + +Note +---- + +This implementation does not currently check to ensure that there are no +duplicates in the list of ranks. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: :ref:`MPI_Group_compare` diff --git a/docs/man-openmpi/man3/MPI_Group_intersection.3.rst b/docs/man-openmpi/man3/MPI_Group_intersection.3.rst new file mode 100644 index 00000000000..d01ea7f19cb --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Group_intersection.3.rst @@ -0,0 +1,92 @@ +.. _mpi_group_intersection: + +MPI_Group_intersection +====================== + +.. include_body + +:ref:`MPI_Group_intersection` - Produces a group at the intersection of two +existing groups. + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. code:: c + + #include + + int MPI_Group_intersection(MPI_Group group1, MPI_Group group2, + MPI_Group *newgroup) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_GROUP_INTERSECTION(GROUP1, GROUP2, NEWGROUP, IERROR) + INTEGER GROUP1, GROUP2, NEWGROUP, IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code:: fortran + + USE mpi_f08 + + MPI_Group_intersection(group1, group2, newgroup, ierror) + TYPE(MPI_Group), INTENT(IN) :: group1, group2 + TYPE(MPI_Group), INTENT(OUT) :: newgroup + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input Parameters +---------------- + +- group1 : First group (handle). +- group2 : Second group (handle). + +Output Parameters +----------------- + +- newgroup : Intersection group (handle). +- IERROR : Fortran only: Error status (integer). + +Description +----------- + +The set-like operations are defined as follows: + +- union -- All elements of the first group (group1), followed by all + elements of second group (group2) not in first. +- intersect -- all elements of the first group that are also in the + second group, ordered as in first group. +- difference -- all elements of the first group that are not in the + second group, ordered as in the first group. + +Note that for these operations the order of processes in the output +group is determined primarily by order in the first group (if possible) +and then, if necessary, by order in the second group. Neither union nor +intersection are commutative, but both are associative. + +The new group can be empty, that is, equal to MPI_GROUP_EMPTY. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. 
seealso:: :ref:`MPI_Group_free` diff --git a/docs/man-openmpi/man3/MPI_Group_range_excl.3.rst b/docs/man-openmpi/man3/MPI_Group_range_excl.3.rst new file mode 100644 index 00000000000..aaf4f20e963 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Group_range_excl.3.rst @@ -0,0 +1,92 @@ +.. _mpi_group_range_excl: + + +MPI_Group_range_excl +==================== + +.. include_body + +:ref:`MPI_Group_range_excl` - Produces a group by excluding ranges of +processes from an existing group. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Group_range_excl(MPI_Group group, int n, int ranges[][3], + MPI_Group *newgroup) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_GROUP_RANGE_EXCL(GROUP, N, RANGES, NEWGROUP, IERROR) + INTEGER GROUP, N, RANGES(3,*), NEWGROUP, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Group_range_excl(group, n, ranges, newgroup, ierror) + TYPE(MPI_Group), INTENT(IN) :: group + INTEGER, INTENT(IN) :: n, ranges(3,n) + TYPE(MPI_Group), INTENT(OUT) :: newgroup + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``group``: Group (handle). +* ``n``: Number of triplets in array ranges (integer). +* ``ranges``: A one-dimensional array of integer triplets of the form (first rank, last rank, stride), indicating the ranks in group of processes to be excluded from the output group newgroup. + +OUTPUT PARAMETERS +----------------- +* ``newgroup``: New group derived from above, preserving the order in group (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +Each computed rank must be a valid rank in group and all computed ranks +must be distinct, or else the program is erroneous. 
+ +The functionality of this routine is specified to be equivalent to +expanding the array of ranges to an array of the excluded ranks and +passing the resulting array of ranks and other arguments to +:ref:`MPI_Group_excl`. A call to :ref:`MPI_Group_excl` is equivalent to a call to +:ref:`MPI_Group_range_excl` with each rank i in ranks replaced by the triplet +(i,i,1) in the argument ranges. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Group_excl` :ref:`MPI_Group_free` diff --git a/docs/man-openmpi/man3/MPI_Group_range_incl.3.rst b/docs/man-openmpi/man3/MPI_Group_range_incl.3.rst new file mode 100644 index 00000000000..27c7a9b09c8 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Group_range_incl.3.rst @@ -0,0 +1,119 @@ +.. _mpi_group_range_incl: + + +MPI_Group_range_incl +==================== + +.. include_body + +:ref:`MPI_Group_range_incl` - Creates a new group from ranges of ranks in +an existing group. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Group_range_incl(MPI_Group group, int n, int ranges[][3], + MPI_Group *newgroup) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_GROUP_RANGE_INCL(GROUP, N, RANGES, NEWGROUP, IERROR) + INTEGER GROUP, N, RANGES(3,*), NEWGROUP, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE mpi_f08 + MPI_Group_range_incl(group, n, ranges, newgroup, ierror) + TYPE(MPI_Group), INTENT(IN) :: group + INTEGER, INTENT(IN) :: n, ranges(3,n) + TYPE(MPI_Group), INTENT(OUT) :: newgroup + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``group``: Group (handle). +* ``n``: Number of triplets in array ranges (integer). +* ``ranges``: A one-dimensional array of integer triplets, of the form (first rank, last rank, stride) indicating ranks in group or processes to be included in newgroup. + +OUTPUT PARAMETERS +----------------- +* ``newgroup``: New group derived from above, in the order defined by ranges (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +If ranges consist of the triplets + +:: + + (first1, last1, stride1), ..., (firstn, lastn, striden) + +then newgroup consists of the sequence of processes in group with ranks + +:: + + last(1)-first(1) + first(1), first(1) + stride(1),..., first(1) + ---------------- stride(1),... + stride(1) + + last(n)-first(n) + first(n), first(n) + stride(n),..., first(n) + ---------------- stride(n). + stride(n) + +Each computed rank must be a valid rank in group and all computed ranks +must be distinct, or else the program is erroneous. Note that we may +have first(i) > last(i), and stride(i) may be negative, but cannot be +zero. + +The functionality of this routine is specified to be equivalent to +expanding the array of ranges to an array of the included ranks and +passing the resulting array of ranks and other arguments to +:ref:`MPI_Group_incl`. A call to :ref:`MPI_Group_incl` is equivalent to a call to +:ref:`MPI_Group_range_incl` with each rank i in ranks replaced by the triplet +(i,i,1) in the argument ranges. + + +NOTE +---- + +This implementation does not currently check to see that the list of +ranges to include are valid ranks in the group. 
+ + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Group_incl` :ref:`MPI_Group_free` diff --git a/docs/man-openmpi/man3/MPI_Group_rank.3.rst b/docs/man-openmpi/man3/MPI_Group_rank.3.rst new file mode 100644 index 00000000000..fc7bb0719ef --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Group_rank.3.rst @@ -0,0 +1,78 @@ +.. _mpi_group_rank: + + +MPI_Group_rank +============== + +.. include_body + +:ref:`MPI_Group_rank` - Returns the rank of the calling process in the +given group. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Group_rank(MPI_Group group, int *rank) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_GROUP_RANK(GROUP, RANK, IERROR) + INTEGER GROUP, RANK, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Group_rank(group, rank, ierror) + TYPE(MPI_Group), INTENT(IN) :: group + INTEGER, INTENT(OUT) :: rank + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``group``: Group (handle). + +OUTPUT PARAMETERS +----------------- +* ``rank``: Rank of the calling process in group, or MPI_UNDEFINED if the process is not a member (integer). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Group_rank` returns as the output parameter *rank* the rank of the +calling process in group. 
If the process is not a member of group then +MPI_UNDEFINED is returned. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_Group_size.3.rst b/docs/man-openmpi/man3/MPI_Group_size.3.rst new file mode 100644 index 00000000000..9c860d0524f --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Group_size.3.rst @@ -0,0 +1,77 @@ +.. _mpi_group_size: + + +MPI_Group_size +============== + +.. include_body + +:ref:`MPI_Group_size` - Returns the size of a group. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Group_size(MPI_Group group, int *size) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_GROUP_SIZE(GROUP, SIZE, IERROR) + INTEGER GROUP, SIZE, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Group_size(group, size, ierror) + TYPE(MPI_Group), INTENT(IN) :: group + INTEGER, INTENT(OUT) :: size + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``group``: Group (handle). + +OUTPUT PARAMETERS +----------------- +* ``size``: Number of processes in the group (integer). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Group_size` returns in *size* the number of processes in the group. +Thus, if group = MPI_GROUP_EMPTY, then the call will return size = 0. 
On +the other hand, a call with group = MPI_GROUP_NULL is erroneous. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_Group_translate_ranks.3.rst b/docs/man-openmpi/man3/MPI_Group_translate_ranks.3.rst new file mode 100644 index 00000000000..27b0d9fb30e --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Group_translate_ranks.3.rst @@ -0,0 +1,85 @@ +.. _mpi_group_translate_ranks: + + +MPI_Group_translate_ranks +========================= + +.. include_body + +:ref:`MPI_Group_translate_ranks` - Translates the ranks of processes in one +group to those in another group. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Group_translate_ranks(MPI_Group group1, int n, + const int ranks1[], MPI_Group group2, int ranks2[]) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_GROUP_TRANSLATE_RANKS(GROUP1, N, RANKS1, GROUP2, RANKS2, + IERROR) + INTEGER GROUP1, N, RANKS1(*), GROUP2, RANKS2(*), IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Group_translate_ranks(group1, n, ranks1, group2, ranks2, ierror) + TYPE(MPI_Group), INTENT(IN) :: group1, group2 + INTEGER, INTENT(IN) :: n, ranks1(n) + INTEGER, INTENT(OUT) :: ranks2(n) + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``group1``: First group (handle). 
+* ``n``: Number of ranks in ranks1 and ranks2 arrays (integer). +* ``ranks1``: Array of zero or more valid ranks in group1. +* ``group2``: Second group (handle). + +OUTPUT PARAMETERS +----------------- +* ``ranks2``: Array of corresponding ranks in group2, MPI_UNDEFINED when no correspondence exists. +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +This function is important for determining the relative numbering of the +same processes in two different groups. For instance, if one knows the +ranks of certain processes in the group of MPI_COMM_WORLD, one might +want to know their ranks in a subset of that group. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_Group_union.3.rst b/docs/man-openmpi/man3/MPI_Group_union.3.rst new file mode 100644 index 00000000000..586104ac25a --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Group_union.3.rst @@ -0,0 +1,94 @@ +.. _mpi_group_union: + + +MPI_Group_union +=============== + +.. include_body + +:ref:`MPI_Group_union` - Produces a group by combining two groups. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Group_union(MPI_Group group1, MPI_Group group2, + MPI_Group *newgroup) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! 
or the older form: INCLUDE 'mpif.h' + MPI_GROUP_UNION(GROUP1, GROUP2, NEWGROUP, IERROR) + INTEGER GROUP1, GROUP2, NEWGROUP, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Group_union(group1, group2, newgroup, ierror) + TYPE(MPI_Group), INTENT(IN) :: group1, group2 + TYPE(MPI_Group), INTENT(OUT) :: newgroup + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``group1``: First group (handle). +* ``group2``: Second group (handle). + +OUTPUT PARAMETERS +----------------- +* ``newgroup``: Union group (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +The set-like operations are defined as follows: + + o union -- All elements of the first group (group1), followed by all elements of second group (group2) not in first. + + o intersect -- all elements of the first group that are also in the second group, ordered as in first group. + + o difference -- all elements of the first group that are not in the second group, ordered as in the first group. + +Note that for these operations the order of processes in the output +group is determined primarily by order in the first group (if possible) +and then, if necessary, by order in the second group. Neither union nor +intersection are commutative, but both are associative. + +The new group can be empty, that is, equal to MPI_GROUP_EMPTY. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. 
+ + +.. seealso:: + :ref:`MPI_Group_free` diff --git a/docs/man-openmpi/man3/MPI_Iallgather.3.rst b/docs/man-openmpi/man3/MPI_Iallgather.3.rst new file mode 100644 index 00000000000..ef0239b2968 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Iallgather.3.rst @@ -0,0 +1,9 @@ +.. _mpi_iallgather: + +MPI_Iallgather +============== + .. include_body + +.. include:: ../man3/MPI_Allgather.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Iallgatherv.3.rst b/docs/man-openmpi/man3/MPI_Iallgatherv.3.rst new file mode 100644 index 00000000000..4bce01a7b77 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Iallgatherv.3.rst @@ -0,0 +1,9 @@ +.. _mpi_iallgatherv: + +MPI_Iallgatherv +=============== + .. include_body + +.. include:: ../man3/MPI_Allgatherv.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Iallreduce.3.rst b/docs/man-openmpi/man3/MPI_Iallreduce.3.rst new file mode 100644 index 00000000000..5a97d182edb --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Iallreduce.3.rst @@ -0,0 +1,9 @@ +.. _mpi_iallreduce: + +MPI_Iallreduce +============== + .. include_body + +.. include:: ../man3/MPI_Allreduce.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Ialltoall.3.rst b/docs/man-openmpi/man3/MPI_Ialltoall.3.rst new file mode 100644 index 00000000000..2a5e8b6cd4c --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Ialltoall.3.rst @@ -0,0 +1,9 @@ +.. _mpi_ialltoall: + +MPI_Ialltoall +============= + .. include_body + +.. include:: ../man3/MPI_Alltoall.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Ialltoallv.3.rst b/docs/man-openmpi/man3/MPI_Ialltoallv.3.rst new file mode 100644 index 00000000000..d51ef26f019 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Ialltoallv.3.rst @@ -0,0 +1,9 @@ +.. _mpi_ialltoallv: + +MPI_Ialltoallv +============== + .. include_body + +.. include:: ../man3/MPI_Alltoallv.3.rst + :start-after: .. 
include_body + diff --git a/docs/man-openmpi/man3/MPI_Ialltoallw.3.rst b/docs/man-openmpi/man3/MPI_Ialltoallw.3.rst new file mode 100644 index 00000000000..947f256495d --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Ialltoallw.3.rst @@ -0,0 +1,9 @@ +.. _mpi_ialltoallw: + +MPI_Ialltoallw +============== + .. include_body + +.. include:: ../man3/MPI_Alltoallw.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Ibarrier.3.rst b/docs/man-openmpi/man3/MPI_Ibarrier.3.rst new file mode 100644 index 00000000000..1f35f6bd9a9 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Ibarrier.3.rst @@ -0,0 +1,9 @@ +.. _mpi_ibarrier: + +MPI_Ibarrier +============ + .. include_body + +.. include:: ../man3/MPI_Barrier.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Ibcast.3.rst b/docs/man-openmpi/man3/MPI_Ibcast.3.rst new file mode 100644 index 00000000000..6a367aa9a16 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Ibcast.3.rst @@ -0,0 +1,9 @@ +.. _mpi_ibcast: + +MPI_Ibcast +========== + .. include_body + +.. include:: ../man3/MPI_Bcast.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Ibsend.3.rst b/docs/man-openmpi/man3/MPI_Ibsend.3.rst new file mode 100644 index 00000000000..a159353626c --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Ibsend.3.rst @@ -0,0 +1,97 @@ +.. _mpi_ibsend: + + +MPI_Ibsend +========== + +.. include_body + +:ref:`MPI_Ibsend` - Starts a nonblocking buffered send. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Ibsend(const void *buf, int count, MPI_Datatype datatype, + int dest, int tag, MPI_Comm comm, MPI_Request *request) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_IBSEND(BUF, COUNT, DATATYPE, DEST, TAG, COMM, REQUEST, IERROR) + BUF(*) + INTEGER COUNT, DATATYPE, DEST, TAG, COMM, REQUEST, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE mpi_f08 + MPI_Ibsend(buf, count, datatype, dest, tag, comm, request, ierror) + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: buf + INTEGER, INTENT(IN) :: count, dest, tag + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``buf``: Initial address of send buffer (choice). +* ``count``: Number of elements in send buffer (integer). +* ``datatype``: Data type of each send buffer element (handle). +* ``dest``: Rank of destination (integer). +* ``tag``: Message tag (integer). +* ``comm``: Communicator (handle). + +OUTPUT PARAMETERS +----------------- +* ``request``: Communication request (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Ibsend` posts a buffered-mode, nonblocking send. Nonblocking calls +allocate a communication request object and associate it with the +request handle (the argument request). The request can be used later to +query the status of the communication or wait for its completion. + +A nonblocking send call indicates that the system may start copying data +out of the send buffer. The sender should not modify any part of the +send buffer after a nonblocking send operation is called, until the send +completes. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. 
seealso:: + :ref:`MPI_Test` :ref:`MPI_Wait` diff --git a/docs/man-openmpi/man3/MPI_Iexscan.3.rst b/docs/man-openmpi/man3/MPI_Iexscan.3.rst new file mode 100644 index 00000000000..0d9664ddcf7 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Iexscan.3.rst @@ -0,0 +1,9 @@ +.. _mpi_iexscan: + +MPI_Iexscan +=========== + .. include_body + +.. include:: ../man3/MPI_Exscan.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Igather.3.rst b/docs/man-openmpi/man3/MPI_Igather.3.rst new file mode 100644 index 00000000000..7538b4de3b1 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Igather.3.rst @@ -0,0 +1,9 @@ +.. _mpi_igather: + +MPI_Igather +=========== + .. include_body + +.. include:: ../man3/MPI_Gather.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Igatherv.3.rst b/docs/man-openmpi/man3/MPI_Igatherv.3.rst new file mode 100644 index 00000000000..cf58ee9858b --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Igatherv.3.rst @@ -0,0 +1,9 @@ +.. _mpi_igatherv: + +MPI_Igatherv +============ + .. include_body + +.. include:: ../man3/MPI_Gatherv.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Improbe.3.rst b/docs/man-openmpi/man3/MPI_Improbe.3.rst new file mode 100644 index 00000000000..ffd1e4b2f94 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Improbe.3.rst @@ -0,0 +1,105 @@ +.. _mpi_improbe: + + +MPI_Improbe +=========== + +.. include_body + +:ref:`MPI_Improbe` - Non-blocking matched probe for a message. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Improbe(int source, int tag, MPI_Comm comm, + int *flag, MPI_Message *message, MPI_Status *status) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! 
or the older form: INCLUDE 'mpif.h' + MPI_IMPROBE(SOURCE, TAG, COMM, FLAG, MESSAGE, STATUS, IERROR) + LOGICAL FLAG + INTEGER SOURCE, TAG, COMM, MESSAGE + INTEGER STATUS(MPI_STATUS_SIZE), IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Improbe(source, tag, comm, flag, message, status, ierror) + INTEGER, INTENT(IN) :: source, tag + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, INTENT(OUT) :: flag + TYPE(MPI_Message), INTENT(OUT) :: message + TYPE(MPI_Status) :: status + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``source``: Source rank or MPI_ANY_SOURCE (integer). +* ``tag``: Tag value or MPI_ANY_TAG (integer). +* ``comm``: Communicator (handle). + +OUTPUT PARAMETERS +----------------- +* ``flag``: Flag (logical). +* ``message``: Message (handle). +* ``status``: Status object (status). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +Like :ref:`MPI_Probe` and :ref:`MPI_Iprobe`, the :ref:`MPI_Mprobe` and :ref:`MPI_Improbe` operations +allow incoming messages to be queried without actually receiving them, +except that :ref:`MPI_Mprobe` and :ref:`MPI_Improbe` provide a mechanism to receive +the specific message that was matched regardless of other intervening +probe or receive operations. This gives the application an opportunity +to decide how to receive the message, based on the information returned +by the probe. In particular, the application may allocate memory for the +receive buffer according to the length of the probed message. + +A matching probe with MPI_PROC_NULL as *source* returns *flag* = true, + +*message* = MPI_MESSAGE_NO_PROC, and the *status* object returns source += MPI_PROC_NULL, tag = MPI_ANY_TAG, and count = 0.
+ +:ref:`MPI_Improbe` returns a true value in *flag* if a message has been matched +and can be received by passing the *message* handle to the :ref:`MPI_Mrecv` or +:ref:`MPI_Imrecv` functions, provided the *source* was not MPI_PROC_NULL. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Mprobe` :ref:`MPI_Probe` :ref:`MPI_Iprobe` :ref:`MPI_Mrecv` :ref:`MPI_Imrecv` :ref:`MPI_Cancel` diff --git a/docs/man-openmpi/man3/MPI_Imrecv.3.rst b/docs/man-openmpi/man3/MPI_Imrecv.3.rst new file mode 100644 index 00000000000..1d3a49d102f --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Imrecv.3.rst @@ -0,0 +1,109 @@ +.. _mpi_imrecv: + + +MPI_Imrecv +========== + +.. include_body + +:ref:`MPI_Imrecv` - Non-blocking receive for a matched message + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Imrecv(void *buf, int count, MPI_Datatype type, + MPI_Message *message, MPI_Request *request) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_IMRECV(BUF, COUNT, DATATYPE, MESSAGE, REQUEST, IERROR) + BUF(*) + INTEGER COUNT, DATATYPE, MESSAGE, REQUEST, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE mpi_f08 + MPI_Imrecv(buf, count, datatype, message, request, ierror) + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: buf + INTEGER, INTENT(IN) :: count + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Message), INTENT(INOUT) :: message + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``count``: Number of elements to receive (nonnegative integer). +* ``datatype``: Datatype of each send buffer element (handle). +* ``message``: Message (handle). + +OUTPUT PARAMETERS +----------------- +* ``buf``: Initial address of receive buffer (choice). +* ``request``: Request (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +The functions :ref:`MPI_Mrecv` and :ref:`MPI_Imrecv` receive messages that have been +previously matched by a matching probe. + +The *request* returned from :ref:`MPI_Imrecv` can be used with any of the +:ref:`MPI_Test` and :ref:`MPI_Wait` variants, like any non-blocking receive request. + +If :ref:`MPI_Imrecv` is called with MPI_MESSAGE_NULL as the message argument, a +call to one of the :ref:`MPI_Test` or :ref:`MPI_Wait` variants will return immediately +with the *status* object set to *source* = MPI_PROC_NULL, *tag* = +MPI_ANY_TAG, and *count* = 0, as if a receive from MPI_PROC_NULL was +issued. + +If reception of a matched message is started with :ref:`MPI_Imrecv`, then it is +possible to cancel the returned request with :ref:`MPI_Cancel`. If :ref:`MPI_Cancel` +succeeds, the matched message must be found by a subsequent message +probe (:ref:`MPI_Probe`, :ref:`MPI_Iprobe`, :ref:`MPI_Mprobe`, or MPI_Improbe), received by a +subsequent receive operation or canceled by the sender. + +Note, however, that is it possible for the cancellation of operations +initiated with :ref:`MPI_Imrecv` to fail. 
An example of a failing case is when +canceling the matched message receive would violate MPI message ordering +rules (e.g., if another message matching the same message signature has +matched -- and possible received -- before this :ref:`MPI_Imrecv` is canceled). + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Mprobe` :ref:`MPI_Improbe` :ref:`MPI_Probe` :ref:`MPI_Iprobe` :ref:`MPI_Imrecv` :ref:`MPI_Cancel` diff --git a/docs/man-openmpi/man3/MPI_Ineighbor_allgather.3.rst b/docs/man-openmpi/man3/MPI_Ineighbor_allgather.3.rst new file mode 100644 index 00000000000..1c7e7ef6517 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Ineighbor_allgather.3.rst @@ -0,0 +1,9 @@ +.. _mpi_ineighbor_allgather: + +MPI_Ineighbor_allgather +======================= + .. include_body + +.. include:: ../man3/MPI_Neighbor_allgather.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Ineighbor_allgatherv.3.rst b/docs/man-openmpi/man3/MPI_Ineighbor_allgatherv.3.rst new file mode 100644 index 00000000000..e891f9a5853 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Ineighbor_allgatherv.3.rst @@ -0,0 +1,9 @@ +.. _mpi_ineighbor_allgatherv: + +MPI_Ineighbor_allgatherv +======================== + .. include_body + +.. include:: ../man3/MPI_Neighbor_allgatherv.3.rst + :start-after: .. 
include_body + diff --git a/docs/man-openmpi/man3/MPI_Ineighbor_alltoall.3.rst b/docs/man-openmpi/man3/MPI_Ineighbor_alltoall.3.rst new file mode 100644 index 00000000000..953db3d5682 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Ineighbor_alltoall.3.rst @@ -0,0 +1,9 @@ +.. _mpi_ineighbor_alltoall: + +MPI_Ineighbor_alltoall +====================== + .. include_body + +.. include:: ../man3/MPI_Neighbor_alltoall.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Ineighbor_alltoallv.3.rst b/docs/man-openmpi/man3/MPI_Ineighbor_alltoallv.3.rst new file mode 100644 index 00000000000..26b00d71799 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Ineighbor_alltoallv.3.rst @@ -0,0 +1,9 @@ +.. _mpi_ineighbor_alltoallv: + +MPI_Ineighbor_alltoallv +======================= + .. include_body + +.. include:: ../man3/MPI_Neighbor_alltoallv.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Ineighbor_alltoallw.3.rst b/docs/man-openmpi/man3/MPI_Ineighbor_alltoallw.3.rst new file mode 100644 index 00000000000..1fc9c4c3acd --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Ineighbor_alltoallw.3.rst @@ -0,0 +1,9 @@ +.. _mpi_ineighbor_alltoallw: + +MPI_Ineighbor_alltoallw +======================= + .. include_body + +.. include:: ../man3/MPI_Neighbor_alltoallw.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Info_c2f.3.rst b/docs/man-openmpi/man3/MPI_Info_c2f.3.rst new file mode 100644 index 00000000000..01e43dd6fad --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Info_c2f.3.rst @@ -0,0 +1,9 @@ +.. _mpi_info_c2f: + +MPI_Info_c2f +============ + .. include_body + +.. include:: ../man3/MPI_Comm_f2c.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Info_create.3.rst b/docs/man-openmpi/man3/MPI_Info_create.3.rst new file mode 100644 index 00000000000..61b05ca4222 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Info_create.3.rst @@ -0,0 +1,75 @@ +.. 
_mpi_info_create: + + +MPI_Info_create +=============== + +.. include_body + +:ref:`MPI_Info_create` - Creates a new info object. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Info_create(MPI_Info *info) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_INFO_CREATE(INFO, IERROR) + INTEGER INFO, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Info_create(info, ierror) + TYPE(MPI_Info), INTENT(OUT) :: info + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +OUTPUT PARAMETERS +----------------- +* ``info``: Info object created (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Info_create` creates a new info object. The newly created object +contains no key/value pairs. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Info_delete` :ref:`MPI_Info_dup` :ref:`MPI_Info_free` :ref:`MPI_Info_get` :ref:`MPI_Info_set` diff --git a/docs/man-openmpi/man3/MPI_Info_delete.3.rst b/docs/man-openmpi/man3/MPI_Info_delete.3.rst new file mode 100644 index 00000000000..2db22d3887d --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Info_delete.3.rst @@ -0,0 +1,84 @@ +.. _mpi_info_delete: + + +MPI_Info_delete +=============== + +.. include_body + +:ref:`MPI_Info_delete` - Deletes a key/value pair from *info*. 
+ + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Info_delete(MPI_Info info, const char *key) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_INFO_DELETE(INFO, KEY, IERROR) + INTEGER INFO, IERROR + CHARACTER*(*) KEY + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Info_delete(info, key, ierror) + TYPE(MPI_Info), INTENT(IN) :: info + CHARACTER(LEN=*), INTENT(IN) :: key + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT/OUTPUT PARAMETER +---------------------- +* ``info``: Info object (handle). + +INPUT PARAMETER +--------------- +* ``key``: Key (string). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Info_delete` deletes a (key,value) pair from *info*. If *key* is not +defined in *info*, the call raises an error of class MPI_ERR_INFO_NOKEY. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Info_create` :ref:`MPI_Info_dup` :ref:`MPI_Info_free` :ref:`MPI_Info_get` :ref:`MPI_Info_set` diff --git a/docs/man-openmpi/man3/MPI_Info_dup.3.rst b/docs/man-openmpi/man3/MPI_Info_dup.3.rst new file mode 100644 index 00000000000..0c80fe6b16b --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Info_dup.3.rst @@ -0,0 +1,80 @@ +.. _mpi_info_dup: + + +MPI_Info_dup +============ + +.. 
include_body + +:ref:`MPI_Info_dup` - Duplicates an info object. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Info_dup(MPI_Info info, MPI_Info *newinfo) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_INFO_DUP(INFO, NEWINFO, IERROR) + INTEGER INFO, NEWINFO, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Info_dup(info, newinfo, ierror) + TYPE(MPI_Info), INTENT(IN) :: info + TYPE(MPI_Info), INTENT(OUT) :: newinfo + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETER +--------------- +* ``info``: Info object (handle). + +OUTPUT PARAMETERS +----------------- +* ``newinfo``: Info object (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Info_dup` duplicates an existing info object, creating a new object, +with the same (key,value) pairs and the same ordering of keys. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Info_create` :ref:`MPI_Info_delete` :ref:`MPI_Info_free` :ref:`MPI_Info_get` :ref:`MPI_Info_set` diff --git a/docs/man-openmpi/man3/MPI_Info_env.3.rst b/docs/man-openmpi/man3/MPI_Info_env.3.rst new file mode 100644 index 00000000000..777fce9e27e --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Info_env.3.rst @@ -0,0 +1,96 @@ +.. _mpi_info_env: + + +MPI_Info_env +============ + +.. 
include_body + +:ref:`MPI_INFO_ENV` - Static MPI_Info object containing info about the +application + + +DESCRIPTION +----------- + +The MPI-3 standard established a static MPI_Info object named +:ref:`MPI_INFO_ENV` that can be used to access information about how the +application was executed from the run-time. + + +SUPPORTED FIELDS +---------------- + +command + If available, the value will be set to argv[0]. Note that the value + may not always be available - e.g., it is valid for a program to call + :ref:`MPI_Init` with NULL parameters, in which case argv[0] will not be set + if run as a singleton. This value will never be set in a Fortran + program as the argv are not available. + +argv + The argv given for the application. If no arguments are passed to the + application, then this value will not be set. It will also not be set + in the case of a singleton that calls :ref:`MPI_Init` with NULL parameters, + or a Fortran program. + +maxprocs + The number of processes in the job. + +soft + Open MPI does not support the *soft* option for specifying the number + of processes to be executed, so this value is set to the same as + *maxprocs*. + +host + The name of the host this process is executing upon - the value + returned from *gethostname()*. + +arch + The architecture of the host this process is executing upon. This + value indicates the underlying chip architecture (e.g., x86_64), if + it can be determined. + +wdir + The working directory at the time of process launch by mpiexec. Note + that this value will not be set for processes launched as singletons + as there is no reliable way for the MPI library to determine the + location. + +file + Although specified by the MPI-3 standard, no value is currently set + for this field. + +thread_level + The requested MPI thread level - note that this may differ from the + *actual* MPI thread level of the application. + +ompi_num_apps + The number of application contexts in an MPMD job. 
This is an Open + MPI-specific field and value. + +ompi_np + The number of processes in each application context, provided as a + space-delimited list of integers. This is an Open MPI-specific field + and value. + +ompi_first_rank + The MPI rank of the first process in each application context, + provided as a space-delimited list of integers This is an Open + MPI-specific field and value. + +ompi_positioned_file_dir + If Open MPI was asked to pre-position files, this field provides the + top-level directory where those files were place. This is an Open + MPI-specific field and value. + + +ERRORS +------ + +| When calling :ref:`MPI_INFO_GET`, the *flag* parameter will be set to zero + (false) if a value for the field has not been set. + + +.. seealso:: + :ref:`MPI_Info_get` diff --git a/docs/man-openmpi/man3/MPI_Info_f2c.3.rst b/docs/man-openmpi/man3/MPI_Info_f2c.3.rst new file mode 100644 index 00000000000..4670169bb2d --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Info_f2c.3.rst @@ -0,0 +1,9 @@ +.. _mpi_info_f2c: + +MPI_Info_f2c +============ + .. include_body + +.. include:: ../man3/MPI_Comm_f2c.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Info_free.3.rst b/docs/man-openmpi/man3/MPI_Info_free.3.rst new file mode 100644 index 00000000000..4e44e42e6cc --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Info_free.3.rst @@ -0,0 +1,77 @@ +.. _mpi_info_free: + + +MPI_Info_free +============= + +.. include_body + +:ref:`MPI_Info_free` - Frees an info object. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Info_free(MPI_Info *info) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_INFO_FREE(INFO, IERROR) + INTEGER INFO, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE mpi_f08 + MPI_Info_free(info, ierror) + TYPE(MPI_Info), INTENT(INOUT) :: info + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT/OUTPUT PARAMETER +---------------------- +* ``info``: Info object (handle). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Info_free` frees *info* and sets it to MPI_INFO_NULL. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Info_create` :ref:`MPI_Info_delete` :ref:`MPI_Info_dup` :ref:`MPI_Info_get` :ref:`MPI_Info_set` diff --git a/docs/man-openmpi/man3/MPI_Info_get.3.rst b/docs/man-openmpi/man3/MPI_Info_get.3.rst new file mode 100644 index 00000000000..a40219a0598 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Info_get.3.rst @@ -0,0 +1,98 @@ +.. _mpi_info_get: + + +MPI_Info_get +============ + +.. include_body + +:ref:`MPI_Info_get` - Retrieves the value associated with a key in an info +object. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Info_get(MPI_Info info, const char *key, int valuelen, char *value, int *flag) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_INFO_GET(INFO, KEY, VALUELEN, VALUE, FLAG, IERROR) + INTEGER INFO, VALUELEN, IERROR + CHARACTER*(*) KEY, VALUE + LOGICAL FLAG + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE mpi_f08 + MPI_Info_get(info, key, valuelen, value, flag, ierror) + TYPE(MPI_Info), INTENT(IN) :: info + CHARACTER(LEN=*), INTENT(IN) :: key + INTEGER, INTENT(IN) :: valuelen + CHARACTER(LEN=valuelen), INTENT(OUT) :: value + LOGICAL, INTENT(OUT) :: flag + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``info``: Info object (handle). +* ``key``: Key (string). +* ``valuelen``: Length of value arg (integer). + +OUTPUT PARAMETER +---------------- +* ``value``: Value (string). +* ``flag``: Returns true if key defined, false if not (boolean). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Info_get` retrieves the value associated with *key* in a previous +call to :ref:`MPI_Info_set`. If such a key exists, it sets *flag* to true and +returns the value in *value*; otherwise it sets *flag* to false and +leaves *value* unchanged. *valuelen* is the number of characters +available in value. If it is less than the actual size of the value, the +returned value is truncated. In C, *valuelen* should be one less than +the amount of allocated space to allow for the null terminator. + +If *key* is larger than MPI_MAX_INFO_KEY, the call is erroneous. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. 
seealso:: + :ref:`MPI_Info_create` :ref:`MPI_Info_delete` :ref:`MPI_Info_dup` :ref:`MPI_Info_free` + :ref:`MPI_Info_get_valuelen` :ref:`MPI_Info_get_nkeys` :ref:`MPI_Info_get_nthkey` + :ref:`MPI_Info_set` diff --git a/docs/man-openmpi/man3/MPI_Info_get_nkeys.3.rst b/docs/man-openmpi/man3/MPI_Info_get_nkeys.3.rst new file mode 100644 index 00000000000..2abb190e316 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Info_get_nkeys.3.rst @@ -0,0 +1,81 @@ +.. _mpi_info_get_nkeys: + + +MPI_Info_get_nkeys +================== + +.. include_body + +:ref:`MPI_Info_get_nkeys` - Gets the number of keys currently defined in an +info object. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Info_get_nkeys(MPI_Info info, int *nkeys) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_INFO_GET_NKEYS(INFO, NKEYS, IERROR) + INTEGER INFO, NKEYS, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Info_get_nkeys(info, nkeys, ierror) + TYPE(MPI_Info), INTENT(IN) :: info + INTEGER, INTENT(OUT) :: nkeys + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETER +--------------- +* ``info``: Info object (handle). + +OUTPUT PARAMETERS +----------------- +* ``nkeys``: Number of defined keys (integer). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Info_get_nkeys` returns the number of currently defined keys in +*info*. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. 
The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Info_get` :ref:`MPI_Info_get_nthkey` :ref:`MPI_Info_get_valuelen` diff --git a/docs/man-openmpi/man3/MPI_Info_get_nthkey.3.rst b/docs/man-openmpi/man3/MPI_Info_get_nthkey.3.rst new file mode 100644 index 00000000000..85b1827c39f --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Info_get_nthkey.3.rst @@ -0,0 +1,86 @@ +.. _mpi_info_get_nthkey: + + +MPI_Info_get_nthkey +=================== + +.. include_body + +:ref:`MPI_Info_get_nthkey` - Returns the *n*\ th defined key in *info*. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Info_get_nthkey(MPI_Info info, int n, char *key) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_INFO_GET_NTHKEY(INFO, N, KEY, IERROR) + INTEGER INFO, N, IERROR + CHARACTER*(*) KEY + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Info_get_nthkey(info, n, key, ierror) + TYPE(MPI_Info), INTENT(IN) :: info + INTEGER, INTENT(IN) :: n + CHARACTER(LEN=*), INTENT(OUT) :: key + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``info``: Info object (handle). +* ``n``: Key number (integer). + +OUTPUT PARAMETERS +----------------- +* ``key``: Key (string). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Info_get_nthkey` returns the *n*\ th defined key in *info*. Keys are +numbered 0...\ *N* - 1 where *N* is the value returned by +:ref:`MPI_Info_get_nkeys`. All keys between 0 and *N* - 1 are guaranteed to be +defined. The number of a given key does not change as long as *info* is +not modified with :ref:`MPI_Info_set` or :ref:`MPI_Info_delete`. 
+ + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Info_get` :ref:`MPI_Info_get_nkeys` :ref:`MPI_Info_get_valuelen` diff --git a/docs/man-openmpi/man3/MPI_Info_get_string.3.rst b/docs/man-openmpi/man3/MPI_Info_get_string.3.rst new file mode 100644 index 00000000000..ad7e6c04d90 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Info_get_string.3.rst @@ -0,0 +1,103 @@ +.. _mpi_info_get_string: + + +MPI_Info_get_string +=================== + +.. include_body + +:ref:`MPI_Info_get_string` - Retrieves the value associated with a key in +an info object. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Info_get_string(MPI_Info info, const char *key, int *buflen, char *value, int *flag) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_INFO_GET_STRING(INFO, KEY, BUFLEN, VALUE, FLAG, IERROR) + INTEGER INFO, BUFLEN, IERROR + CHARACTER*(*) KEY, VALUE + LOGICAL FLAG + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Info_get_string(info, key, buflen, value, flag, ierror) + TYPE(MPI_Info), INTENT(IN) :: info + CHARACTER(LEN=*), INTENT(IN) :: key + INTEGER, INTENT(INOUT) :: buflen + CHARACTER(LEN=valuelen), INTENT(OUT) :: value + LOGICAL, INTENT(OUT) :: flag + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``info``: Info object (handle). 
+* ``key``: Key (string). + +OUTPUT PARAMETER +---------------- +* ``buflen On entry, length of value arg. On return, set to required size to``: hold value string (integer). +* ``value``: Value (string). +* ``flag``: Returns true if key defined, false if not (boolean). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Info_get_string` retrieves the value associated with *key* from +*info*, if any. If such a key exists in info, it sets *flag* to true and +returns the value in *value*, otherwise it sets flag to false and leaves +value unchanged. *buflen* on input is the size of the provided buffer, +for the output of buflen it is the size of the buffer needed to store +the value string. If the buflen passed into the function is less than +the actual size needed to store the value string (including null +terminator in C), the value is truncated. On return, the value of +*buflen* will be set to the required buffer size to hold the value +string. If buflen is set to 0, value is not changed. In C, *buflen* +includes the required space for the null terminator. In C, this function +returns a null terminated string in all cases where the *buflen* input +value is greater than 0. + +If *key* is larger than MPI_MAX_INFO_KEY, the call is erroneous. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. 
seealso:: + :ref:`MPI_Info_create` :ref:`MPI_Info_delete` :ref:`MPI_Info_dup` :ref:`MPI_Info_free` + :ref:`MPI_Info_get_nkeys` :ref:`MPI_Info_get_nthkey` :ref:`MPI_Info_set` diff --git a/docs/man-openmpi/man3/MPI_Info_get_valuelen.3.rst b/docs/man-openmpi/man3/MPI_Info_get_valuelen.3.rst new file mode 100644 index 00000000000..9eb833414eb --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Info_get_valuelen.3.rst @@ -0,0 +1,93 @@ +.. _mpi_info_get_valuelen: + + +MPI_Info_get_valuelen +===================== + +.. include_body + +:ref:`MPI_Info_get_valuelen` - Retrieves the length of the key value +associated with an info object. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Info_get_valuelen(MPI_Info info, const char *key, + int *valuelen, int *flag) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_INFO_GET_VALUELEN(INFO, KEY, VALUELEN, FLAG, IERROR) + INTEGER INFO, VALUELEN, IERROR + LOGICAL FLAG + CHARACTER*(*) KEY + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Info_get_valuelen(info, key, valuelen, flag, ierror) + TYPE(MPI_Info), INTENT(IN) :: info + CHARACTER(LEN=*), INTENT(IN) :: key + INTEGER, INTENT(OUT) :: valuelen + LOGICAL, INTENT(OUT) :: flag + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``info``: Info object (handle). +* ``key``: Key (string). + +OUTPUT PARAMETERS +----------------- +* ``valuelen``: Length of value arg (integer). +* ``flag``: Returns true if key defined, false if not (boolean). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Info_get_valuelen` retrieves the length of the *value* associated +with *key*. If *key* is defined, *valuelen* is set to the length of its +associated value and *flag* is set to true. If *key* is not defined, +*valuelen* is not touched and *flag* is set to false. 
The length +returned in C does not include the end-of-string character. + +If *key* is larger than MPI_MAX_INFO_KEY, the call is erroneous. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Info_get` :ref:`MPI_Info_get_nkeys` :ref:`MPI_Info_get_nthkey` diff --git a/docs/man-openmpi/man3/MPI_Info_set.3.rst b/docs/man-openmpi/man3/MPI_Info_set.3.rst new file mode 100644 index 00000000000..400ce134405 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Info_set.3.rst @@ -0,0 +1,89 @@ +.. _mpi_info_set: + + +MPI_Info_set +============ + +.. include_body + +:ref:`MPI_Info_set` - Adds a key/value pair to *info*. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Info_set(MPI_Info info, char *key, char *value) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_INFO_SET(INFO, KEY, VALUE, IERROR) + INTEGER INFO, IERROR + CHARACTER*(*) KEY, VALUE + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Info_set(info, key, value, ierror) + TYPE(MPI_Info), INTENT(IN) :: info + CHARACTER(LEN=*), INTENT(IN) :: key, value + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT/OUTPUT PARAMETER +---------------------- +* ``info``: Info object (handle). + +INPUT PARAMETERS +---------------- +* ``key``: Key (string). +* ``value``: Value (string). 
+ +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Info_set` adds the (key,value) pair to *info* and overrides the value +if a value for the same key was previously set. The *key* and *value* +parameters are null-terminated strings in C. In Fortran, leading and +trailing spaces in *key* and *value* are stripped. If either *key* or +*value* is larger than the allowed maximums, the error MPI_ERR_INFO_KEY +or MPI_ERR_INFO_VALUE is raised, respectively. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Info_create` :ref:`MPI_Info_delete` :ref:`MPI_Info_dup` :ref:`MPI_Info_free` :ref:`MPI_Info_set` diff --git a/docs/man-openmpi/man3/MPI_Init.3.rst b/docs/man-openmpi/man3/MPI_Init.3.rst new file mode 100644 index 00000000000..42ade5d2d7f --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Init.3.rst @@ -0,0 +1,109 @@ +.. _mpi_init: + + +MPI_Init +======== + +.. include_body + +:ref:`MPI_Init` - Initializes the MPI execution environment + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Init(int *argc, char ***argv) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_INIT(IERROR) + INTEGER IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE mpi_f08 + MPI_Init(ierror) + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``argc``: C only: Pointer to the number of arguments. +* ``argv``: C only: Argument vector. + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +This routine, or :ref:`MPI_Init_thread`, must be called before most other MPI +routines are called. There are a small number of exceptions, such as +:ref:`MPI_Initialized` and :ref:`MPI_Finalized`. MPI can be initialized at most once; +subsequent calls to :ref:`MPI_Init` or :ref:`MPI_Init_thread` are erroneous. + +All MPI programs must contain a call to :ref:`MPI_Init` or :ref:`MPI_Init_thread`. +Open MPI accepts the C *argc* and *argv* arguments to main, but neither +modifies, interprets, nor distributes them: + +:: + + { + /* declare variables */ + MPI_Init(&argc, &argv); + /* parse arguments */ + /* main program */ + MPI_Finalize(); + } + + +NOTES +----- + +The Fortran version does not have provisions for *argc* and *argv* and +takes only IERROR. + +The MPI Standard does not say what a program can do before an :ref:`MPI_Init` +or after an :ref:`MPI_Finalize`. In the Open MPI implementation, it should do +as little as possible. In particular, avoid anything that changes the +external state of the program, such as opening files, reading standard +input, or writing to standard output. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. 
Note that MPI does not +guarantee that an MPI program can continue past an error. + +See the MPI man page for a full list of MPI error codes. + + +.. seealso:: + :ref:`MPI_Init_thread` :ref:`MPI_Initialized` :ref:`MPI_Finalize` :ref:`MPI_Finalized` diff --git a/docs/man-openmpi/man3/MPI_Init_thread.3.rst b/docs/man-openmpi/man3/MPI_Init_thread.3.rst new file mode 100644 index 00000000000..7b2f1b0f607 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Init_thread.3.rst @@ -0,0 +1,162 @@ +.. _mpi_init_thread: + + +MPI_Init_thread +=============== + +.. include_body + +:ref:`MPI_Init_thread` - Initializes the MPI execution environment + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Init_thread(int *argc, char ***argv, + int required, int *provided) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_INIT_THREAD(REQUIRED, PROVIDED, IERROR) + INTEGER REQUIRED, PROVIDED, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Init_thread(required, provided, ierror) + INTEGER, INTENT(IN) :: required + INTEGER, INTENT(OUT) :: provided + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``argc``: C only: Pointer to the number of arguments. +* ``argv``: C only: Argument vector. +* ``required``: Desired level of thread support (integer). + +OUTPUT PARAMETERS +----------------- +* ``provided``: Available level of thread support (integer). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +This routine, or :ref:`MPI_Init`, must be called before most other MPI routines +are called. There are a small number of exceptions, such as +:ref:`MPI_Initialized` and :ref:`MPI_Finalized`. MPI can be initialized at most once; +subsequent calls to :ref:`MPI_Init` or :ref:`MPI_Init_thread` are erroneous. 
+ +:ref:`MPI_Init_thread`, as compared to :ref:`MPI_Init`, has a provision to request a +certain level of thread support in *required*: + +MPI_THREAD_SINGLE + Only one thread will execute. + +MPI_THREAD_FUNNELED + If the process is multithreaded, only the thread that called + :ref:`MPI_Init_thread` will make MPI calls. + +MPI_THREAD_SERIALIZED + If the process is multithreaded, only one thread will make MPI + library calls at one time. + +MPI_THREAD_MULTIPLE + If the process is multithreaded, multiple threads may call MPI at + once with no restrictions. + +The level of thread support available to the program is set in +*provided*. In Open MPI, the value is dependent on how the library was +configured and built. Note that there is no guarantee that *provided* +will be greater than or equal to *required*. + +Also note that calling :ref:`MPI_Init_thread` with a *required* value of +MPI_THREAD_SINGLE is equivalent to calling :ref:`MPI_Init`. + +All MPI programs must contain a call to :ref:`MPI_Init` or :ref:`MPI_Init_thread`. +Open MPI accepts the C *argc* and *argv* arguments to main, but neither +modifies, interprets, nor distributes them: + +:: + + { + /* declare variables */ + MPI_Init_thread(&argc, &argv, req, &prov); + /* parse arguments */ + /* main program */ + MPI_Finalize(); + } + + +NOTES +----- + +The Fortran version does not have provisions for *argc* and *argv* and +takes only IERROR. + +It is the caller's responsibility to check the value of *provided*, as +it may be less than what was requested in *required*. + +The MPI Standard does not say what a program can do before an +:ref:`MPI_Init_thread` or after an :ref:`MPI_Finalize`. In the Open MPI +implementation, it should do as little as possible. In particular, avoid +anything that changes the external state of the program, such as opening +files, reading standard input, or writing to standard output. 
+ + +MPI_THREAD_MULTIPLE Support +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +MPI_THREAD_MULTIPLE support is included if the environment in which Open +MPI was built supports threading. You can check the output of +**ompi_info**\ (1) to see if Open MPI has MPI_THREAD_MULTIPLE support: + +:: + + shell$ ompi_info | grep "Thread support" + Thread support: posix (MPI_THREAD_MULTIPLE: yes, OPAL support: yes, OMPI progress: no, Event lib: yes) + shell$ + +The "MPI_THREAD_MULTIPLE: yes" portion of the above output indicates +that Open MPI was compiled with MPI_THREAD_MULTIPLE support. + +Note that there is a small performance penalty for using +MPI_THREAD_MULTIPLE support; latencies for short messages will be higher +as compared to when using MPI_THREAD_SINGLE, for example. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Init` :ref:`MPI_Initialized` :ref:`MPI_Finalize` :ref:`MPI_Finalized` diff --git a/docs/man-openmpi/man3/MPI_Initialized.3.rst b/docs/man-openmpi/man3/MPI_Initialized.3.rst new file mode 100644 index 00000000000..af5c2c3a6cf --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Initialized.3.rst @@ -0,0 +1,77 @@ +.. _mpi_initialized: + + +MPI_Initialized +=============== + +.. include_body + +:ref:`MPI_Initialized` - Checks whether MPI has been initialized + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Initialized(int *flag) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_INITIALIZED(FLAG, IERROR) + LOGICAL FLAG + INTEGER IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Initialized(flag, ierror) + LOGICAL, INTENT(OUT) :: flag + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +OUTPUT PARAMETERS +----------------- +* ``flag``: True if MPI has been initialized, and false otherwise (logical). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +This routine may be used to determine whether MPI has been initialized. +It is one of a small number of routines that may be called before MPI is +initialized and after MPI has been finalized (:ref:`MPI_Finalized` is another). + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Init` :ref:`MPI_Init_thread` :ref:`MPI_Finalize` :ref:`MPI_Finalized` diff --git a/docs/man-openmpi/man3/MPI_Intercomm_create.3.rst b/docs/man-openmpi/man3/MPI_Intercomm_create.3.rst new file mode 100644 index 00000000000..638ffcfd9f9 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Intercomm_create.3.rst @@ -0,0 +1,118 @@ +.. _mpi_intercomm_create: + + +MPI_Intercomm_create +==================== + +.. include_body + +:ref:`MPI_Intercomm_create` - Creates an intercommunicator from two +intracommunicators. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. 
code-block:: c + + #include + + int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader, + MPI_Comm peer_comm, int remote_leader, int tag, MPI_Comm *newintercomm) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_INTERCOMM_CREATE(LOCAL_COMM, LOCAL_LEADER, PEER_COMM, + REMOTE_LEADER, TAG, NEWINTERCOMM, IERROR) + INTEGER LOCAL_COMM, LOCAL_LEADER, PEER_COMM, REMOTE_LEADER + INTEGER TAG, NEWINTERCOMM, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Intercomm_create(local_comm, local_leader, peer_comm, remote_leader, + tag, newintercomm, ierror) + TYPE(MPI_Comm), INTENT(IN) :: local_comm, peer_comm + INTEGER, INTENT(IN) :: local_leader, remote_leader, tag + TYPE(MPI_Comm), INTENT(OUT) :: newintercomm + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``local_comm``: The communicator containing the process that initiates the inter-communication (handle). +* ``local_leader``: Rank of local group leader in local_comm (integer). +* ``peer_comm``: "Peer" communicator; significant only at the local_leader (handle). +* ``remote_leader``: Rank of remote group leader in peer_comm; significant only at the local_leader (integer). +* ``tag``: Message tag used to identify new intercommunicator (integer). + +OUTPUT PARAMETERS +----------------- +* ``newintercomm``: Created intercommunicator (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +This call creates an intercommunicator. It is collective over the union +of the local and remote groups. Processes should provide identical +local_comm and local_leader arguments within each group. Wildcards are +not permitted for remote_leader, local_leader, and tag. + +This call uses point-to-point communication with communicator peer_comm, +and with tag tag between the leaders. 
Thus, care must be taken that +there be no pending communication on peer_comm that could interfere with +this communication. + +If multiple MPI_Intercomm_creates are being made, they should use +different tags (more precisely, they should ensure that the local and +remote leaders are using different tags for each MPI_intercomm_create). + + +NOTES +----- + +We recommend using a dedicated peer communicator, such as a duplicate of +MPI_COMM_WORLD, to avoid trouble with peer communicators. + +The MPI 1.1 Standard contains two mutually exclusive comments on the +input intracommunicators. One says that their respective groups must be +disjoint; the other that the leaders can be the same process. After some +discussion by the MPI Forum, it has been decided that the groups must be +disjoint. Note that the **reason** given for this in the standard is +**not** the reason for this choice; rather, the **other** operations on +intercommunicators (like :ref:`MPI_Intercomm_merge` ) do not make sense if +the groups are not disjoint. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. 
seealso:: + :ref:`MPI_Intercomm_merge` :ref:`MPI_Comm_free` :ref:`MPI_Comm_remote_group` + :ref:`MPI_Comm_remote_size` diff --git a/docs/man-openmpi/man3/MPI_Intercomm_create_from_groups.3.rst b/docs/man-openmpi/man3/MPI_Intercomm_create_from_groups.3.rst new file mode 100644 index 00000000000..7b5d8dab269 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Intercomm_create_from_groups.3.rst @@ -0,0 +1,112 @@ +.. _mpi_intercomm_create_from_groups: + +MPI_Intercomm_create_from_groups +================================ + +.. include_body + +:ref:`MPI_Intercomm_create_from_groups` - Creates a new inter-communicator from +a local and remote group and stringtag + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. code:: c + + #include + + int MPI_Intercomm_create_from_groups(MPI_Group local_group, int local_leader, MPI_Group remote_group, int remote_leader, const char *stringtag, MPI_Info info, MPI_Errhandler errhandler, MPI_Comm *newintercomm) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_INTERCOMM_CREATE_FROM_GROUPS(LOCAL_GROUP, LOCAL_LEADER, REMOTE_GROUP, REMOTE_LEADER, STRINGTAG, INFO, ERRHANDLER, NEWINTERCOMM, IERROR) + INTEGER LOCAL_GROUP, LOCAL_LEADER, REMOTE_GROUP, REMOTE_LEADER, INFO, ERRHANDLER, NEWINTERCOMM, IERROR + CHARACTER*(*) STRINGTAG + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code:: fortran + + USE mpi_f08 + + MPI_Intercomm_create_from_groups(local_group, local_leader, remote_group, remote_leader, stringtag, info, errhandler, newintercomm, ierror) + TYPE(MPI_Group), INTENT(IN) :: local_group, remote_group + INTEGER, INTENT(IN) :: local_leader, remote_leader + CHARACTER(LEN=*), INTENT(IN) :: stringtag + TYPE(MPI_Info), INTENT(IN) :: info + TYPE(MPI_Errhandler), INTENT(IN) :: errhandler + TYPE(MPI_Comm), INTENT(OUT) :: newintercomm + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input Parameters +---------------- + +- local_group : Local group (handler) +- local_leader : rank of local group leader in local_group (integer) +- remote_group : Remote group (handler) +- remote_leader : rank of remote leader in remote_group, significant + only at local_leader (integer) +- stringtag : Unique identifier for this operation (string) +- info : info object (handler) +- errhandler : error handler to be attached to the new + inter-communicator (handle) + +Output Parameters +----------------- + +- newintercomm : New inter-communicator (handle). +- IERROR : Fortran only: Error status (integer). + +Description +----------- + +:ref:`MPI_Intercomm_create_from_groups` creates an inter-communicator. Unlike +:ref:`MPI_Intercomm_create`, this function uses as input previously defined, +disjoint local and remote groups. The calling MPI process must be a +member of the local group. The call is collective over the union of the +local and remote groups. All involved MPI processes shall provide an +identical value for the stringtag argument. Within each group, all MPI +processes shall provide identical local_group, local_leader arguments. +Wildcards are not permitted for the remote_leader or local_leader +arguments. The stringtag argument serves the same purpose as the +stringtag used in the :ref:`MPI_Comm_create_from_group` function; it +differentiates concurrent calls in a multithreaded environment. 
The +stringtag shall not exceed MPI_MAX_STRINGTAG_LEN characters in length. +For C, this includes space for a null terminating character. In the +event that MPI_GROUP_EMPTY is supplied as the local_group or +remote_group or both, then the call is a local operation and +MPI_COMM_NULL is returned as the newintercomm. + +Notes +----- + +The errhandler argument specifies an error handler to be attached to the +new inter-communicator. The info argument provides hints and assertions, +possibly MPI implementation dependent, which indicate desired +characteristics and guide communicator creation. MPI_MAX_STRINGTAG_LEN +shall have a value of at least 63. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. Before the +error value is returned, the current MPI error handler is called. By +default, this error handler aborts the MPI job, except for I/O function +errors. The error handler may be changed with :ref:`MPI_Comm_set_errhandler`; +the predefined error handler MPI_ERRORS_RETURN may be used to cause +error values to be returned. Note that MPI does not guarantee that an +MPI program can continue past an error. + + +.. seealso:: :ref:`MPI_Comm_create_from_group` diff --git a/docs/man-openmpi/man3/MPI_Intercomm_merge.3.rst b/docs/man-openmpi/man3/MPI_Intercomm_merge.3.rst new file mode 100644 index 00000000000..0fd3ccb0ea1 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Intercomm_merge.3.rst @@ -0,0 +1,91 @@ +.. _mpi_intercomm_merge: + + +MPI_Intercomm_merge +=================== + +.. include_body + +:ref:`MPI_Intercomm_merge` - Creates an intracommunicator from an +intercommunicator. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Intercomm_merge(MPI_Comm intercomm, int high, + MPI_Comm *newintracomm) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! 
or the older form: INCLUDE 'mpif.h' + MPI_INTERCOMM_MERGE(INTERCOMM, HIGH, NEWINTRACOMM, IERROR) + INTEGER INTERCOMM, NEWINTRACOMM, IERROR + LOGICAL HIGH + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Intercomm_merge(intercomm, high, newintracomm, ierror) + TYPE(MPI_Comm), INTENT(IN) :: intercomm + LOGICAL, INTENT(IN) :: high + TYPE(MPI_Comm), INTENT(OUT) :: newintracomm + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``intercomm``: Intercommunicator (type indicator). +* ``high``: Used to order the groups of the two intracommunicators within comm when creating the new communicator (type indicator). + +OUTPUT PARAMETERS +----------------- +* ``newintracomm``: Created intracommunicator (type indicator). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +This function creates an intracommunicator from the union of the two +groups that are associated with intercomm. All processes should provide +the same high value within each of the two groups. If processes in one +group provide the value high = false and processes in the other group +provide the value high = true, then the union orders the "low" group +before the "high" group. If all processes provide the same high +argument, then the order of the union is arbitrary. This call is +blocking and collective within the union of the two groups. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. 
+ + +.. seealso:: + :ref:`MPI_Intercomm_create` :ref:`MPI_Comm_free` diff --git a/docs/man-openmpi/man3/MPI_Iprobe.3.rst b/docs/man-openmpi/man3/MPI_Iprobe.3.rst new file mode 100644 index 00000000000..0e2994e672b --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Iprobe.3.rst @@ -0,0 +1,131 @@ +.. _mpi_iprobe: + + +MPI_Iprobe +========== + +.. include_body + +:ref:`MPI_Iprobe` - Nonblocking test for a message. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Iprobe(int source, int tag, MPI_Comm comm, int *flag, + MPI_Status *status) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_IPROBE(SOURCE, TAG, COMM, FLAG, STATUS, IERROR) + LOGICAL FLAG + INTEGER SOURCE, TAG, COMM, STATUS(MPI_STATUS_SIZE), IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Iprobe(source, tag, comm, flag, status, ierror) + INTEGER, INTENT(IN) :: source, tag + TYPE(MPI_Comm), INTENT(IN) :: comm + LOGICAL, INTENT(OUT) :: flag + TYPE(MPI_Status) :: status + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``source``: Source rank or MPI_ANY_SOURCE (integer). +* ``tag``: Tag value or MPI_ANY_TAG (integer). +* ``comm``: Communicator (handle). + +OUTPUT PARAMETERS +----------------- +* ``flag``: Message-waiting flag (logical). +* ``status``: Status object (status). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +The :ref:`MPI_Probe` and :ref:`MPI_Iprobe` operations allow checking of incoming +messages without actual receipt of them. The user can then decide how to +receive them, based on the information returned by the probe (basically, +the information returned by status). In particular, the user may +allocate memory for the receive buffer, according to the length of the +probed message. 
+ +MPI_Iprobe(source, tag, comm, flag, status) returns flag = true if there +is a message that can be received and that matches the pattern specified +by the arguments source, tag, and comm. The call matches the same +message that would have been received by a call to MPI_Recv(..., source, +tag, comm, status) executed at the same point in the program, and +returns in status the same value that would have been returned by +MPI_Recv(). Otherwise, the call returns flag = false, and leaves status +undefined. + +If :ref:`MPI_Iprobe` returns flag = true, then the content of the status object +can be subsequently accessed as described in Section 3.2.5 of the MPI-1 +Standard, "Return Status," to find the source, tag, and length of the +probed message. + +A subsequent receive executed with the same context, and the source and +tag returned in status by :ref:`MPI_Iprobe` will receive the message that was +matched by the probe if no other intervening receive occurs after the +probe. If the receiving process is multithreaded, it is the user's +responsibility to ensure that the last condition holds. + +The source argument of :ref:`MPI_Probe` can be MPI_ANY_SOURCE, and the tag +argument can be MPI_ANY_TAG, so that one can probe for messages from an +arbitrary source and/or with an arbitrary tag. However, a specific +communication context must be provided with the comm argument. + +If your application does not need to examine the *status* field, you can +save resources by using the predefined constant MPI_STATUS_IGNORE as a +special value for the *status* argument. + +It is not necessary to receive a message immediately after it has been +probed for, and the same message may be probed for several times before +it is received. + + +NOTE +---- + +Users of libmpi-mt should remember that two threads may do an :ref:`MPI_Iprobe` +that actually returns true for the same message for both threads. 
+ + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Probe` :ref:`MPI_Cancel` diff --git a/docs/man-openmpi/man3/MPI_Irecv.3.rst b/docs/man-openmpi/man3/MPI_Irecv.3.rst new file mode 100644 index 00000000000..5f6c10f9945 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Irecv.3.rst @@ -0,0 +1,102 @@ +.. _mpi_irecv: + + +MPI_Irecv +========= + +.. include_body + +:ref:`MPI_Irecv` - Starts a standard-mode, nonblocking receive. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Irecv(void *buf, int count, MPI_Datatype datatype, + int source, int tag, MPI_Comm comm, MPI_Request *request) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_IRECV(BUF, COUNT, DATATYPE, SOURCE, TAG, COMM, REQUEST, + IERROR) + BUF(*) + INTEGER COUNT, DATATYPE, SOURCE, TAG, COMM, REQUEST, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Irecv(buf, count, datatype, source, tag, comm, request, ierror) + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: buf + INTEGER, INTENT(IN) :: count, source, tag + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``buf``: Initial address of receive buffer (choice). 
+* ``count``: Number of elements in receive buffer (integer). +* ``datatype``: Datatype of each receive buffer element (handle). +* ``source``: Rank of source (integer). +* ``tag``: Message tag (integer). +* ``comm``: Communicator (handle). + +OUTPUT PARAMETERS +----------------- +* ``request``: Communication request (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +Nonblocking calls allocate a communication request object and associate +it with the request handle (the argument request). The request can be +used later to query the status of the communication or wait for its +completion. + +A nonblocking receive call indicates that the system may start writing +data into the receive buffer. The receiver should not access any part of +the receive buffer after a nonblocking receive operation is called, +until the receive completes. + +A receive request can be determined to be completed by calling the +:ref:`MPI_Wait`, :ref:`MPI_Waitany`, :ref:`MPI_Test`, or :ref:`MPI_Testany` with request returned by +this function. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Recv` :ref:`MPI_Probe` :ref:`MPI_Test` :ref:`MPI_Testany` :ref:`MPI_Wait` :ref:`MPI_Waitany` diff --git a/docs/man-openmpi/man3/MPI_Ireduce.3.rst b/docs/man-openmpi/man3/MPI_Ireduce.3.rst new file mode 100644 index 00000000000..1a81bc0cbd7 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Ireduce.3.rst @@ -0,0 +1,9 @@ +.. 
_mpi_ireduce: + +MPI_Ireduce +=========== + .. include_body + +.. include:: ../man3/MPI_Reduce.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Ireduce_scatter.3.rst b/docs/man-openmpi/man3/MPI_Ireduce_scatter.3.rst new file mode 100644 index 00000000000..8eac19a9484 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Ireduce_scatter.3.rst @@ -0,0 +1,9 @@ +.. _mpi_ireduce_scatter: + +MPI_Ireduce_scatter +=================== + .. include_body + +.. include:: ../man3/MPI_Reduce_scatter.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Ireduce_scatter_block.3.rst b/docs/man-openmpi/man3/MPI_Ireduce_scatter_block.3.rst new file mode 100644 index 00000000000..771e9d30c96 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Ireduce_scatter_block.3.rst @@ -0,0 +1,9 @@ +.. _mpi_ireduce_scatter_block: + +MPI_Ireduce_scatter_block +========================= + .. include_body + +.. include:: ../man3/MPI_Reduce_scatter_block.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Irsend.3.rst b/docs/man-openmpi/man3/MPI_Irsend.3.rst new file mode 100644 index 00000000000..ab2d3575068 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Irsend.3.rst @@ -0,0 +1,97 @@ +.. _mpi_irsend: + + +MPI_Irsend +========== + +.. include_body + +:ref:`MPI_Irsend` - Starts a ready-mode nonblocking send. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Irsend(const void *buf, int count, MPI_Datatype datatype, int dest, + int tag, MPI_Comm comm, MPI_Request *request) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_IRSEND(BUF, COUNT, DATATYPE, DEST, TAG, COMM, REQUEST, IERROR) + BUF(*) + INTEGER COUNT, DATATYPE, DEST, TAG, COMM, REQUEST, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE mpi_f08 + MPI_Irsend(buf, count, datatype, dest, tag, comm, request, ierror) + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: buf + INTEGER, INTENT(IN) :: count, dest, tag + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``buf``: Initial address of send buffer (choice). +* ``count``: Number of elements in send buffer (integer). +* ``datatype``: Datatype of each send buffer element (handle). +* ``dest``: Rank of destination (integer). +* ``tag``: Message tag (integer). +* ``comm``: Communicator (handle). + +OUTPUT PARAMETERS +----------------- +* ``request``: Communication request (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Irsend` starts a ready-mode nonblocking send. Nonblocking calls +allocate a communication request object and associate it with the +request handle (the argument request). The request can be used later to +query the status of the communication or to wait for its completion. + +A nonblocking send call indicates that the system may start copying data +out of the send buffer. The sender should not modify any part of the +send buffer after a nonblocking send operation is called, until the send +completes. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. 
seealso:: + :ref:`MPI_Rsend` diff --git a/docs/man-openmpi/man3/MPI_Is_thread_main.3.rst b/docs/man-openmpi/man3/MPI_Is_thread_main.3.rst new file mode 100644 index 00000000000..582b3714db9 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Is_thread_main.3.rst @@ -0,0 +1,79 @@ +.. _mpi_is_thread_main: + + +MPI_Is_thread_main +================== + +.. include_body + +:ref:`MPI_Is_thread_main` - Determines if thread called :ref:`MPI_Init` + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Is_thread_main(int *flag) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_IS_THREAD_MAIN(FLAG, IERROR) + LOGICAL FLAG + INTEGER IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Is_thread_main(flag, ierror) + LOGICAL, INTENT(OUT) :: flag + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +OUTPUT PARAMETERS +----------------- +* ``flag``: True if calling thread is main thread (boolean). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Is_thread_main` is called by a thread to find out whether the caller +is the main thread (that is, the thread that called :ref:`MPI_Init` or +MPI_Init_thread). + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + +See the MPI man page for a full list of MPI error codes. + + +.. 
seealso:: + :ref:`MPI_Init` :ref:`MPI_Init_thread` diff --git a/docs/man-openmpi/man3/MPI_Iscan.3.rst b/docs/man-openmpi/man3/MPI_Iscan.3.rst new file mode 100644 index 00000000000..bbe3f1986ee --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Iscan.3.rst @@ -0,0 +1,9 @@ +.. _mpi_iscan: + +MPI_Iscan +========= + .. include_body + +.. include:: ../man3/MPI_Scan.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Iscatter.3.rst b/docs/man-openmpi/man3/MPI_Iscatter.3.rst new file mode 100644 index 00000000000..73dc0bebb6c --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Iscatter.3.rst @@ -0,0 +1,9 @@ +.. _mpi_iscatter: + +MPI_Iscatter +============ + .. include_body + +.. include:: ../man3/MPI_Scatter.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Iscatterv.3.rst b/docs/man-openmpi/man3/MPI_Iscatterv.3.rst new file mode 100644 index 00000000000..3108ee65ce9 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Iscatterv.3.rst @@ -0,0 +1,9 @@ +.. _mpi_iscatterv: + +MPI_Iscatterv +============= + .. include_body + +.. include:: ../man3/MPI_Scatterv.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Isend.3.rst b/docs/man-openmpi/man3/MPI_Isend.3.rst new file mode 100644 index 00000000000..23b45bb9796 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Isend.3.rst @@ -0,0 +1,101 @@ +.. _mpi_isend: + + +MPI_Isend +========= + +.. include_body + +:ref:`MPI_Isend` - Starts a standard-mode, nonblocking send. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Isend(const void *buf, int count, MPI_Datatype datatype, int dest, + int tag, MPI_Comm comm, MPI_Request *request) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_ISEND(BUF, COUNT, DATATYPE, DEST, TAG, COMM, REQUEST, IERROR) + BUF(*) + INTEGER COUNT, DATATYPE, DEST, TAG, COMM, REQUEST, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE mpi_f08 + MPI_Isend(buf, count, datatype, dest, tag, comm, request, ierror) + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: buf + INTEGER, INTENT(IN) :: count, dest, tag + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``buf``: Initial address of send buffer (choice). +* ``count``: Number of elements in send buffer (integer). +* ``datatype``: Datatype of each send buffer element (handle). +* ``dest``: Rank of destination (integer). +* ``tag``: Message tag (integer). +* ``comm``: Communicator (handle). + +OUTPUT PARAMETERS +----------------- +* ``request``: Communication request (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Isend` starts a standard-mode, nonblocking send. Nonblocking calls +allocate a communication request object and associate it with the +request handle (the argument request). The request can be used later to +query the status of the communication or wait for its completion. + +A nonblocking send call indicates that the system may start copying data +out of the send buffer. The sender should not modify any part of the +send buffer after a nonblocking send operation is called, until the send +completes. + +A send request can be determined to be completed by calling the +:ref:`MPI_Wait`, :ref:`MPI_Waitany`, :ref:`MPI_Test`, or :ref:`MPI_Testany` with the request returned by +this function. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. 
The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Send` :ref:`MPI_Wait` :ref:`MPI_Waitany` :ref:`MPI_Test` :ref:`MPI_Testany` diff --git a/docs/man-openmpi/man3/MPI_Isendrecv.3.rst b/docs/man-openmpi/man3/MPI_Isendrecv.3.rst new file mode 100644 index 00000000000..04c1d556ce7 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Isendrecv.3.rst @@ -0,0 +1,123 @@ +.. _mpi_isendrecv: + + +MPI_Isendrecv +============= + +.. include_body + +:ref:`MPI_Isendrecv` - Sends and receives a message. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Isendrecv(const void *sendbuf, int sendcount, MPI_Datatype sendtype, + int dest, int sendtag, void *recvbuf, int recvcount, + MPI_Datatype recvtype, int source, int recvtag, + MPI_Comm comm, MPI_Request *request) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_ISENDRECV(SENDBUF, SENDCOUNT, SENDTYPE, DEST, SENDTAG, + RECVBUF, RECVCOUNT, RECVTYPE, SOURCE, RECVTAG, COMM, + REQUEST, IERROR) + SENDBUF(*), RECVBUF(*) + INTEGER SENDCOUNT, SENDTYPE, DEST, SENDTAG + INTEGER RECVCOUNT, RECVTYPE, SOURCE, RECVTAG, COMM + INTEGER REQUEST, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Isendrecv(sendbuf, sendcount, sendtype, dest, sendtag, recvbuf, + recvcount, recvtype, source, recvtag, comm, request, ierror) + TYPE(*), DIMENSION(..), INTENT(IN) :: sendbuf + TYPE(*), DIMENSION(..) 
:: recvbuf + INTEGER, INTENT(IN) :: sendcount, dest, sendtag, recvcount, source, + recvtag + TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Request) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``sendbuf``: Initial address of send buffer (choice). +* ``sendcount``: Number of elements to send (integer). +* ``sendtype``: Type of elements in send buffer (handle). +* ``dest``: Rank of destination (integer). +* ``sendtag``: Send tag (integer). +* ``recvcount``: Maximum number of elements to receive (integer). +* ``recvtype``: Type of elements in receive buffer (handle). +* ``source``: Rank of source (integer). +* ``recvtag``: Receive tag (integer). +* ``comm``: Communicator (handle). + +OUTPUT PARAMETERS +----------------- +* ``recvbuf``: Initial address of receive buffer (choice). +* ``request``: Communication request (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +The non-blocking send-receive operations combine in one call the sending +of a message to one destination and the receiving of another message, +from another process. The two (source and destination) are possibly the +same. This operation is useful for executing a shift operation across a +chain of processes. The send-receive operation can be used in +conjunction with the functions described in the "Process Topologies" +chapter of the MPI Standard in order to perform shifts on various +logical topologies. + +A message sent by a send-receive operation can be received by a regular +receive operation or probed by a probe operation; a send-receive +operation can receive a message sent by a regular send operation. + +:ref:`MPI_Isendrecv` executes a non-blocking send and receive operation. Both +send and receive use the same communicator, but possibly different tags. 
+The send buffer and receive buffers must be disjoint, and may have +different lengths and datatypes. + +A non-blocking send-receive request can be determined to be completed by +calling the :ref:`MPI_Wait`, :ref:`MPI_Waitany`, :ref:`MPI_Test`, or :ref:`MPI_Testany` with the +request returned by this function. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Isendrecv_replace` :ref:`MPI_Sendrecv` :ref:`MPI_Sendrecv_replace` diff --git a/docs/man-openmpi/man3/MPI_Isendrecv_replace.3.rst b/docs/man-openmpi/man3/MPI_Isendrecv_replace.3.rst new file mode 100644 index 00000000000..aacddba40dd --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Isendrecv_replace.3.rst @@ -0,0 +1,120 @@ +.. _mpi_isendrecv_replace: + + +MPI_Isendrecv_replace +===================== + +.. include_body + +:ref:`MPI_Isendrecv_replace` - Sends and receives a message using a single +buffer. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Isendrecv_replace(void *buf, int count, MPI_Datatype datatype, + int dest, int sendtag, int source, int recvtag, MPI_Comm comm, + MPI_Request *request) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! 
or the older form: INCLUDE 'mpif.h' + MPI_ISENDRECV_REPLACE(BUF, COUNT, DATATYPE, DEST, SENDTAG, SOURCE, + RECVTAG, COMM, REQUEST, IERROR) + BUF(*) + INTEGER COUNT, DATATYPE, DEST, SENDTAG + INTEGER SOURCE, RECVTAG, COMM + INTEGER REQUEST, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Isendrecv_replace(buf, count, datatype, dest, sendtag, source, recvtag, + comm, request, ierror) + TYPE(*), DIMENSION(..) :: buf + INTEGER, INTENT(IN) :: count, dest, sendtag, source, recvtag + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Request) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT/OUTPUT PARAMETER +---------------------- +* ``buf``: Initial address of send and receive buffer (choice). + +INPUT PARAMETERS +---------------- +* ``count``: Number of elements in send and receive buffer (integer). +* ``datatype``: Type of elements to send and receive (handle). +* ``dest``: Rank of destination (integer). +* ``sendtag``: Send message tag (integer). +* ``source``: Rank of source (integer). +* ``recvtag``: Receive message tag (integer). +* ``comm``: Communicator (handle). + +OUTPUT PARAMETERS +----------------- +* ``request``: Communication request (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +The non-blocking send-receive operations combine in one call the sending +of a message to one destination and the receiving of another message, +from another process. The two (source and destination) are possibly the +same. A send-receive operation is useful for executing a shift operation +across a chain of processes. The send-receive operation can be used in +conjunction with the functions described in the "Process Topologies" +chapter of the MPI Standard in order to perform shifts on various +logical topologies. Also, a send-receive operation is useful for +implementing remote procedure calls. 
+ +A message sent by a send-receive operation can be received by a regular +receive operation or probed by a probe operation; a send-receive +operation can receive a message sent by a regular send operation. + +:ref:`MPI_Isendrecv_replace` executes a non-blocking send and receive. The same +buffer is used both for the send and for the receive, so that the +message sent is replaced by the message received. + +A non-blocking send-receive request can be determined to be completed by +calling the :ref:`MPI_Wait`, :ref:`MPI_Waitany`, :ref:`MPI_Test`, or :ref:`MPI_Testany` with the +request returned by this function. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Isendrecv` :ref:`MPI_Sendrecv` :ref:`MPI_Sendrecv_replace` diff --git a/docs/man-openmpi/man3/MPI_Issend.3.rst b/docs/man-openmpi/man3/MPI_Issend.3.rst new file mode 100644 index 00000000000..a141fea8206 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Issend.3.rst @@ -0,0 +1,99 @@ +.. _mpi_issend: + + +MPI_Issend +========== + +.. include_body + +:ref:`MPI_Issend` - Starts a nonblocking synchronous send. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Issend(const void *buf, int count, MPI_Datatype datatype, int dest, + int tag, MPI_Comm comm, MPI_Request *request) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! 
or the older form: INCLUDE 'mpif.h' + MPI_ISSEND(BUF, COUNT, DATATYPE, DEST, TAG, COMM, REQUEST, IERROR) + BUF(*) + INTEGER COUNT, DATATYPE, DEST, TAG, COMM, REQUEST, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Issend(buf, count, datatype, dest, tag, comm, request, ierror) + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: buf + INTEGER, INTENT(IN) :: count, dest, tag + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``buf``: Initial address of send buffer (choice). +* ``count``: Number of elements in send buffer (integer). +* ``datatype``: Datatype of each send buffer element (handle). +* ``dest``: Rank of destination (integer). +* ``tag``: Message tag (integer). +* ``comm``: Communicator (handle). + +OUTPUT PARAMETERS +----------------- +* ``request``: Communication request (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +Starts a synchronous mode, nonblocking send. + +Nonblocking calls allocate a communication request object and associate +it with the request handle (the argument request). The request can be +used later to query the status of the communication or wait for its +completion. + +A nonblocking send call indicates that the system may start copying data +out of the send buffer. The sender should not modify any part of the +send buffer after a nonblocking send operation is called, until the send +completes. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. 
The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Ssend` diff --git a/docs/man-openmpi/man3/MPI_Keyval_create.3.rst b/docs/man-openmpi/man3/MPI_Keyval_create.3.rst new file mode 100644 index 00000000000..cfb4a8e629f --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Keyval_create.3.rst @@ -0,0 +1,168 @@ +.. _mpi_keyval_create: + + +MPI_Keyval_create +================= + +.. include_body + +:ref:`MPI_Keyval_create` - Generates a new attribute key -- use of this +routine is deprecated. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Keyval_create(MPI_Copy_function *copy_fn, + MPI_Delete_function *delete_fn, int *keyval, void *extra_state) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + INCLUDE 'mpif.h' + MPI_KEYVAL_CREATE(COPY_FN, DELETE_FN, KEYVAL, EXTRA_STATE, IERROR) + EXTERNAL COPY_FN, DELETE_FN + INTEGER KEYVAL, EXTRA_STATE, IERROR + + +INPUT PARAMETERS +---------------- +* ``copy_fn``: Copy callback function for keyval. +* ``delete_fn``: Delete callback function for keyval. +* ``extra_state``: Extra state for callback functions. + +OUTPUT PARAMETERS +----------------- +* ``keyval``: Key value for future access (integer). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +Note that use of this routine is *deprecated* as of MPI-2. Please use +:ref:`MPI_Comm_create_keyval` instead. + +Generates a new attribute key. Keys are locally unique in a process and +opaque to the user, though they are explicitly stored in integers. Once +allocated, the key value can be used to associate attributes and access +them on any locally defined communicator. + +The copy_fn function is invoked when a communicator is duplicated by +:ref:`MPI_COMM_DUP`. 
copy_fn should be of type MPI_Copy_function, which is +defined as follows: + +:: + + typedef int MPI_Copy_function(MPI_Comm oldcomm, int keyval, + void *extra_state, void *attribute_val_in, + void *attribute_val_out, int *flag) + +A Fortran declaration for such a function is as follows: + +.. code-block:: fortran + + SUBROUTINE COPY_FUNCTION(OLDCOMM, KEYVAL, EXTRA_STATE, ATTRIBUTE_VAL_IN, + ATTRIBUTE_VAL_OUT, FLAG, IERR) + INTEGER OLDCOMM, KEYVAL, EXTRA_STATE, + ATTRIBUTE_VAL_IN, ATTRIBUTE_VAL_OUT, IERR + LOGICAL FLAG + +The copy callback function is invoked for each key value in oldcomm in +arbitrary order. Each call to the copy callback is made with a key value +and its corresponding attribute. If it returns flag = 0, then the +attribute is deleted in the duplicated communicator. Otherwise ( flag = +1), the new attribute value is set to the value returned in +attribute_val_out. The function returns MPI_SUCCESS on success and an +error code on failure (in which case :ref:`MPI_Comm_dup` will fail). + +copy_fn may be specified as MPI_NULL_COPY_FN or MPI_DUP_FN from either C +or Fortran; MPI_NULL_COPY_FN is a function that does nothing other than +return flag = 0, and MPI_SUCCESS. MPI_DUP_FN is a simple-minded copy +function that sets flag = 1, returns the value of attribute_val_in in +attribute_val_out, and returns MPI_SUCCESS. + + +NOTES +----- + +Key values are global (available for any and all communicators). + +There are subtle differences between C and Fortran that require that the +copy_fn be written in the same language that :ref:`MPI_Keyval_create` is called +from. This should not be a problem for most users; only programmers +using both Fortran and C in the same program need to be sure that they +follow this rule. + +Even though both formal arguments attribute_val_in and attribute_val_out +are of type void*, their usage differs. 
The C copy function is passed by +MPI in attribute_val_in the value of the attribute, and in +attribute_val_out the address of the attribute, so as to allow the +function to return the (new) attribute value. The use of type void\* for +both is to avoid messy type casts. + +A valid copy function is one that completely duplicates the information +by making a full duplicate copy of the data structures implied by an +attribute; another might just make another reference to that data +structure, while using a reference-count mechanism. Other types of +attributes might not copy at all (they might be specific to oldcomm +only). + +Analogous to copy_fn is a callback deletion function, defined as +follows. The delete_fn function is invoked when a communicator is +deleted by :ref:`MPI_Comm_free` or when a call is made explicitly to +:ref:`MPI_Attr_delete`. delete_fn should be of type MPI_Delete_function, which +is defined as follows: + +:: + + typedef int MPI_Delete_function(MPI_Comm comm, int keyval, + void *attribute_val, void *extra_state); + +A Fortran declaration for such a function is as follows: + +.. code-block:: fortran + + SUBROUTINE DELETE_FUNCTION(COMM, KEYVAL,ATTRIBUTE_VAL, EXTRA_STATE, IERR) + INTEGER COMM, KEYVAL, ATTRIBUTE_VAL, EXTRA_STATE, IERR + +This function is called by :ref:`MPI_Comm_free`, :ref:`MPI_Attr_delete`, and +:ref:`MPI_Attr_put` to do whatever is needed to remove an attribute. The +function returns MPI_SUCCESS on success and an error code on failure (in +which case :ref:`MPI_COMM_FREE` will fail). + +delete_fn may be specified as MPI_NULL_DELETE_FN from either C or +FORTRAN; MPI_NULL_DELETE_FN is a function that does nothing, other than +returning MPI_SUCCESS. + +The special key value MPI_KEYVAL_INVALID is never returned by +:ref:`MPI_Keyval_create`. Therefore, it can be used for static initialization +of key values. 
+ + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Keyval_free` :ref:`MPI_Comm_create_keyval` diff --git a/docs/man-openmpi/man3/MPI_Keyval_free.3.rst b/docs/man-openmpi/man3/MPI_Keyval_free.3.rst new file mode 100644 index 00000000000..28978a403e8 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Keyval_free.3.rst @@ -0,0 +1,82 @@ +.. _mpi_keyval_free: + + +MPI_Keyval_free +=============== + +.. include_body + +:ref:`MPI_Keyval_free` - Frees attribute key for communicator cache +attribute -- use of this routine is deprecated. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Keyval_free(int *keyval) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + INCLUDE 'mpif.h' + MPI_KEYVAL_FREE(KEYVAL, IERROR) + INTEGER KEYVAL, IERROR + + +INPUT PARAMETER +--------------- +* ``keyval``: Frees the integer key value (integer). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +Note that use of this routine is *deprecated* as of MPI-2. Please use +:ref:`MPI_Comm_free_keyval` instead. + +Frees an extant attribute key. This function sets the value of keyval to +MPI_KEYVAL_INVALID. Note that it is not erroneous to free an attribute +key that is in use, because the actual free does not transpire until +after all references (in other communicators on the process) to the key +have been freed. 
These references need to be explicitly freed by the +program, either via calls to :ref:`MPI_Attr_delete` that free one attribute +instance, or by calls to :ref:`MPI_Comm_free` that free all attribute instances +associated with the freed communicator. + + +NOTE +---- + +Key values are global (they can be used with any and all communicators). + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Keyval_create` :ref:`MPI_Comm_free_keyval` diff --git a/docs/man-openmpi/man3/MPI_Lookup_name.3.rst b/docs/man-openmpi/man3/MPI_Lookup_name.3.rst new file mode 100644 index 00000000000..9780f882736 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Lookup_name.3.rst @@ -0,0 +1,138 @@ +.. _mpi_lookup_name: + + +MPI_Lookup_name +=============== + +.. include_body + +:: + + MPI_Lookup_name - Finds port associated with a service name + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Lookup_name(const char *service_name, MPI_Info info, + char *port_name) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_LOOKUP_NAME(SERVICE_NAME, INFO, PORT_NAME, IERROR) + CHARACTER*(*) SERVICE_NAME, PORT_NAME + INTEGER INFO, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE mpi_f08 + MPI_Lookup_name(service_name, info, port_name, ierror) + CHARACTER(LEN=*), INTENT(IN) :: service_name + TYPE(MPI_Info), INTENT(IN) :: info + CHARACTER(LEN=MPI_MAX_PORT_NAME), INTENT(OUT) :: port_name + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``service_name``: A service name (string). +* ``info``: Options to the name service functions (handle). + +OUTPUT PARAMETERS +----------------- +* ``port_name``: a port name (string). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +This function retrieves a *port_name* published under *service_name* by +a previous invocation of :ref:`MPI_Publish_name`. The application must supply a +*port_name* buffer large enough to hold the largest possible port name +(i.e., MPI_MAX_PORT_NAME bytes). + + +INFO ARGUMENTS +-------------- + +The following keys for *info* are recognized: + +:: + + Key Type Description + --- ---- ----------- + + ompi_lookup_order char * Resolution order for name lookup. + +The *ompi_lookup_order* info key can specify one of four valid string +values (see the NAME SCOPE section below for more information on name +scopes): + +*local*: Only search the local scope for name resolution. + +*global*: Only search the global scope for name resolution. + +*local,global*: Search the local scope for name resolution. If + not found, try searching the global scope for name resolution. This + behavior is the default if the *ompi_lookup_order* info key is not + specified. + +*global,local*: Search the global scope for name resolution. If + not found, try searching the local scope for name resolution. + +If no info key is provided, the search will first check to see if a +global server has been specified and is available. If so, then the +search will default to global scope first, followed by local. Otherwise, +the search will default to local. 
+ + +NAME SCOPE +---------- + +Open MPI supports two name scopes: *global* and *local*. Local scope +values are placed in a data store located on the mpirun of the calling +process' job, while global scope values reside on a central server. +Calls to :ref:`MPI_Unpublish_name` must correctly specify the scope to be used +in finding the value to be removed. The function will return an error if +the specified service name is not found on the indicated location. + +For a more detailed description of scoping rules, please see the +:ref:`MPI_Publish_name` man page. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + +See the MPI man page for a full list of MPI error codes. + + +.. seealso:: + :ref:`MPI_Publish_name` :ref:`MPI_Open_port` diff --git a/docs/man-openmpi/man3/MPI_Message_c2f.3.rst b/docs/man-openmpi/man3/MPI_Message_c2f.3.rst new file mode 100644 index 00000000000..6ae5a1e9890 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Message_c2f.3.rst @@ -0,0 +1,9 @@ +.. _mpi_message_c2f: + +MPI_Message_c2f +=============== + .. include_body + +.. include:: ../man3/MPI_Comm_f2c.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Message_f2c.3.rst b/docs/man-openmpi/man3/MPI_Message_f2c.3.rst new file mode 100644 index 00000000000..197125a0e59 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Message_f2c.3.rst @@ -0,0 +1,9 @@ +.. _mpi_message_f2c: + +MPI_Message_f2c +=============== + .. include_body + +.. 
include:: ../man3/MPI_Comm_f2c.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Mprobe.3.rst b/docs/man-openmpi/man3/MPI_Mprobe.3.rst new file mode 100644 index 00000000000..7cd08dbc9c6 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Mprobe.3.rst @@ -0,0 +1,101 @@ +.. _mpi_mprobe: + + +MPI_Mprobe +========== + +.. include_body + +:ref:`MPI_Mprobe` - Blocking matched probe for a message. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Mprobe(int source, int tag, MPI_Comm comm, + MPI_Message *message, MPI_Status *status) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_MPROBE(SOURCE, TAG, COMM, MESSAGE, STATUS, IERROR) + INTEGER SOURCE, TAG, COMM, MESSAGE + INTEGER STATUS(MPI_STATUS_SIZE), IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Mprobe(source, tag, comm, message, status, ierror) + INTEGER, INTENT(IN) :: source, tag + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Message), INTENT(OUT) :: message + TYPE(MPI_Status) :: status + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``source``: Source rank or MPI_ANY_SOURCE (integer). +* ``tag``: Tag value or MPI_ANY_TAG (integer). +* ``comm``: Communicator (handle). + +OUTPUT PARAMETERS +----------------- +* ``message``: Message (handle). +* ``status``: Status object (status). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +Like :ref:`MPI_Probe` and :ref:`MPI_Iprobe`, the :ref:`MPI_Mprobe` and :ref:`MPI_Improbe` operations +allow incoming messages to be queried without actually receiving them, +except that :ref:`MPI_Mprobe` and :ref:`MPI_Improbe` provide a mechanism to receive +the specific message that was matched regardless of other intervening +probe or receive operations. 
This gives the application an opportunity +to decide how to receive the message, based on the information returned +by the probe. In particular, the application may allocate memory for the +receive buffer according to the length of the probed message. + +A matching probe with MPI_PROC_NULL as *source* returns *message* = +MPI_MESSAGE_NO_PROC, and the *status* object returns source = +MPI_PROC_NULL, tag = MPI_ANY_TAG, and count = 0. + +When :ref:`MPI_Mprobe` returns (from a non-MPI_PROC_NULL *source*), the matched +message can then be received by passing the *message* handle to the +:ref:`MPI_Mrecv` or :ref:`MPI_Imrecv` functions. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Improbe` :ref:`MPI_Probe` :ref:`MPI_Iprobe` :ref:`MPI_Mrecv` :ref:`MPI_Imrecv` :ref:`MPI_Cancel` diff --git a/docs/man-openmpi/man3/MPI_Mrecv.3.rst b/docs/man-openmpi/man3/MPI_Mrecv.3.rst new file mode 100644 index 00000000000..b82157836a4 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Mrecv.3.rst @@ -0,0 +1,94 @@ +.. _mpi_mrecv: + + +MPI_Mrecv +========= + +.. include_body + +:ref:`MPI_Mrecv` - Blocking receive for a matched message + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Mrecv(void *buf, int count, MPI_Datatype type, + MPI_Message *message, MPI_Status *status) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! 
or the older form: INCLUDE 'mpif.h' + MPI_MRECV(BUF, COUNT, DATATYPE, MESSAGE, STATUS, IERROR) + BUF(*) + INTEGER COUNT, DATATYPE, MESSAGE + INTEGER STATUS(MPI_STATUS_SIZE), IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Mrecv(buf, count, datatype, message, status, ierror) + TYPE(*), DIMENSION(..) :: buf + INTEGER, INTENT(IN) :: count + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Message), INTENT(INOUT) :: message + TYPE(MPI_Status) :: status + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``count``: Number of elements to receive (nonnegative integer). +* ``datatype``: Datatype of each send buffer element (handle). +* ``message``: Message (handle). + +OUTPUT PARAMETERS +----------------- +* ``buf``: Initial address of receive buffer (choice). +* ``status``: Status object (status). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +The functions :ref:`MPI_Mrecv` and :ref:`MPI_Imrecv` receive messages that have been +previously matched by a matching probe. + +If :ref:`MPI_Mrecv` is called with MPI_MESSAGE_NULL as the message argument, +the call returns immediately with the *status* object set to *source* = +MPI_PROC_NULL, *tag* = MPI_ANY_TAG, and *count* = 0, as if a receive +from MPI_PROC_NULL was issued. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. 
seealso:: + :ref:`MPI_Mprobe` :ref:`MPI_Improbe` :ref:`MPI_Probe` :ref:`MPI_Iprobe` :ref:`MPI_Imrecv` :ref:`MPI_Cancel` diff --git a/docs/man-openmpi/man3/MPI_Neighbor_allgather.3.rst b/docs/man-openmpi/man3/MPI_Neighbor_allgather.3.rst new file mode 100644 index 00000000000..33411bcce9e --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Neighbor_allgather.3.rst @@ -0,0 +1,181 @@ +.. _mpi_neighbor_allgather: + + +MPI_Neighbor_allgather +====================== + +.. include_body + +:ref:`MPI_Neighbor_allgather`, :ref:`MPI_Ineighbor_allgather`, +:ref:`MPI_Neighbor_allgather_init` - Gathers and distributes data from and to all +neighbors + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include <mpi.h> + + int MPI_Neighbor_allgather(const void *sendbuf, int sendcount, + MPI_Datatype sendtype, void *recvbuf, int recvcount, + MPI_Datatype recvtype, MPI_Comm comm) + + int MPI_Ineighbor_allgather(const void *sendbuf, int sendcount, + MPI_Datatype sendtype, void *recvbuf, int recvcount, + MPI_Datatype recvtype, MPI_Comm comm, MPI_Request *req) + + int MPI_Neighbor_allgather_init(const void *sendbuf, int sendcount, + MPI_Datatype sendtype, void *recvbuf, int recvcount, + MPI_Datatype recvtype, MPI_Comm comm, MPI_Info info, MPI_Request *req) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + !
or the older form: INCLUDE 'mpif.h' + MPI_NEIGHBOR_ALLGATHER(SENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNT, + RECVTYPE, COMM, IERROR) + SENDBUF (*), RECVBUF (*) + INTEGER SENDCOUNT, SENDTYPE, RECVCOUNT, RECVTYPE, COMM, + INTEGER IERROR + + MPI_INEIGHBOR_ALLGATHER(SENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNT, + RECVTYPE, COMM, REQUEST, IERROR) + SENDBUF (*), RECVBUF (*) + INTEGER SENDCOUNT, SENDTYPE, RECVCOUNT, RECVTYPE, COMM, + INTEGER REQUEST, IERROR + + MPI_NEIGHBOR_ALLGATHER_INIT(SENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNT, + RECVTYPE, COMM, INFO, IREQUEST, IERROR) + SENDBUF (*), RECVBUF (*) + INTEGER SENDCOUNT, SENDTYPE, RECVCOUNT, RECVTYPE, COMM, + INTEGER INFO, REQUEST, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Neighbor_allgather(sendbuf, sendcount, sendtype, recvbuf, recvcount, + recvtype, comm, ierror) + TYPE(*), DIMENSION(..), INTENT(IN) :: sendbuf + TYPE(*), DIMENSION(..) :: recvbuf + INTEGER, INTENT(IN) :: sendcount, recvcount + TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Ineighbor_allgather(sendbuf, sendcount, sendtype, recvbuf, recvcount, + recvtype, comm, request, ierror) + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: sendbuf + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: recvbuf + INTEGER, INTENT(IN) :: sendcount, recvcount + TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Neighbor_allgather_init(sendbuf, sendcount, sendtype, recvbuf, recvcount, + recvtype, comm, info, request, ierror) + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: sendbuf + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: recvbuf + INTEGER, INTENT(IN) :: sendcount, recvcount + TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Info), 
INTENT(IN) :: info + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``sendbuf``: Starting address of send buffer (choice). +* ``sendcount``: Number of elements in send buffer (integer). +* ``sendtype``: Datatype of send buffer elements (handle). +* ``recvbuf``: Starting address of recv buffer (choice). +* ``recvcount``: Number of elements received from any process (integer). +* ``recvtype``: Datatype of receive buffer elements (handle). +* ``comm``: Communicator (handle). + +OUTPUT PARAMETERS +----------------- +* ``recvbuf``: Address of receive buffer (choice). +* ``request``: Request (handle, non-blocking only). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Neighbor_allgather` is similar to :ref:`MPI_Allgather`, except that only the +neighboring processes receive the result, instead of all processes. The +neighbors and buffer layout is determined by the topology of *comm*. + +The type signature associated with sendcount, sendtype at a process must +be equal to the type signature associated with recvcount, recvtype at +any other process. + + +NEIGHBOR ORDERING +----------------- + +For a distributed graph topology, created with :ref:`MPI_Dist_graph_create`, +the sequence of neighbors in the send and receive buffers at each +process is defined as the sequence returned by :ref:`MPI_Dist_graph_neighbors` +for destinations and sources, respectively. For a general graph +topology, created with :ref:`MPI_Graph_create`, the order of neighbors in the +send and receive buffers is defined as the sequence of neighbors as +returned by :ref:`MPI_Graph_neighbors`. Note that general graph topologies +should generally be replaced by the distributed graph topologies. 
+ +For a Cartesian topology, created with :ref:`MPI_Cart_create`, the sequence of +neighbors in the send and receive buffers at each process is defined by +order of the dimensions, first the neighbor in the negative direction +and then in the positive direction with displacement 1. The numbers of +sources and destinations in the communication routines are 2*ndims with +ndims defined in :ref:`MPI_Cart_create`. If a neighbor does not exist, i.e., at +the border of a Cartesian topology in the case of a non-periodic virtual +grid dimension (i.e., periods[...]==false), then this neighbor is +defined to be MPI_PROC_NULL. + +If a neighbor in any of the functions is MPI_PROC_NULL, then the +neighborhood collective communication behaves like a point-to-point +communication with MPI_PROC_NULL in this direction. That is, the buffer +is still part of the sequence of neighbors but it is neither +communicated nor updated. + + +NOTES +----- + +The MPI_IN_PLACE option for *sendbuf* is not meaningful for this +operation. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. 
seealso:: + :ref:`MPI_Neighbor_allgatherv` :ref:`MPI_Cart_create` :ref:`MPI_Graph_create` + :ref:`MPI_Dist_graph_create` :ref:`MPI_Gather` diff --git a/docs/man-openmpi/man3/MPI_Neighbor_allgather_init.3.rst b/docs/man-openmpi/man3/MPI_Neighbor_allgather_init.3.rst new file mode 100644 index 00000000000..12a5d2b7fab --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Neighbor_allgather_init.3.rst @@ -0,0 +1,9 @@ +.. _mpi_neighbor_allgather_init: + +MPI_Neighbor_allgather_init +=========================== + .. include_body + +.. include:: ../man3/MPI_Neighbor_allgather.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Neighbor_allgatherv.3.rst b/docs/man-openmpi/man3/MPI_Neighbor_allgatherv.3.rst new file mode 100644 index 00000000000..d0e59b8329a --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Neighbor_allgatherv.3.rst @@ -0,0 +1,190 @@ +.. _mpi_neighbor_allgatherv: + + +MPI_Neighbor_allgatherv +======================= + +.. include_body + +:ref:`MPI_Neighbor_allgatherv`, :ref:`MPI_Ineighbor_allgatherv`, +:ref:`MPI_Neighbor_allgatherv_init` - Gathers and distributes data from and +to all neighbors. Each process may contribute a different amount of +data. + + +SYNTAX +------ + + +C Syntax
^^^^^^^^ + +.. code-block:: c + + #include <mpi.h> + + int MPI_Neighbor_allgatherv(const void *sendbuf, int sendcount, + MPI_Datatype sendtype, void *recvbuf, const int recvcounts[], + const int displs[], MPI_Datatype recvtype, MPI_Comm comm) + + int MPI_Ineighbor_allgatherv(const void *sendbuf, int sendcount, + MPI_Datatype sendtype, void *recvbuf, const int recvcounts[], + const int displs[], MPI_Datatype recvtype, MPI_Comm comm, + MPI_Request *request) + + int MPI_Neighbor_allgatherv_init(const void *sendbuf, int sendcount, + MPI_Datatype sendtype, void *recvbuf, const int recvcounts[], + const int displs[], MPI_Datatype recvtype, MPI_Comm comm, + MPI_Info info, MPI_Request *request) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + !
or the older form: INCLUDE 'mpif.h' + MPI_NEIGHBOR_ALLGATHERV(SENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, + RECVCOUNT, DISPLS, RECVTYPE, COMM, IERROR) + SENDBUF(*), RECVBUF(*) + INTEGER SENDCOUNT, SENDTYPE, RECVCOUNT(*), + INTEGER DISPLS(*), RECVTYPE, COMM, IERROR + + MPI_INEIGHBOR_ALLGATHERV(SENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, + RECVCOUNT, DISPLS, RECVTYPE, COMM, REQUEST, IERROR) + SENDBUF(*), RECVBUF(*) + INTEGER SENDCOUNT, SENDTYPE, RECVCOUNT(*), + INTEGER DISPLS(*), RECVTYPE, COMM,REQUEST, IERROR + + MPI_NEIGHBOR_ALLGATHERV_INIT(SENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, + RECVCOUNT, DISPLS, RECVTYPE, COMM, INFO, REQUEST, IERROR) + SENDBUF(*), RECVBUF(*) + INTEGER SENDCOUNT, SENDTYPE, RECVCOUNT(*), + INTEGER DISPLS(*), RECVTYPE, COMM,INFO,REQUEST, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Neighbor_allgatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts, + displs, recvtype, comm, ierror) + TYPE(*), DIMENSION(..), INTENT(IN) :: sendbuf + TYPE(*), DIMENSION(..) 
:: recvbuf + INTEGER, INTENT(IN) :: sendcount, recvcounts(*), displs(*) + TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Ineighbor_allgatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts, + displs, recvtype, comm, request, ierror) + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: sendbuf + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: recvbuf + INTEGER, INTENT(IN) :: sendcount + INTEGER, INTENT(IN), ASYNCHRONOUS :: recvcounts(*), displs(*) + TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Neighbor_allgatherv_init(sendbuf, sendcount, sendtype, recvbuf, recvcounts, + displs, recvtype, comm, info, request, ierror) + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: sendbuf + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: recvbuf + INTEGER, INTENT(IN) :: sendcount + INTEGER, INTENT(IN), ASYNCHRONOUS :: recvcounts(*), displs(*) + TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Info), INTENT(IN) :: info + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``sendbuf``: Starting address of send buffer (choice). +* ``sendcount``: Number of elements in send buffer (integer). +* ``sendtype``: Datatype of send buffer elements (handle). +* ``recvcount``: Integer array (of length group size) containing the number of elements that are received from each neighbor. +* ``displs``: Integer array (of length group size). Entry i specifies the displacement (relative to recvbuf) at which to place the incoming data from neighbor i. +* ``recvtype``: Datatype of receive buffer elements (handle). +* ``comm``: Communicator (handle). 
+* ``info``: Info (handle, persistent only). + +OUTPUT PARAMETERS +----------------- +* ``recvbuf``: Address of receive buffer (choice). +* ``request``: Request (handle, non-blocking only). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Neighbor_allgatherv` is similar to :ref:`MPI_Neighbor_allgather` in that all +processes gather data from all neighbors, except that each process can +send a different amount of data. The block of data sent from the jth +neighbor is received by every neighbor and placed in the jth block of +the buffer *recvbuf*. The neighbors and buffer layout is determined by the +topology of *comm*. + +The type signature associated with sendcount, sendtype, at process j +must be equal to the type signature associated with the corresponding +entry in *recvcounts* on neighboring processes. + + +NEIGHBOR ORDERING +----------------- + +For a distributed graph topology, created with :ref:`MPI_Dist_graph_create`, +the sequence of neighbors in the send and receive buffers at each +process is defined as the sequence returned by :ref:`MPI_Dist_graph_neighbors` +for destinations and sources, respectively. For a general graph +topology, created with :ref:`MPI_Graph_create`, the order of neighbors in the +send and receive buffers is defined as the sequence of neighbors as +returned by :ref:`MPI_Graph_neighbors`. Note that general graph topologies +should generally be replaced by the distributed graph topologies. + +For a Cartesian topology, created with :ref:`MPI_Cart_create`, the sequence of +neighbors in the send and receive buffers at each process is defined by +order of the dimensions, first the neighbor in the negative direction +and then in the positive direction with displacement 1. The numbers of +sources and destinations in the communication routines are 2*ndims with +ndims defined in :ref:`MPI_Cart_create`.
If a neighbor does not exist, i.e., at +the border of a Cartesian topology in the case of a non-periodic virtual +grid dimension (i.e., periods[...]==false), then this neighbor is +defined to be MPI_PROC_NULL. + +If a neighbor in any of the functions is MPI_PROC_NULL, then the +neighborhood collective communication behaves like a point-to-point +communication with MPI_PROC_NULL in this direction. That is, the buffer +is still part of the sequence of neighbors but it is neither +communicated nor updated. + + +NOTES +----- + +The MPI_IN_PLACE option for *sendbuf* is not meaningful for this +operation. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Neighbor_allgather` :ref:`MPI_Cart_create` :ref:`MPI_Graph_create` + :ref:`MPI_Dist_graph_create` diff --git a/docs/man-openmpi/man3/MPI_Neighbor_allgatherv_init.3.rst b/docs/man-openmpi/man3/MPI_Neighbor_allgatherv_init.3.rst new file mode 100644 index 00000000000..120264a877c --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Neighbor_allgatherv_init.3.rst @@ -0,0 +1,9 @@ +.. _mpi_neighbor_allgatherv_init: + +MPI_Neighbor_allgatherv_init +============================ + .. include_body + +.. include:: ../man3/MPI_Neighbor_allgatherv.3.rst + :start-after: .. 
include_body + diff --git a/docs/man-openmpi/man3/MPI_Neighbor_alltoall.3.rst b/docs/man-openmpi/man3/MPI_Neighbor_alltoall.3.rst new file mode 100644 index 00000000000..8a277634292 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Neighbor_alltoall.3.rst @@ -0,0 +1,224 @@ +.. _mpi_neighbor_alltoall: + + +MPI_Neighbor_alltoall +===================== + +.. include_body + +:ref:`MPI_Neighbor_alltoall`, :ref:`MPI_Ineighbor_alltoall`, :ref:`MPI_Neighbor_alltoall_init` - All processes send data to neighboring processes in a virtual topology communicator + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include <mpi.h> + + int MPI_Neighbor_alltoall(const void *sendbuf, int sendcount, + MPI_Datatype sendtype, void *recvbuf, int recvcount, + MPI_Datatype recvtype, MPI_Comm comm) + + int MPI_Ineighbor_alltoall(const void *sendbuf, int sendcount, + MPI_Datatype sendtype, void *recvbuf, int recvcount, + MPI_Datatype recvtype, MPI_Comm comm, MPI_Request *request) + + int MPI_Neighbor_alltoall_init(const void *sendbuf, int sendcount, + MPI_Datatype sendtype, void *recvbuf, int recvcount, + MPI_Datatype recvtype, MPI_Comm comm, MPI_Info info, MPI_Request *request) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_NEIGHBOR_ALLTOALL(SENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNT, + RECVTYPE, COMM, IERROR) + + SENDBUF(*), RECVBUF(*) + INTEGER SENDCOUNT, SENDTYPE, RECVCOUNT, RECVTYPE + INTEGER COMM, IERROR + + MPI_INEIGHBOR_ALLTOALL(SENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNT, + RECVTYPE, COMM, REQUEST, IERROR) + + SENDBUF(*), RECVBUF(*) + INTEGER SENDCOUNT, SENDTYPE, RECVCOUNT, RECVTYPE + INTEGER COMM, REQUEST, IERROR + + MPI_NEIGHBOR_ALLTOALL_INIT(SENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNT, + RECVTYPE, COMM, INFO, REQUEST, IERROR) + + SENDBUF(*), RECVBUF(*) + INTEGER SENDCOUNT, SENDTYPE, RECVCOUNT, RECVTYPE + INTEGER COMM, INFO, REQUEST, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +..
code-block:: fortran + + USE mpi_f08 + MPI_Neighbor_alltoall(sendbuf, sendcount, sendtype, recvbuf, recvcount, + recvtype, comm, ierror) + + TYPE(*), DIMENSION(..), INTENT(IN) :: sendbuf + TYPE(*), DIMENSION(..) :: recvbuf + INTEGER, INTENT(IN) :: sendcount, recvcount + TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Ineighbor_alltoall(sendbuf, sendcount, sendtype, recvbuf, recvcount, + recvtype, comm, request, ierror) + + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: sendbuf + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: recvbuf + INTEGER, INTENT(IN) :: sendcount, recvcount + TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Neighbor_alltoall_init(sendbuf, sendcount, sendtype, recvbuf, recvcount, + recvtype, comm, info, request, ierror) + + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: sendbuf + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: recvbuf + INTEGER, INTENT(IN) :: sendcount, recvcount + TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Info), INTENT(IN) :: info + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``sendbuf``: Starting address of send buffer (choice). +* ``sendcount``: Number of elements to send to each process (integer). +* ``sendtype``: Datatype of send buffer elements (handle). +* ``recvcount``: Number of elements to receive from each process (integer). +* ``recvtype``: Datatype of receive buffer elements (handle). +* ``comm``: Communicator over which data is to be exchanged (handle). +* ``info``: Info (handle, persistent only). + +OUTPUT PARAMETERS +----------------- +* ``recvbuf``: Starting address of receive buffer (choice). +* ``request``: Request (handle, non-blocking only). 
+* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Neighbor_alltoall` is a collective operation in which all processes +send and receive the same amount of data to each neighbor. The operation +of this routine can be represented as follows, where each process +performs 2n (n being the number of neighbors in communicator *comm*) +independent point-to-point communications. The neighbors and buffer +layout are determined by the topology of *comm*. + +Example of :ref:`MPI_Neighbor_alltoall` semantics for cartesian topologies: + +:: + + MPI_Cart_get(comm, maxdims, dims, periods, coords); + for (dim = 0, i = 0 ; dim < dims ; ++dim) { + MPI_Cart_shift(comm, dim, 1, &r0, &r1); + MPI_Isend(sendbuf + i * sendcount * extent(sendtype), + sendcount, sendtype, r0, ..., comm, ...); + MPI_Irecv(recvbuf + i * recvcount * extent(recvtype), + recvcount, recvtype, r0, ..., comm, ...); + ++i; + MPI_Isend(sendbuf + i * sendcount * extent(sendtype), + sendcount, sendtype, r1, ..., comm, &req[i]); + MPI_Irecv(recvbuf + i * recvcount * extent(recvtype), + recvcount, recvtype, r1, ..., comm, ...); + ++i; + } + + MPI_Waitall (...); + +Each process breaks up its local *sendbuf* into n blocks - each +containing *sendcount* elements of type *sendtype* - and divides its +*recvbuf* similarly according to *recvcount* and *recvtype*. Process j +sends the k-th block of its local *sendbuf* to neighbor k, which places +the data in the j-th block of its local *recvbuf*. The amount of data +sent must be equal to the amount of data received, pairwise, between +every pair of processes. + + +NEIGHBOR ORDERING +----------------- + +For a distributed graph topology, created with :ref:`MPI_Dist_graph_create`, +the sequence of neighbors in the send and receive buffers at each +process is defined as the sequence returned by :ref:`MPI_Dist_graph_neighbors` +for destinations and sources, respectively. 
For a general graph +topology, created with :ref:`MPI_Graph_create`, the order of neighbors in the +send and receive buffers is defined as the sequence of neighbors as +returned by :ref:`MPI_Graph_neighbors`. Note that general graph topologies +should generally be replaced by the distributed graph topologies. + +For a Cartesian topology, created with :ref:`MPI_Cart_create`, the sequence of +neighbors in the send and receive buffers at each process is defined by +order of the dimensions, first the neighbor in the negative direction +and then in the positive direction with displacement 1. The numbers of +sources and destinations in the communication routines are 2*ndims with +ndims defined in :ref:`MPI_Cart_create`. If a neighbor does not exist, i.e., at +the border of a Cartesian topology in the case of a non-periodic virtual +grid dimension (i.e., periods[...]==false), then this neighbor is +defined to be MPI_PROC_NULL. + +If a neighbor in any of the functions is MPI_PROC_NULL, then the +neighborhood collective communication behaves like a point-to-point +communication with MPI_PROC_NULL in this direction. That is, the buffer +is still part of the sequence of neighbors but it is neither +communicated nor updated. + + +NOTES +----- + +The MPI_IN_PLACE option for *sendbuf* is not meaningful for this +function. + +All arguments on all processes are significant. The *comm* argument, in +particular, must describe the same communicator on all processes. *comm* +must be either a cartesian, graph, or dist graph communicator. + +There are two MPI library functions that are more general than +:ref:`MPI_Neighbor_alltoall`. :ref:`MPI_Neighbor_alltoallv` allows all-to-all +communication to and from buffers that need not be contiguous; different +processes may send and receive different amounts of data. +:ref:`MPI_Neighbor_alltoallw` expands :ref:`MPI_Neighbor_alltoallv`'s functionality to +allow the exchange of data with different datatypes. 
+ + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Neighbor_alltoallv` :ref:`MPI_Neighbor_alltoallw` :ref:`MPI_Cart_create` + :ref:`MPI_Graph_create` :ref:`MPI_Dist_graph_create` :ref:`MPI_Dist_graph_create_adjacent` diff --git a/docs/man-openmpi/man3/MPI_Neighbor_alltoall_init.3.rst b/docs/man-openmpi/man3/MPI_Neighbor_alltoall_init.3.rst new file mode 100644 index 00000000000..b199671db87 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Neighbor_alltoall_init.3.rst @@ -0,0 +1,9 @@ +.. _mpi_neighbor_alltoall_init: + +MPI_Neighbor_alltoall_init +========================== + .. include_body + +.. include:: ../man3/MPI_Neighbor_alltoall.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Neighbor_alltoallv.3.rst b/docs/man-openmpi/man3/MPI_Neighbor_alltoallv.3.rst new file mode 100644 index 00000000000..5c3f4ee40ad --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Neighbor_alltoallv.3.rst @@ -0,0 +1,240 @@ +.. _mpi_neighbor_alltoallv: + + +MPI_Neighbor_alltoallv +====================== + +.. include_body + +:ref:`MPI_Neighbor_alltoallv`, :ref:`MPI_Ineighbor_alltoallv`, +:ref:`MPI_Neighbor_alltoallv_init` - All processes send different amounts of +data to, and receive different amounts of data from, all neighbors + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. 
code-block:: c + + #include + + int MPI_Neighbor_alltoallv(const void *sendbuf, const int sendcounts[], + const int sdispls[], MPI_Datatype sendtype, + void *recvbuf, const int recvcounts[], + const int rdispls[], MPI_Datatype recvtype, MPI_Comm comm) + + int MPI_Ineighbor_alltoallv(const void *sendbuf, const int sendcounts[], + const int sdispls[], MPI_Datatype sendtype, + void *recvbuf, const int recvcounts[], + const int rdispls[], MPI_Datatype recvtype, MPI_Comm comm, + MPI_Request *request) + + int MPI_Neighbor_alltoallv_init(const void *sendbuf, const int sendcounts[], + const int sdispls[], MPI_Datatype sendtype, + void *recvbuf, const int recvcounts[], + const int rdispls[], MPI_Datatype recvtype, MPI_Comm comm, + MPI_Info info, MPI_Request *request) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_NEIGHBOR_ALLTOALLV(SENDBUF, SENDCOUNTS, SDISPLS, SENDTYPE, + RECVBUF, RECVCOUNTS, RDISPLS, RECVTYPE, COMM, IERROR) + + SENDBUF(*), RECVBUF(*) + INTEGER SENDCOUNTS(*), SDISPLS(*), SENDTYPE + INTEGER RECVCOUNTS(*), RDISPLS(*), RECVTYPE + INTEGER COMM, IERROR + + MPI_INEIGHBOR_ALLTOALLV(SENDBUF, SENDCOUNTS, SDISPLS, SENDTYPE, + RECVBUF, RECVCOUNTS, RDISPLS, RECVTYPE, COMM, REQUEST, IERROR) + + SENDBUF(*), RECVBUF(*) + INTEGER SENDCOUNTS(*), SDISPLS(*), SENDTYPE + INTEGER RECVCOUNTS(*), RDISPLS(*), RECVTYPE + INTEGER COMM, REQUEST, IERROR + + MPI_NEIGHBOR_ALLTOALLV_INIT(SENDBUF, SENDCOUNTS, SDISPLS, SENDTYPE, + RECVBUF, RECVCOUNTS, RDISPLS, RECVTYPE, COMM, INFO, REQUEST, IERROR) + + SENDBUF(*), RECVBUF(*) + INTEGER SENDCOUNTS(*), SDISPLS(*), SENDTYPE + INTEGER RECVCOUNTS(*), RDISPLS(*), RECVTYPE + INTEGER COMM, INFO, REQUEST, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE mpi_f08 + MPI_Neighbor_alltoallv(sendbuf, sendcounts, sdispls, sendtype, recvbuf, + recvcounts, rdispls, recvtype, comm, ierror) + + TYPE(*), DIMENSION(..), INTENT(IN) :: sendbuf + TYPE(*), DIMENSION(..) :: recvbuf + INTEGER, INTENT(IN) :: sendcounts(*), sdispls(*), recvcounts(*), + rdispls(*) + TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Ineighbor_alltoallv(sendbuf, sendcounts, sdispls, sendtype, recvbuf, + recvcounts, rdispls, recvtype, comm, request, ierror) + + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: sendbuf + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: recvbuf + INTEGER, INTENT(IN), ASYNCHRONOUS :: sendcounts(*), sdispls(*), + recvcounts(*), rdispls(*) + TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Neighbor_alltoallv_init(sendbuf, sendcounts, sdispls, sendtype, recvbuf, + recvcounts, rdispls, recvtype, comm, info, request, ierror) + + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: sendbuf + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: recvbuf + INTEGER, INTENT(IN), ASYNCHRONOUS :: sendcounts(*), sdispls(*), + recvcounts(*), rdispls(*) + TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Info), INTENT(IN) :: info + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``sendbuf``: Starting address of send buffer. +* ``sendcounts``: Integer array, where entry i specifies the number of elements to send to neighbor i. +* ``sdispls``: Integer array, where entry i specifies the displacement (offset from *sendbuf*, in units of *sendtype*) from which to send data to neighbor i. +* ``sendtype``: Datatype of send buffer elements. 
+* ``recvcounts``: Integer array, where entry j specifies the number of elements to receive from neighbor j. +* ``rdispls``: Integer array, where entry j specifies the displacement (offset from *recvbuf*, in units of *recvtype*) to which data from neighbor j should be written. +* ``recvtype``: Datatype of receive buffer elements. +* ``comm``: Communicator over which data is to be exchanged. +* ``info``: Info (handle, persistent only). + +OUTPUT PARAMETERS +----------------- +* ``recvbuf``: Address of receive buffer. +* ``request``: Request (handle, non-blocking only). +* ``IERROR``: Fortran only: Error status. + +DESCRIPTION +----------- + +:ref:`MPI_Neighbor_alltoallv` is a generalized collective operation in which +all processes send data to and receive data from all neighbors. It adds +flexibility to :ref:`MPI_Neighbor_alltoall` by allowing the user to specify +data to send and receive vector-style (via a displacement and element +count). The operation of this routine can be thought of as follows, +where each process performs 2n (n being the number of neighbors in the +topology of communicator *comm*) independent point-to-point +communications. The neighbors and buffer layout are determined by the +topology of *comm*. + +:: + + MPI_Cart_get(comm, maxdims, dims, periods, coords); + for (dim = 0, i = 0 ; dim < dims ; ++dim) { + MPI_Cart_shift(comm, dim, 1, &r0, &r1); + MPI_Isend(sendbuf + sdispls[i] * extent(sendtype), + sendcount, sendtype, r0, ..., comm, ...); + MPI_Irecv(recvbuf + rdispls[i] * extent(recvtype), + recvcount, recvtype, r0, ..., comm, ...); + ++i; + MPI_Isend(sendbuf + sdispls[i] * extent(sendtype), + sendcount, sendtype, r1, ..., comm, &req[i]); + MPI_Irecv(recvbuf + rdispls[i] * extent(recvtype), + recvcount, recvtype, r1, ..., comm, ...); + ++i; + } + +Process j sends the k-th block of its local *sendbuf* to neighbor k, +which places the data in the j-th block of its local *recvbuf*. 
+ +When a pair of processes exchanges data, each may pass different element +count and datatype arguments so long as the sender specifies the same +amount of data to send (in bytes) as the receiver expects to receive. + +Note that process i may send a different amount of data to process j +than it receives from process j. Also, a process may send entirely +different amounts of data to different processes in the communicator. + + +NEIGHBOR ORDERING +----------------- + +For a distributed graph topology, created with :ref:`MPI_Dist_graph_create`, +the sequence of neighbors in the send and receive buffers at each +process is defined as the sequence returned by :ref:`MPI_Dist_graph_neighbors` +for destinations and sources, respectively. For a general graph +topology, created with :ref:`MPI_Graph_create`, the order of neighbors in the +send and receive buffers is defined as the sequence of neighbors as +returned by :ref:`MPI_Graph_neighbors`. Note that general graph topologies +should generally be replaced by the distributed graph topologies. + +For a Cartesian topology, created with :ref:`MPI_Cart_create`, the sequence of +neighbors in the send and receive buffers at each process is defined by +order of the dimensions, first the neighbor in the negative direction +and then in the positive direction with displacement 1. The numbers of +sources and destinations in the communication routines are 2*ndims with +ndims defined in :ref:`MPI_Cart_create`. If a neighbor does not exist, i.e., at +the border of a Cartesian topology in the case of a non-periodic virtual +grid dimension (i.e., periods[...]==false), then this neighbor is +defined to be MPI_PROC_NULL. + +If a neighbor in any of the functions is MPI_PROC_NULL, then the +neighborhood collective communication behaves like a point-to-point +communication with MPI_PROC_NULL in this direction. That is, the buffer +is still part of the sequence of neighbors but it is neither +communicated nor updated. 
+ + +NOTES +----- + +The MPI_IN_PLACE option for *sendbuf* is not meaningful for this +operation. + +The specification of counts and displacements should not cause any +location to be written more than once. + +All arguments on all processes are significant. The *comm* argument, in +particular, must describe the same communicator on all processes. + +The offsets of *sdispls* and *rdispls* are measured in units of +*sendtype* and *recvtype*, respectively. Compare this to +:ref:`MPI_Neighbor_alltoallw`, where these offsets are measured in bytes. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Neighbor_alltoall` :ref:`MPI_Neighbor_alltoallw` :ref:`MPI_Cart_create` + :ref:`MPI_Graph_create` :ref:`MPI_Dist_graph_create` diff --git a/docs/man-openmpi/man3/MPI_Neighbor_alltoallv_init.3.rst b/docs/man-openmpi/man3/MPI_Neighbor_alltoallv_init.3.rst new file mode 100644 index 00000000000..74425737112 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Neighbor_alltoallv_init.3.rst @@ -0,0 +1,9 @@ +.. _mpi_neighbor_alltoallv_init: + +MPI_Neighbor_alltoallv_init +=========================== + .. include_body + +.. include:: ../man3/MPI_Neighbor_alltoallv.3.rst + :start-after: .. 
include_body + diff --git a/docs/man-openmpi/man3/MPI_Neighbor_alltoallw.3.rst b/docs/man-openmpi/man3/MPI_Neighbor_alltoallw.3.rst new file mode 100644 index 00000000000..26fd3b5f03e --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Neighbor_alltoallw.3.rst @@ -0,0 +1,227 @@ +.. _mpi_neighbor_alltoallw: + + +MPI_Neighbor_alltoallw +====================== + +.. include_body + +:ref:`MPI_Neighbor_alltoallw`, :ref:`MPI_Ineighbor_alltoallw`, +:ref:`MPI_Neighbor_alltoallw_init` - All processes send data of different +types to, and receive data of different types from, all processes + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Neighbor_alltoallw(const void *sendbuf, const int sendcounts[], + const MPI_Aint sdispls[], const MPI_Datatype sendtypes[], + void *recvbuf, const int recvcounts[], const MPI_Aint rdispls[], + const MPI_Datatype recvtypes[], MPI_Comm comm) + + int MPI_Ineighbor_alltoallw(const void *sendbuf, const int sendcounts[], + const MPI_Aint sdispls[], const MPI_Datatype sendtypes[], + void *recvbuf, const int recvcounts[], const MPI_Aint rdispls[], + const MPI_Datatype recvtypes[], MPI_Comm comm, MPI_Request *request) + + int MPI_Neighbor_alltoallw_init(const void *sendbuf, const int sendcounts[], + const MPI_Aint sdispls[], const MPI_Datatype sendtypes[], + void *recvbuf, const int recvcounts[], const MPI_Aint rdispls[], + const MPI_Datatype recvtypes[], MPI_Comm comm, MPI_Info info, MPI_Request *request) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! 
or the older form: INCLUDE 'mpif.h' + MPI_NEIGHBOR_ALLTOALLW(SENDBUF, SENDCOUNTS, SDISPLS, SENDTYPES, + RECVBUF, RECVCOUNTS, RDISPLS, RECVTYPES, COMM, IERROR) + + SENDBUF(*), RECVBUF(*) + INTEGER SENDCOUNTS(*), SENDTYPES(*) + INTEGER RECVCOUNTS(*), RECVTYPES(*) + INTEGER(KIND=MPI_ADDRESS_KIND) SDISPLS(*), RDISPLS(*) + INTEGER COMM, IERROR + + MPI_INEIGHBOR_ALLTOALLW(SENDBUF, SENDCOUNTS, SDISPLS, SENDTYPES, + RECVBUF, RECVCOUNTS, RDISPLS, RECVTYPES, COMM, REQUEST, IERROR) + + SENDBUF(*), RECVBUF(*) + INTEGER SENDCOUNTS(*), SENDTYPES(*) + INTEGER RECVCOUNTS(*), RECVTYPES(*) + INTEGER(KIND=MPI_ADDRESS_KIND) SDISPLS(*), RDISPLS(*) + INTEGER COMM, REQUEST, IERROR + + MPI_NEIGHBOR_ALLTOALLW_INIT(SENDBUF, SENDCOUNTS, SDISPLS, SENDTYPES, + RECVBUF, RECVCOUNTS, RDISPLS, RECVTYPES, COMM, INFO, REQUEST, IERROR) + + SENDBUF(*), RECVBUF(*) + INTEGER SENDCOUNTS(*), SENDTYPES(*) + INTEGER RECVCOUNTS(*), RECVTYPES(*) + INTEGER(KIND=MPI_ADDRESS_KIND) SDISPLS(*), RDISPLS(*) + INTEGER COMM, INFO, REQUEST, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Neighbor_alltoallw(sendbuf, sendcounts, sdispls, sendtypes, recvbuf, + recvcounts, rdispls, recvtypes, comm, ierror) + + TYPE(*), DIMENSION(..), INTENT(IN) :: sendbuf + TYPE(*), DIMENSION(..) 
:: recvbuf + INTEGER, INTENT(IN) :: sendcounts(*), recvcounts(*) + INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: sdispls(*), rdispls(*) + TYPE(MPI_Datatype), INTENT(IN) :: sendtypes(*), recvtypes(*) + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Ineighbor_alltoallw(sendbuf, sendcounts, sdispls, sendtypes, recvbuf, + recvcounts, rdispls, recvtypes, comm, request, ierror) + + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: sendbuf + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: recvbuf + INTEGER, INTENT(IN), ASYNCHRONOUS :: sendcounts(*), recvcounts(*) + INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN), ASYNCHRONOUS :: + sdispls(*), rdispls(*) + TYPE(MPI_Datatype), INTENT(IN), ASYNCHRONOUS :: sendtypes(*), + recvtypes(*) + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Neighbor_alltoallw_init(sendbuf, sendcounts, sdispls, sendtypes, recvbuf, + recvcounts, rdispls, recvtypes, comm, info, request, ierror) + + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: sendbuf + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: recvbuf + INTEGER, INTENT(IN), ASYNCHRONOUS :: sendcounts(*), recvcounts(*) + INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN), ASYNCHRONOUS :: + sdispls(*), rdispls(*) + TYPE(MPI_Datatype), INTENT(IN), ASYNCHRONOUS :: sendtypes(*), + recvtypes(*) + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Info), INTENT(IN) :: info + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``sendbuf``: Starting address of send buffer. +* ``sendcounts``: Integer array, where entry i specifies the number of elements to send to neighbor i. +* ``sdispls``: Integer array, where entry i specifies the displacement (in bytes, offset from *sendbuf*) from which to send data to neighbor i. +* ``sendtypes``: Datatype array, where entry i specifies the datatype to use when sending data to neighbor i. 
+* ``recvcounts``: Integer array, where entry j specifies the number of elements to receive from neighbor j. +* ``rdispls``: Integer array, where entry j specifies the displacement (in bytes, offset from *recvbuf*) to which data from neighbor j should be written. +* ``recvtypes``: Datatype array, where entry j specifies the datatype to use when receiving data from neighbor j. +* ``comm``: Communicator over which data is to be exchanged. +* ``info``: Info (handle, persistent only). + +OUTPUT PARAMETERS +----------------- +* ``recvbuf``: Address of receive buffer. +* ``request``: Request (handle, non-blocking only). +* ``IERROR``: Fortran only: Error status. + +DESCRIPTION +----------- + +:ref:`MPI_Neighbor_alltoallw` is a generalized collective operation in which +all processes send data to and receive data from all neighbors. It adds +flexibility to :ref:`MPI_Neighbor_alltoallv` by allowing the user to specify +the datatype of individual data blocks (in addition to displacement and +element count). Its operation can be thought of in the following way, +where each process performs 2n (n being the number of neighbors in the +topology of communicator *comm*) independent point-to-point +communications. The neighbors and buffer layout are determined by the +topology of *comm*. 
+ +:: + + MPI_Cart_get(comm, maxdims, dims, periods, coords); + for (dim = 0, i = 0 ; dim < dims ; ++dim) { + MPI_Cart_shift(comm, dim, 1, &r0, &r1); + MPI_Isend(sendbuf + sdispls[i] * extent(sendtype), + sendcount, sendtypes[i], r0, ..., comm, ...); + MPI_Irecv(recvbuf + rdispls[i] * extent(recvtype), + recvcount, recvtypes[i], r0, ..., comm, ...); + ++i; + MPI_Isend(sendbuf + sdispls[i] * extent(sendtype), + sendcount, sendtypes[i], r1, ..., comm, &req[i]); + MPI_Irecv(recvbuf + rdispls[i] * extent(recvtype), + recvcount, recvtypes[i], r1, ..., comm, ...); + ++i; + } + + MPI_Waitall (...); + + MPI_Comm_size(comm, &n); + for (i = 0; i < n; i++) + MPI_Send(sendbuf + sdispls[i], sendcounts[i], + sendtypes[i], i, ..., comm); + for (i = 0; i < n; i++) + MPI_Recv(recvbuf + rdispls[i], recvcounts[i], + recvtypes[i], i, ..., comm); + +Process j sends the k-th block of its local *sendbuf* to neighbor k, +which places the data in the j-th block of its local *recvbuf*. + +When a pair of processes exchanges data, each may pass different element +count and datatype arguments so long as the sender specifies the same +amount of data to send (in bytes) as the receiver expects to receive. + +Note that process i may send a different amount of data to process j +than it receives from process j. Also, a process may send entirely +different amounts and types of data to different processes in the +communicator. + + +NOTES +----- + +The MPI_IN_PLACE option for *sendbuf* is not meaningful for this +operation. + +The specification of counts, types, and displacements should not cause +any location to be written more than once. + +All arguments on all processes are significant. The *comm* argument, in +particular, must describe the same communicator on all processes. + +The offsets of *sdispls* and *rdispls* are measured in bytes. Compare +this to :ref:`MPI_Neighbor_alltoallv`, where these offsets are measured in +units of *sendtype* and *recvtype*, respectively. 
+ + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Neighbor_alltoall` :ref:`MPI_Neighbor_alltoallv` :ref:`MPI_Cart_create` + :ref:`MPI_Graph_create` :ref:`MPI_Dist_graph_create` diff --git a/docs/man-openmpi/man3/MPI_Neighbor_alltoallw_init.3.rst b/docs/man-openmpi/man3/MPI_Neighbor_alltoallw_init.3.rst new file mode 100644 index 00000000000..9919b9e975e --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Neighbor_alltoallw_init.3.rst @@ -0,0 +1,9 @@ +.. _mpi_neighbor_alltoallw_init: + +MPI_Neighbor_alltoallw_init +=========================== + .. include_body + +.. include:: ../man3/MPI_Neighbor_alltoallw.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Op_c2f.3.rst b/docs/man-openmpi/man3/MPI_Op_c2f.3.rst new file mode 100644 index 00000000000..d656f9a283a --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Op_c2f.3.rst @@ -0,0 +1,9 @@ +.. _mpi_op_c2f: + +MPI_Op_c2f +========== + .. include_body + +.. include:: ../man3/MPI_Comm_f2c.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Op_commutative.3.rst b/docs/man-openmpi/man3/MPI_Op_commutative.3.rst new file mode 100644 index 00000000000..7114de3e78c --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Op_commutative.3.rst @@ -0,0 +1,80 @@ +.. _mpi_op_commutative: + + +MPI_Op_commutative +================== + +.. include_body + +:ref:`MPI_Op_commutative` - Query of commutativity of reduction operation. 
+ + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Op_commutative(MPI_Op op, int *commute) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_OP_COMMUTATIVE(OP, COMMUTE, IERROR) + LOGICAL COMMUTE + INTEGER OP, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Op_commutative(op, commute, ierror) + TYPE(MPI_Op), INTENT(IN) :: op + LOGICAL, INTENT(OUT) :: commute + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETER +--------------- +* ``op``: Operation (handle). + +OUTPUT PARAMETERS +----------------- +* ``commute``: True if op is commutative, false otherwise (logical). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +Reduction operations can be queried for their commutativity. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Op_create` diff --git a/docs/man-openmpi/man3/MPI_Op_create.3.rst b/docs/man-openmpi/man3/MPI_Op_create.3.rst new file mode 100644 index 00000000000..d211d8dbed7 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Op_create.3.rst @@ -0,0 +1,229 @@ +.. _mpi_op_create: + + +MPI_Op_create +============= + +.. include_body + +:ref:`MPI_Op_create` - Creates a user-defined combination function handle. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. 
code-block:: c + + #include + + int MPI_Op_create(MPI_User_function *function, int commute, + MPI_Op *op) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_OP_CREATE(FUNCTION, COMMUTE, OP, IERROR) + EXTERNAL FUNCTION + LOGICAL COMMUTE + INTEGER OP, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Op_create(user_fn, commute, op, ierror) + PROCEDURE(MPI_User_function) :: user_fn + LOGICAL, INTENT(IN) :: commute + TYPE(MPI_Op), INTENT(OUT) :: op + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``function``: User-defined function (function). +* ``commute``: True if commutative; false otherwise. + +OUTPUT PARAMETERS +----------------- +* ``op``: Operation (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Op_create` binds a user-defined global operation to an op handle that +can subsequently be used in :ref:`MPI_Reduce`, :ref:`MPI_Allreduce`, +:ref:`MPI_Reduce_scatter`, and :ref:`MPI_Scan`. The user-defined operation is assumed +to be associative. If commute = true, then the operation should be both +commutative and associative. If commute = false, then the order of +operands is fixed and is defined to be in ascending, process rank order, +beginning with process zero. The order of evaluation can be changed, +taking advantage of the associativity of the operation. If commute = +true then the order of evaluation can be changed, taking advantage of +commutativity and associativity. + +*function* is the user-defined function, which must have the following +four arguments: invec, inoutvec, len, and datatype. + +The ANSI-C prototype for the function is the following: + +.. code-block:: c + + typedef void MPI_User_function(void *invec, void *inoutvec, + int *len, + MPI_Datatype *datatype); + +The Fortran declaration of the user-defined function appears below. + +.. 
code-block:: fortran + + FUNCTION USER_FUNCTION( INVEC(*), INOUTVEC(*), LEN, TYPE) + INVEC(LEN), INOUTVEC(LEN) + INTEGER LEN, TYPE + +The datatype argument is a handle to the data type that was passed into +the call to :ref:`MPI_Reduce`. The user reduce function should be written such +that the following holds: Let u[0], ..., u[len-1] be the len elements in +the communication buffer described by the arguments invec, len, and +datatype when the function is invoked; let v[0], ..., v[len-1] be len +elements in the communication buffer described by the arguments +inoutvec, len, and datatype when the function is invoked; let w[0], ..., +w[len-1] be len elements in the communication buffer described by the +arguments inoutvec, len, and datatype when the function returns; then +w[i] = u[i] o v[i], for i=0 ,..., len-1, where o is the reduce operation +that the function computes. + +Informally, we can think of invec and inoutvec as arrays of len elements +that function is combining. The result of the reduction over-writes +values in inoutvec, hence the name. Each invocation of the function +results in the pointwise evaluation of the reduce operator on len +elements: i.e, the function returns in inoutvec[i] the value invec[i] o +inoutvec[i], for i = 0..., count-1, where o is the combining operation +computed by the function. + +By internally comparing the value of the datatype argument to known, +global handles, it is possible to overload the use of a single +user-defined function for several different data types. + +General datatypes may be passed to the user function. However, use of +datatypes that are not contiguous is likely to lead to inefficiencies. + +No MPI communication function may be called inside the user function. +:ref:`MPI_Abort` may be called inside the function in case of an error. 
+ + +NOTES +----- + +Suppose one defines a library of user-defined reduce functions that are +overloaded: The datatype argument is used to select the right execution +path at each invocation, according to the types of the operands. The +user-defined reduce function cannot "decode" the datatype argument that +it is passed, and cannot identify, by itself, the correspondence between +the datatype handles and the datatype they represent. This +correspondence was established when the datatypes were created. Before +the library is used, a library initialization preamble must be executed. +This preamble code will define the datatypes that are used by the +library and store handles to these datatypes in global, static variables +that are shared by the user code and the library code. + +**Example:** Example of user-defined reduce: + +Compute the product of an array of complex numbers, in C. + +.. code-block:: c + + typedef struct { + double real,imag; + } Complex; + + /* the user-defined function + */ + void myProd( Complex *in, Complex *inout, int *len, + MPI_Datatype *dptr ) + { + int i; + Complex c; + + for (i=0; i< *len; ++i) { + c.real = inout->real*in->real - + inout->imag*in->imag; + c.imag = inout->real*in->imag + + inout->imag*in->real; + *inout = c; + in++; inout++; + } + } + + /* and, to call it... + */ + ... 
+ + /* each process has an array of 100 Complexes + */ + Complex a[100], answer[100]; + MPI_Op myOp; + MPI_Datatype ctype; + + /* explain to MPI how type Complex is defined + */ + MPI_Type_contiguous( 2, MPI_DOUBLE, &ctype ); + MPI_Type_commit( &ctype ); + /* create the complex-product user-op + */ + MPI_Op_create( myProd, True, &myOp ); + + MPI_Reduce( a, answer, 100, ctype, myOp, root, comm ); + + /* At this point, the answer, which consists of 100 Complexes, + * resides on process root + */ + +The Fortran version of :ref:`MPI_Reduce` will invoke a user-defined reduce +function using the Fortran calling conventions and will pass a +Fortran-type datatype argument; the C version will use C calling +convention and the C representation of a datatype handle. Users who plan +to mix languages should define their reduction functions accordingly. + + +NOTES ON COLLECTIVE OPERATIONS +------------------------------ + +The reduction functions ( MPI_Op ) do not return an error value. As a +result, if the functions detect an error, all they can do is either call +:ref:`MPI_Abort` or silently skip the problem. Thus, if you change the error +handler from MPI_ERRORS_ARE_FATAL to something else, for example, +MPI_ERRORS_RETURN , then no error may be indicated. + +The reason for this is the performance problems in ensuring that all +collective routines return the same error value. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. 
seealso:: + :ref:`MPI_Reduce` :ref:`MPI_Reduce_scatter` :ref:`MPI_Allreduce` :ref:`MPI_Scan` :ref:`MPI_Op_free` diff --git a/docs/man-openmpi/man3/MPI_Op_f2c.3.rst b/docs/man-openmpi/man3/MPI_Op_f2c.3.rst new file mode 100644 index 00000000000..a7fa8dbc8fd --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Op_f2c.3.rst @@ -0,0 +1,9 @@ +.. _mpi_op_f2c: + +MPI_Op_f2c +========== + .. include_body + +.. include:: ../man3/MPI_Comm_f2c.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Op_free.3.rst b/docs/man-openmpi/man3/MPI_Op_free.3.rst new file mode 100644 index 00000000000..717267d3389 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Op_free.3.rst @@ -0,0 +1,78 @@ +.. _mpi_op_free: + + +MPI_Op_free +=========== + +.. include_body + +:ref:`MPI_Op_free` - Frees a user-defined combination function handle. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Op_free(MPI_Op *op) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_OP_FREE(OP, IERROR) + INTEGER OP, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Op_free(op, ierror) + TYPE(MPI_Op), INTENT(INOUT) :: op + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT/OUTPUT PARAMETER +---------------------- +* ``op``: Operation (handle). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +Marks a user-defined reduction operation for deallocation and sets *op* +to MPI_OP_NULL. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. 
The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Op_create` :ref:`MPI_Reduce` :ref:`MPI_Allreduce` :ref:`MPI_Reduce_scatter` :ref:`MPI_Scan` diff --git a/docs/man-openmpi/man3/MPI_Open_port.3.rst b/docs/man-openmpi/man3/MPI_Open_port.3.rst new file mode 100644 index 00000000000..725741756ad --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Open_port.3.rst @@ -0,0 +1,94 @@ +.. _mpi_open_port: + + +MPI_Open_port +============= + +.. include_body + +:ref:`MPI_Open_port` - Establishes a network address for a server to accept +connections from clients. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Open_port(MPI_Info info, char *port_name) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_OPEN_PORT(INFO, PORT_NAME, IERROR) + CHARACTER*(*) PORT_NAME + INTEGER INFO, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Open_port(info, port_name, ierror) + TYPE(MPI_Info), INTENT(IN) :: info + CHARACTER(LEN=MPI_MAX_PORT_NAME), INTENT(OUT) :: port_name + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETER +--------------- +* ``info``: Options on how to establish an address (handle). No options currently supported. + +OUTPUT PARAMETERS +----------------- +* ``port_name``: Newly established port (string). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Open_port` establishes a network address, encoded in the *port_name* +string, at which the server will be able to accept connections from +clients. *port_name* is supplied by the system. + +MPI copies a system-supplied port name into *port_name*. 
*port_name* +identifies the newly opened port and can be used by a client to contact +the server. The maximum size string that may be supplied by the system +is MPI_MAX_PORT_NAME. + + +SUPPORTED INFO KEYS +------------------- + +None. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Comm_accept` :ref:`MPI_Comm_connect` diff --git a/docs/man-openmpi/man3/MPI_Pack.3.rst b/docs/man-openmpi/man3/MPI_Pack.3.rst new file mode 100644 index 00000000000..fe3f901401a --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Pack.3.rst @@ -0,0 +1,131 @@ +.. _mpi_pack: + + +MPI_Pack +======== + +.. include_body + +:ref:`MPI_Pack` - Packs data of a given datatype into contiguous memory. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Pack(const void *inbuf, int incount, MPI_Datatype datatype, + void *outbuf, int outsize, int *position, MPI_Comm comm) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_PACK(INBUF, INCOUNT, DATATYPE, OUTBUF,OUTSIZE, POSITION, + COMM, IERROR) + INBUF(*), OUTBUF(*) + INTEGER INCOUNT, DATATYPE, OUTSIZE, POSITION, COMM, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Pack(inbuf, incount, datatype, outbuf, outsize, position, comm, ierror) + TYPE(*), DIMENSION(..), INTENT(IN) :: inbuf + TYPE(*), DIMENSION(..) 
:: outbuf + INTEGER, INTENT(IN) :: incount, outsize + TYPE(MPI_Datatype), INTENT(IN) :: datatype + INTEGER, INTENT(INOUT) :: position + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``inbuf``: Input buffer start (choice). +* ``incount``: Number of input data items (integer). +* ``datatype``: Datatype of each input data item (handle). +* ``outsize``: Output buffer size, in bytes (integer). +* ``comm``: Communicator for packed message (handle). + +INPUT/OUTPUT PARAMETER +---------------------- +* ``position``: Current position in buffer, in bytes (integer). + +OUTPUT PARAMETERS +----------------- +* ``outbuf``: Output buffer start (choice). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +Packs the message in the send buffer specified by *inbuf*, *incount*, +*datatype* into the buffer space specified by *outbuf* and *outsize*. +The input buffer can be any communication buffer allowed in :ref:`MPI_Send`. +The output buffer is a contiguous storage area containing *outsize* +bytes, starting at the address *outbuf* (length is counted in bytes, not +elements, as if it were a communication buffer for a message of type +MPI_Packed). + +The input value of *position* is the first location in the output buffer +to be used for packing. *position* is incremented by the size of the +packed message, and the output value of *position* is the first location +in the output buffer following the locations occupied by the packed +message. The *comm* argument is the communicator that will be +subsequently used for sending the packed message. + +**Example:** An example using :ref:`MPI_Pack`: + +:: + + int position, i, j, a[2]; + char buff[1000]; + + .... 
+ + MPI_Comm_rank(MPI_COMM_WORLD, &myrank); + if (myrank == 0) + { + / * SENDER CODE */ + + position = 0; + MPI_Pack(&i, 1, MPI_INT, buff, 1000, &position, MPI_COMM_WORLD); + MPI_Pack(&j, 1, MPI_INT, buff, 1000, &position, MPI_COMM_WORLD); + MPI_Send( buff, position, MPI_PACKED, 1, 0, MPI_COMM_WORLD); + } + else /* RECEIVER CODE */ + MPI_Recv( a, 2, MPI_INT, 0, 0, MPI_COMM_WORLD) + + } + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Unpack` :ref:`MPI_Pack_size` diff --git a/docs/man-openmpi/man3/MPI_Pack_external.3.rst b/docs/man-openmpi/man3/MPI_Pack_external.3.rst new file mode 100644 index 00000000000..4387e675ff1 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Pack_external.3.rst @@ -0,0 +1,193 @@ +.. _mpi_pack_external: + + +MPI_Pack_external +================= + +.. include_body + +:ref:`MPI_Pack_external` - Writes data to a portable format + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Pack_external(const char *datarep, const void *inbuf, + int incount, MPI_Datatype datatype, + void *outbuf, MPI_Aint outsize, + MPI_Aint *position) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! 
or the older form: INCLUDE 'mpif.h' + MPI_PACK_EXTERNAL(DATAREP, INBUF, INCOUNT, DATATYPE, + OUTBUF, OUTSIZE, POSITION, IERROR) + + INTEGER INCOUNT, DATATYPE, IERROR + INTEGER(KIND=MPI_ADDRESS_KIND) OUTSIZE, POSITION + CHARACTER*(*) DATAREP + INBUF(*), OUTBUF(*) + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Pack_external(datarep, inbuf, incount, datatype, outbuf, outsize, + position, ierror) + CHARACTER(LEN=*), INTENT(IN) :: datarep + TYPE(*), DIMENSION(..), INTENT(IN) :: inbuf + TYPE(*), DIMENSION(..) :: outbuf + INTEGER, INTENT(IN) :: incount + TYPE(MPI_Datatype), INTENT(IN) :: datatype + INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: outsize + INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(INOUT) :: position + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``datarep``: Data representation (string). +* ``inbuf``: Input buffer start (choice). +* ``incount``: Number of input data items (integer). +* ``datatype``: Datatype of each input data item (handle). +* ``outsize``: Output buffer size, in bytes (integer). + +INPUT/OUTPUT PARAMETER +---------------------- +* ``position``: Current position in buffer, in bytes (integer). + +OUTPUT PARAMETERS +----------------- +* ``outbuf``: Output buffer start (choice). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Pack_external` packs data into the external32 format, a universal +data representation defined by the MPI Forum. This format is useful for +exchanging data between MPI implementations, or when writing data to a +file. + +The input buffer is specified by *inbuf*, *incount* and *datatype*, and +may be any communication buffer allowed in :ref:`MPI_Send`. The output buffer +*outbuf* must be a contiguous storage area containing *outsize* bytes. 
+ +The input value of *position* is the first position in *outbuf* to be +used for packing (measured in bytes, not elements, relative to the start +of the buffer). When the function returns, *position* is incremented by +the size of the packed message, so that it points to the first location +in *outbuf* following the packed message. This way it may be used as +input to a subsequent call to :ref:`MPI_Pack_external`. + +**Example:** An example using :ref:`MPI_Pack_external`: + +:: + + int position, i; + double msg[5]; + char buf[1000]; + + ... + + MPI_Comm_rank(MPI_COMM_WORLD, &myrank); + if (myrank == 0) { /* SENDER CODE */ + position = 0; + i = 5; /* number of doubles in msg[] */ + MPI_Pack_external("external32", &i, 1, MPI_INT, + buf, 1000, &position); + MPI_Pack_external("external32", &msg, i, MPI_DOUBLE, + buf, 1000, &position); + MPI_Send(buf, position, MPI_BYTE, 1, 0, + MPI_COMM_WORLD); + } else { /* RECEIVER CODE */ + MPI_Recv(buf, 1, MPI_BYTE, 0, 0, MPI_COMM_WORLD, + MPI_STATUS_IGNORE); + MPI_Unpack_external("external32", buf, 1000, + MPI_INT, &i, 1, &position); + MPI_Unpack_external("external32", buf, 1000, + MPI_DOUBLE, &msg, i, &position); + } + + +NOTES +----- + +The *datarep* argument specifies the data format. The only valid value +in the current version of MPI is "external32". The argument is provided +for future extensibility. + +To understand the behavior of pack and unpack, it is convenient to think +of the data part of a message as being the sequence obtained by +concatenating the successive values sent in that message. The pack +operation stores this sequence in the buffer space, as if sending the +message to that buffer. The unpack operation retrieves this sequence +from buffer space, as if receiving a message from that buffer. (It is +helpful to think of internal Fortran files or sscanf in C for a similar +function.) + +Several messages can be successively packed into one packing unit. 
This +is effected by several successive related calls to :ref:`MPI_Pack_external`, +where the first call provides *position*\ =0, and each successive call +inputs the value of *position* that was output by the previous call, +along with the same values for *outbuf* and *outcount*. This packing +unit now contains the equivalent information that would have been stored +in a message by one send call with a send buffer that is the +"concatenation" of the individual send buffers. + +A packing unit can be sent using type MPI_BYTE. Any point-to-point or +collective communication function can be used to move the sequence of +bytes that forms the packing unit from one process to another. This +packing unit can now be received using any receive operation, with any +datatype. (The type-matching rules are relaxed for messages sent with +type MPI_BYTE.) + +A packing unit can be unpacked into several successive messages. This is +effected by several successive related calls to :ref:`MPI_Unpack_external`, +where the first call provides *position*\ =0, and each successive call +inputs the value of position that was output by the previous call, and +the same values for *inbuf* and *insize*. + +The concatenation of two packing units is not necessarily a packing +unit; nor is a substring of a packing unit necessarily a packing unit. +Thus, one cannot concatenate two packing units and then unpack the +result as one packing unit; nor can one unpack a substring of a packing +unit as a separate packing unit. Each packing unit that was created by a +related sequence of pack calls must be unpacked as a unit by a sequence +of related unpack calls. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. 
The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + +See the MPI man page for a full list of MPI error codes. + + +.. seealso:: + :ref:`MPI_Pack_external_size` :ref:`MPI_Send` :ref:`MPI_Unpack_external` sscanf(3C) diff --git a/docs/man-openmpi/man3/MPI_Pack_external_size.3.rst b/docs/man-openmpi/man3/MPI_Pack_external_size.3.rst new file mode 100644 index 00000000000..fb567f378ce --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Pack_external_size.3.rst @@ -0,0 +1,107 @@ +.. _mpi_pack_external_size: + + +MPI_Pack_external_size +====================== + +.. include_body + +:ref:`MPI_Pack_external_size` - Calculates upper bound on space needed to +write to a portable format + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Pack_external_size(char *datarep, int incount, + MPI_Datatype datatype, MPI_Aint *size) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_PACK_EXTERNAL_SIZE(DATAREP, INCOUNT, DATATYPE, SIZE, IERROR) + + INTEGER INCOUNT, DATATYPE, IERROR + INTEGER(KIND=MPI_ADDRESS_KIND) SIZE + CHARACTER*(*) DATAREP + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Pack_external_size(datarep, incount, datatype, size, ierror) + TYPE(MPI_Datatype), INTENT(IN) :: datatype + INTEGER, INTENT(IN) :: incount + CHARACTER(LEN=*), INTENT(IN) :: datarep + INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(OUT) :: size + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``datarep``: Data representation (string). +* ``incount``: Number of input data items (integer). +* ``datatype``: Datatype of each input data item (handle). 
+ +OUTPUT PARAMETERS +----------------- +* ``size``: Upper bound on size of packed message, in bytes (integer). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Pack_external_size` allows the application to find out how much space +is needed to pack a message in the portable format defined by the MPI +Forum. It returns in *size* an upper bound on the increment in +*position* that would occur in a call to :ref:`MPI_Pack_external` with the same +values for *datarep*, *incount*, and *datatype*. + +The call returns an upper bound, rather than an exact bound, as the +exact amount of space needed to pack the message may depend on context +and alignment (e.g., the first message packed in a packing unit may take +more space). + + +NOTES +----- + +The *datarep* argument specifies the data format. The only valid value +in the current version of MPI is "external32". The argument is provided +for future extensibility. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + +See the MPI man page for a full list of MPI error codes. + + +.. seealso:: + :ref:`MPI_Pack_external` :ref:`MPI_Unpack_external` diff --git a/docs/man-openmpi/man3/MPI_Pack_size.3.rst b/docs/man-openmpi/man3/MPI_Pack_size.3.rst new file mode 100644 index 00000000000..e30a103f6a4 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Pack_size.3.rst @@ -0,0 +1,94 @@ +.. _mpi_pack_size: + + +MPI_Pack_size +============= + +.. 
include_body + +:ref:`MPI_Pack_size` - Returns the upper bound on the amount of space +needed to pack a message. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Pack_size(int incount, MPI_Datatype datatype, MPI_Comm comm, + int *size) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_PACK_SIZE(INCOUNT, DATATYPE, COMM, SIZE, IERROR) + INTEGER INCOUNT, DATATYPE, COMM, SIZE, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Pack_size(incount, datatype, comm, size, ierror) + INTEGER, INTENT(IN) :: incount + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, INTENT(OUT) :: size + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``incount``: Count argument to packing call (integer). +* ``datatype``: Datatype argument to packing call (handle). +* ``comm``: Communicator argument to packing call (handle). + +OUTPUT PARAMETERS +----------------- +* ``size``: Upper bound on size of packed message, in bytes (integer). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Pack_size` allows the application to find out how much space is +needed to pack a message. A call to MPI_Pack_size(incount, datatype, +comm, size) returns in size an upper bound on the increment in position +that would occur in a call to :ref:`MPI_Pack`, with the same values for +*incount*, *datatype*, and *comm*. + +**Rationale:** The call returns an upper bound, rather than an exact +bound, since the exact amount of space needed to pack the message may +depend on the context (e.g., first message packed in a packing unit may +take more space). + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. 
+ +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Pack` :ref:`MPI_Unpack` diff --git a/docs/man-openmpi/man3/MPI_Parrived.3.rst b/docs/man-openmpi/man3/MPI_Parrived.3.rst new file mode 100644 index 00000000000..bf2f60c42a2 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Parrived.3.rst @@ -0,0 +1,76 @@ +.. _mpi_parrived: + + +MPI_Parrived +============ + +.. include_body + +:ref:`MPI_Parrived` - Tests for completion of a specified receive-side +partition. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Parrived(MPI_Request *request, int partition, int *flag) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_PARRIVED(REQUEST, PARTITION, FLAG IERROR) + INTEGER REQUEST, PARTITION, FLAG(*), IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Parrived(request, partition, flag, ierror) + TYPE(MPI_Request), INTENT(in) :: request + INTEGER, INTENT(IN) :: partition + INTEGER, INTENT(out) :: flag + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``request``: Communication request (handle). +* ``partition``: The number of the partition to test for completion (integer). + +OUTPUT PARAMETERS +----------------- +* ``flag``: True if partition is completed. +* ``IERROR``: Fortran only: Error status (integer). + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. 
+ +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Pready_list` :ref:`MPI_Pready_range` :ref:`MPI_Parrived` diff --git a/docs/man-openmpi/man3/MPI_Pcontrol.3.rst b/docs/man-openmpi/man3/MPI_Pcontrol.3.rst new file mode 100644 index 00000000000..46fe3d273fe --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Pcontrol.3.rst @@ -0,0 +1,95 @@ +.. _mpi_pcontrol: + + +MPI_Pcontrol +============ + +.. include_body + +:ref:`MPI_Pcontrol` - Controls profiling. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Pcontrol(const int level, ... ) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_PCONTROL(LEVEL) + INTEGER LEVEL + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Pcontrol(level) + INTEGER, INTENT(IN) :: level + + +INPUT PARAMETER +--------------- +* ``level``: Profiling level. + +DESCRIPTION +----------- + +MPI libraries themselves make no use of this routine; they simply return +immediately to the user code. However the presence of calls to this +routine allows a profiling package to be explicitly called by the user. + +Since MPI has no control of the implementation of the profiling code, we +are unable to specify precisely the semantics that will be provided by +calls to :ref:`MPI_Pcontrol`. This vagueness extends to the number of arguments +to the function, and their datatypes. 
+ +However to provide some level of portability of user codes to different +profiling libraries, we request the following meanings for certain +values of level: + + o level==0 Profiling is disabled. + + o level==1 Profiling is enabled at a normal default level of detail. + + o level==2 Profile buffers are flushed. (This may be a no-op in some profilers). + + o All other values of level have profile library-defined effects and additional arguments. + +We also request that the default state after :ref:`MPI_Init` has been called is +for profiling to be enabled at the normal default level (i.e., as if +:ref:`MPI_Pcontrol` had just been called with the argument 1). This allows +users to link with a profiling library and obtain profile output without +having to modify their source code at all. + +The provision of :ref:`MPI_Pcontrol` as a no-op in the standard MPI library +allows users to modify their source code to obtain more detailed +profiling information, but still be able to link exactly the same code +against the standard MPI library. + + +NOTES +----- + +This routine provides a common interface for profiling control. The +interpretation of level and any other arguments is left to the profiling +library. + +This function does not return an error value. Consequently, the result +of calling it before :ref:`MPI_Init` or after :ref:`MPI_Finalize` is undefined. diff --git a/docs/man-openmpi/man3/MPI_Pready.3.rst b/docs/man-openmpi/man3/MPI_Pready.3.rst new file mode 100644 index 00000000000..6c44b2e6f12 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Pready.3.rst @@ -0,0 +1,74 @@ +.. _mpi_pready: + + +MPI_Pready +========== + +.. include_body + +:ref:`MPI_Pready` - Indicates that a given send-side partition is ready to +be transferred. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Pready(int partition, MPI_Request *request) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! 
or the older form: INCLUDE 'mpif.h' + MPI_PREADY(PARTITION, REQUEST, IERROR) + INTEGER PARTITION, REQUEST, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Pready(partition, request, ierror) + INTEGER, INTENT(IN) :: partition + TYPE(MPI_Request), INTENT(IN) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``partition``: The number of the partition to mark ready for transfer (integer). +* ``request``: Communication request (handle). + +OUTPUT PARAMETERS +----------------- +* ``IERROR``: Fortran only: Error status (integer). + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Pready_list` :ref:`MPI_Pready_range` :ref:`MPI_Parrived` diff --git a/docs/man-openmpi/man3/MPI_Pready_list.3.rst b/docs/man-openmpi/man3/MPI_Pready_list.3.rst new file mode 100644 index 00000000000..8c943335c06 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Pready_list.3.rst @@ -0,0 +1,76 @@ +.. _mpi_pready_list: + + +MPI_Pready_list +=============== + +.. include_body + +:ref:`MPI_Pready_list` - Indicates that a list given send-side partitions +are ready to be transferred. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Pready_list(int length, int *partitions, MPI_Request *request) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! 
or the older form: INCLUDE 'mpif.h' + MPI_PREADY_LIST(LENGTH, PARTITIONS, REQUEST, IERROR) + INTEGER LENGTH, PARTITIONS(*), REQUEST, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Pready_list(length, partitions, request, ierror) + INTEGER, INTENT(IN) :: length + INTEGER, INTENT(IN) :: partitions + TYPE(MPI_Request), INTENT(IN) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``length``: The length of the given partition array (integer). +* ``partitions``: An array of numbers of partitions to mark ready for transfer (integer). +* ``request``: Communication request (handle). + +OUTPUT PARAMETERS +----------------- +* ``IERROR``: Fortran only: Error status (integer). + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Pready` :ref:`MPI_Pready_range` :ref:`MPI_Parrived` diff --git a/docs/man-openmpi/man3/MPI_Pready_range.3.rst b/docs/man-openmpi/man3/MPI_Pready_range.3.rst new file mode 100644 index 00000000000..3e91b7786af --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Pready_range.3.rst @@ -0,0 +1,75 @@ +.. _mpi_pready_range: + + +MPI_Pready_range +================ + +.. include_body + +:ref:`MPI_Pready_range` - Indicates that a given range os send-side +partitions are ready to be transferred. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. 
code-block:: c + + #include + + int MPI_Pready_range(int partition_low, int partition_high, MPI_Request *request) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_PREADY(PARTITION_LOW, PARTITION_HIGH, REQUEST, IERROR) + INTEGER PARTITION_LOW, PARTITION_HIGH, REQUEST, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Pready(partition_low, partition_high, request, ierror) + INTEGER, INTENT(IN) :: partition_low, partition_high + TYPE(MPI_Request), INTENT(IN) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``partition_low``: The lowest of the range of partitions to mark ready for transfer (integer). +* ``partition_high``: The highest of the range of partitions to mark ready for transfer (integer). +* ``request``: Communication request (handle). + +OUTPUT PARAMETERS +----------------- +* ``IERROR``: Fortran only: Error status (integer). + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Pready` :ref:`MPI_Pready_list` :ref:`MPI_Parrived` diff --git a/docs/man-openmpi/man3/MPI_Precv_init.3.rst b/docs/man-openmpi/man3/MPI_Precv_init.3.rst new file mode 100644 index 00000000000..fc8f282f77b --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Precv_init.3.rst @@ -0,0 +1,94 @@ +.. _mpi_precv_init: + + +MPI_Precv_init +============== + +.. 
include_body + +:ref:`MPI_Precv_init` - Initializes a partitioned receive. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Precv_init(const void *buf, int partitions, int count, MPI_Datatype datatype, int dest, + int tag, MPI_Comm comm, MPI_Request *request) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_PRECV_INIT(BUF, PARTITIONS, COUNT, DATATYPE, DEST, TAG, COMM, REQUEST, IERROR) + BUF(*) + INTEGER PARTITIONS, COUNT, DATATYPE, DEST, TAG, COMM, REQUEST, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Precv_init(buf, partitions, count, datatype, dest, tag, comm, request, ierror) + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: buf + INTEGER, INTENT(IN) :: partitions, count, dest, tag + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``buf``: Initial address of receive buffer (choice). +* ``partitions``: Number of partitions (integer). +* ``count``: Number of elements to be received per partition (integer). +* ``datatype``: Datatype of each element (handle). +* ``dest``: Rank of source (integer). +* ``tag``: Message tag (integer). +* ``comm``: Communicator (handle). + +OUTPUT PARAMETERS +----------------- +* ``request``: Communication request (handle). +* ``IERROR``: Fortran only: Error status (integer). + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. 
The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +NOTE +---- + +The current implementation is an early prototype and is not fully +compliant with the MPI-4.0 specification. Specifically this function and +it's counterpart (MPI_Psend_init) will block until the partitioned +communication request is initialized on both ends. This behavior will be +corrected in future versions. + + +.. seealso:: + :ref:`MPI_Psend_init` diff --git a/docs/man-openmpi/man3/MPI_Probe.3.rst b/docs/man-openmpi/man3/MPI_Probe.3.rst new file mode 100644 index 00000000000..da6754123a7 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Probe.3.rst @@ -0,0 +1,156 @@ +.. _mpi_probe: + + +MPI_Probe +========= + +.. include_body + +:ref:`MPI_Probe` - Blocking test for a message. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Probe(int source, int tag, MPI_Comm comm, MPI_Status *status) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_PROBE(SOURCE, TAG, COMM, STATUS, IERROR) + INTEGER SOURCE, TAG, COMM, STATUS(MPI_STATUS_SIZE), IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Probe(source, tag, comm, status, ierror) + INTEGER, INTENT(IN) :: source, tag + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Status) :: status + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``source``: Source rank or MPI_ANY_SOURCE (integer). +* ``tag``: Tag value or MPI_ANY_TAG (integer). +* ``comm``: Communicator (handle). + +OUTPUT PARAMETERS +----------------- +* ``status``: Status object (status). +* ``IERROR``: Fortran only: Error status (integer). 
+ +DESCRIPTION +----------- + +The :ref:`MPI_Probe` and :ref:`MPI_Iprobe` operations allow checking of incoming +messages, without actual receipt of them. The user can then decide how +to receive them, based on the information returned by the probe in the +status variable. For example, the user may allocate memory for the +receive buffer, according to the length of the probed message. + +:ref:`MPI_Probe` behaves like :ref:`MPI_Iprobe` except that it is a blocking call that +returns only after a matching message has been found. + +If your application does not need to examine the *status* field, you can +save resources by using the predefined constant MPI_STATUS_IGNORE as a +special value for the *status* argument. + +The semantics of :ref:`MPI_Probe` and :ref:`MPI_Iprobe` guarantee progress: If a call +to :ref:`MPI_Probe` has been issued by a process, and a send that matches the +probe has been initiated by some process, then the call to :ref:`MPI_Probe` +will return, unless the message is received by another concurrent +receive operation (that is executed by another thread at the probing +process). Similarly, if a process busy waits with :ref:`MPI_Iprobe` and a +matching message has been issued, then the call to :ref:`MPI_Iprobe` will +eventually return flag = true unless the message is received by another +concurrent receive operation. + +**Example 1:** Use blocking probe to wait for an incoming message. + +:: + + CALL MPI_COMM_RANK(comm, rank, ierr) + IF (rank.EQ.0) THEN + CALL MPI_SEND(i, 1, MPI_INTEGER, 2, 0, comm, ierr) + ELSE IF(rank.EQ.1) THEN + CALL MPI_SEND(x, 1, MPI_REAL, 2, 0, comm, ierr) + ELSE ! rank.EQ.2 + DO i=1, 2 + CALL MPI_PROBE(MPI_ANY_SOURCE, 0, + comm, status, ierr) + IF (status(MPI_SOURCE) = 0) THEN + 100 CALL MPI_RECV(i, 1, MPI_INTEGER, 0, 0, status, ierr) + ELSE + 200 CALL MPI_RECV(x, 1, MPI_REAL, 1, 0, status, ierr) + END IF + END DO + END IF + +Each message is received with the right type. 
+ +**Example 2:** A program similar to the previous example, but with a +problem. + +:: + + CALL MPI_COMM_RANK(comm, rank, ierr) + IF (rank.EQ.0) THEN + CALL MPI_SEND(i, 1, MPI_INTEGER, 2, 0, comm, ierr) + ELSE IF(rank.EQ.1) THEN + CALL MPI_SEND(x, 1, MPI_REAL, 2, 0, comm, ierr) + ELSE + DO i=1, 2 + CALL MPI_PROBE(MPI_ANY_SOURCE, 0, + comm, status, ierr) + IF (status(MPI_SOURCE) = 0) THEN + 100 CALL MPI_RECV(i, 1, MPI_INTEGER, MPI_ANY_SOURCE, + 0, status, ierr) + ELSE + 200 CALL MPI_RECV(x, 1, MPI_REAL, MPI_ANY_SOURCE, + 0, status, ierr) + END IF + END DO + END IF + +We slightly modified Example 2, using MPI_ANY_SOURCE as the source +argument in the two receive calls in statements labeled 100 and 200. The +program is now incorrect: The receive operation may receive a message +that is distinct from the message probed by the preceding call to +:ref:`MPI_Probe`. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Iprobe` :ref:`MPI_Cancel` diff --git a/docs/man-openmpi/man3/MPI_Psend_init.3.rst b/docs/man-openmpi/man3/MPI_Psend_init.3.rst new file mode 100644 index 00000000000..2ba8fb73be4 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Psend_init.3.rst @@ -0,0 +1,94 @@ +.. _mpi_psend_init: + + +MPI_Psend_init +============== + +.. include_body + +:ref:`MPI_Psend_init` - Initializes a partitioned send. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. 
code-block:: c + + #include + + int MPI_Psend_init(const void *buf, int partitions, int count, MPI_Datatype datatype, int dest, + int tag, MPI_Comm comm, MPI_Request *request) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_PSEND_INIT(BUF, PARTITIONS, COUNT, DATATYPE, DEST, TAG, COMM, REQUEST, IERROR) + BUF(*) + INTEGER PARTITIONS, COUNT, DATATYPE, DEST, TAG, COMM, REQUEST, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Psend_init(buf, partitions, count, datatype, dest, tag, comm, request, ierror) + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: buf + INTEGER, INTENT(IN) :: partitions, count, dest, tag + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``buf``: Initial address of send buffer (choice). +* ``partitions``: Number of partitions (integer). +* ``count``: Number of elements to be sent per partition (integer). +* ``datatype``: Datatype of each element (handle). +* ``dest``: Rank of source (integer). +* ``tag``: Message tag (integer). +* ``comm``: Communicator (handle). + +OUTPUT PARAMETERS +----------------- +* ``request``: Communication request (handle). +* ``IERROR``: Fortran only: Error status (integer). + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. 
+ + +NOTE +---- + +The current implementation is an early prototype and is not fully +compliant with the MPI-4.0 specification. Specifically this function and +it's counterpart (MPI_Precv_init) will block until the partitioned +communication request is initialized on both ends. This behavior will be +corrected in future versions. + + +.. seealso:: + :ref:`MPI_Precv_init` diff --git a/docs/man-openmpi/man3/MPI_Publish_name.3.rst b/docs/man-openmpi/man3/MPI_Publish_name.3.rst new file mode 100644 index 00000000000..224b8ab2319 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Publish_name.3.rst @@ -0,0 +1,171 @@ +.. _mpi_publish_name: + + +MPI_Publish_name +================ + +.. include_body + +:: + + MPI_Publish_name - Publishes a service name associated with a port + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Publish_name(const char *service_name, MPI_Info info, + const char *port_name) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_PUBLISH_NAME(SERVICE_NAME, INFO, PORT_NAME, IERROR) + CHARACTER*(*) SERVICE_NAME, PORT_NAME + INTEGER INFO, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Publish_name(service_name, info, port_name, ierror) + TYPE(MPI_Info), INTENT(IN) :: info + CHARACTER(LEN=*), INTENT(IN) :: service_name, port_name + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``service_name``: A service name (string). +* ``info``: Options to the name service functions (handle). +* ``port_name``: A port name (string). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +This routine publishes the pair (*service_name, port_name*) so that an +application may retrieve *port_name* by calling :ref:`MPI_Lookup_name` with +*service_name* as an argument. 
It is an error to publish the same +*service_name* twice, or to use a *port_name* argument that was not +previously opened by the calling process via a call to :ref:`MPI_Open_port`. + + +INFO ARGUMENTS +-------------- + +The following keys for *info* are recognized: + +:: + + Key Type Description + --- ---- ----------- + + ompi_global_scope bool If set to true, publish the name in + the global scope. Publish in the local + scope otherwise. See the NAME SCOPE + section for more details. + + ompi_unique bool If set to true, return an error if the + specified service_name already exists. + Default to overwriting any pre-existing + value. + +*bool* info keys are actually strings but are evaluated as follows: if +the string value is a number, it is converted to an integer and cast to +a boolean (meaning that zero integers are false and non-zero values are +true). If the string value is (case-insensitive) "yes" or "true", the +boolean is true. If the string value is (case-insensitive) "no" or +"false", the boolean is false. All other string values are unrecognized, +and therefore false. + +If no info key is provided, the function will first check to see if a +global server has been specified and is available. If so, then the +publish function will default to global scope first, followed by local. +Otherwise, the data will default to publish with local scope. + + +NAME SCOPE +---------- + +Open MPI supports two name scopes: *global* and *local*. Local scope +will place the specified service/port pair in a data store located on +the mpirun of the calling process' job. Thus, data published with local +scope will only be accessible to processes in jobs spawned by that +mpirun - e.g., processes in the calling process' job, or in jobs spawned +via :ref:`MPI_Comm_spawn`. + +Global scope places the specified service/port pair in a data store +located on a central server that is accessible to all jobs running in +the cluster or environment. 
Thus, data published with global scope can +be accessed by multiple mpiruns and used for :ref:`MPI_Comm_Connect` and +:ref:`MPI_Comm_accept` between jobs. + +Note that global scope operations require both the presence of the +central server and that the calling process be able to communicate to +that server. :ref:`MPI_Publish_name` will return an error if global scope is +specified and a global server is either not specified or cannot be +found. + +Open MPI provides a server called *ompi-server* to support global scope +operations. Please refer to its manual page for a more detailed +description of data store/lookup operations. + +As an example of the impact of these scoping rules, consider the case +where a job has been started with mpirun - call this job "job1". A +process in job1 creates and publishes a service/port pair using a local +scope. Open MPI will store this data in the data store within mpirun. + +A process in job1 (perhaps the same as did the publish, or perhaps some +other process in the job) subsequently calls :ref:`MPI_Comm_spawn` to start +another job (call it "job2") under this mpirun. Since the two jobs share +a common mpirun, both jobs have access to local scope data. Hence, a +process in job2 can perform an :ref:`MPI_Lookup_name` with a local scope to +retrieve the information. + +However, assume another user starts a job using mpirun - call this job +"job3". Because the service/port data published by job1 specified local +scope, processes in job3 cannot access that data. In contrast, if the +data had been published using global scope, then any process in job3 +could access the data, provided that mpirun was given knowledge of how +to contact the central server and the process could establish +communication with it. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. 
+ +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + +See the MPI man page for a full list of MPI error codes. + + +.. seealso:: + :ref:`MPI_Lookup_name` :ref:`MPI_Open_port` diff --git a/docs/man-openmpi/man3/MPI_Put.3.rst b/docs/man-openmpi/man3/MPI_Put.3.rst new file mode 100644 index 00000000000..e7b8a8c3398 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Put.3.rst @@ -0,0 +1,197 @@ +.. _mpi_put: + + +MPI_Put +======= + +.. include_body + +:ref:`MPI_Put`, :ref:`MPI_Rput` - Copies data from the origin memory to the +target. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + MPI_Put(const void *origin_addr, int origin_count, MPI_Datatype + origin_datatype, int target_rank, MPI_Aint target_disp, + int target_count, MPI_Datatype target_datatype, MPI_Win win) + + MPI_Rput(const void *origin_addr, int origin_count, MPI_Datatype + origin_datatype, int target_rank, MPI_Aint target_disp, + int target_count, MPI_Datatype target_datatype, MPI_Win win, + MPI_Request *request) + + +Fortran Syntax (see FORTRAN 77 NOTES) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! 
or the older form: INCLUDE 'mpif.h' + MPI_PUT(ORIGIN_ADDR, ORIGIN_COUNT, ORIGIN_DATATYPE, TARGET_RANK, + TARGET_DISP, TARGET_COUNT, TARGET_DATATYPE, WIN, IERROR) + ORIGIN_ADDR(*) + INTEGER(KIND=MPI_ADDRESS_KIND) TARGET_DISP + INTEGER ORIGIN_COUNT, ORIGIN_DATATYPE, TARGET_RANK, TARGET_COUNT, + TARGET_DATATYPE, WIN, IERROR + + MPI_RPUT(ORIGIN_ADDR, ORIGIN_COUNT, ORIGIN_DATATYPE, TARGET_RANK, + TARGET_DISP, TARGET_COUNT, TARGET_DATATYPE, WIN, REQUEST, IERROR) + ORIGIN_ADDR(*) + INTEGER(KIND=MPI_ADDRESS_KIND) TARGET_DISP + INTEGER ORIGIN_COUNT, ORIGIN_DATATYPE, TARGET_RANK, TARGET_COUNT, + TARGET_DATATYPE, WIN, REQUEST, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Put(origin_addr, origin_count, origin_datatype, target_rank, + target_disp, target_count, target_datatype, win, ierror) + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: origin_addr + INTEGER, INTENT(IN) :: origin_count, target_rank, target_count + TYPE(MPI_Datatype), INTENT(IN) :: origin_datatype, target_datatype + INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: target_disp + TYPE(MPI_Win), INTENT(IN) :: win + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Rput(origin_addr, origin_count, origin_datatype, target_rank, + target_disp, target_count, target_datatype, win, request, + ierror) + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: origin_addr + INTEGER, INTENT(IN) :: origin_count, target_rank, target_count + TYPE(MPI_Datatype), INTENT(IN) :: origin_datatype, target_datatype + INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: target_disp + TYPE(MPI_Win), INTENT(IN) :: win + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``origin_addr``: Initial address of origin buffer (choice). +* ``origin_count``: Number of entries in origin buffer (nonnegative integer). +* ``origin_datatype``: Data type of each entry in origin buffer (handle). 
+* ``target_rank``: Rank of target (nonnegative integer). +* ``target_disp``: Displacement from start of window to target buffer (nonnegative integer). +* ``target_count``: Number of entries in target buffer (nonnegative integer). +* ``target_datatype``: Data type of each entry in target buffer (handle). +* ``win``: Window object used for communication (handle). + +OUTPUT PARAMETER +---------------- +* ``request``: MPI_Rput: RMA request +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Put` transfers *origin_count* successive entries of the type +specified by *origin_datatype*, starting at address *origin_addr* on the +origin node to the target node specified by the *win*, *target_rank* + +pair. The data are written in the target buffer at address *target_addr* +^ *window_base* + *target_disp* x *disp_unit*, where *window_base* and +*disp_unit* are the base address and window displacement unit specified +at window initialization, by the target process. + +The target buffer is specified by the arguments *target_count* and +*target_datatype*. + +The data transfer is the same as that which would occur if the origin +process executed a send operation with arguments *origin_addr*, +*origin_count*, *origin_datatype*, *target_rank*, *tag*, *comm*, and the +target process executed a receive operation with arguments +*target_addr*, *target_count*, *target_datatype*, *source*, *tag*, +*comm*, where *target_addr* is the target buffer address computed as +explained above, and *comm* is a communicator for the group of *win*. + +The communication must satisfy the same constraints as for a similar +message-passing communication. The *target_datatype* may not specify +overlapping entries in the target buffer. The message sent must fit, +without truncation, in the target buffer. Furthermore, the target buffer +must fit in the target window. In addition, only processes within the +same buffer can access the target window. 
+ +The *target_datatype* argument is a handle to a datatype object defined +at the origin process. However, this object is interpreted at the target +process: The outcome is as if the target datatype object were defined at +the target process, by the same sequence of calls used to define it at +the origin process. The target data type must contain only relative +displacements, not absolute addresses. The same holds for get and +accumulate. + +:ref:`MPI_Rput` is similar to :ref:`MPI_Put`, except that it allocates a +communication request object and associates it with the request handle +(the argument *request*). The completion of an :ref:`MPI_Rput` operation (i.e., +after the corresponding test or wait) indicates that the sender is now +free to update the locations in the *origin_addr* buffer. It does not +indicate that the data is available at the target window. If remote +completion is required, :ref:`MPI_Win_flush`, :ref:`MPI_Win_flush_all`, +:ref:`MPI_Win_unlock`, or :ref:`MPI_Win_unlock_all` can be used. + + +NOTES +----- + +The *target_datatype* argument is a handle to a datatype object that is +defined at the origin process, even though it defines a data layout in +the target process memory. This does not cause problems in a homogeneous +or heterogeneous environment, as long as only portable data types are +used (portable data types are defined in Section 2.4 of the MPI-2 +Standard). + +The performance of a put transfer can be significantly affected, on some +systems, from the choice of window location and the shape and location +of the origin and target buffer: Transfers to a target window in memory +allocated by :ref:`MPI_Alloc_mem` may be much faster on shared memory systems; +transfers from contiguous buffers will be faster on most, if not all, +systems; the alignment of the communication buffers may also impact +performance. 
+ + +FORTRAN 77 NOTES +---------------- + +The MPI standard prescribes portable Fortran syntax for the +*TARGET_DISP* argument only for Fortran 90. FORTRAN 77 users may use the +non-portable syntax + +:: + + INTEGER*MPI_ADDRESS_KIND TARGET_DISP + +where MPI_ADDRESS_KIND is a constant defined in mpif.h and gives the +length of the declared integer in bytes. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Get` :ref:`MPI_Rget` :ref:`MPI_Accumulate` :ref:`MPI_Win_flush` :ref:`MPI_Win_flush_all` + :ref:`MPI_Win_unlock` :ref:`MPI_Win_unlock_all` diff --git a/docs/man-openmpi/man3/MPI_Query_thread.3.rst b/docs/man-openmpi/man3/MPI_Query_thread.3.rst new file mode 100644 index 00000000000..b075f5f104c --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Query_thread.3.rst @@ -0,0 +1,103 @@ +.. _mpi_query_thread: + + +MPI_Query_thread +================ + +.. include_body + +:ref:`MPI_Query_thread` - Returns the current level of thread support + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Query_thread(int *provided) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_QUERY_THREAD(PROVIDED, IERROR) + INTEGER PROVIDED, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE mpi_f08 + MPI_Query_thread(provided, ierror) + INTEGER, INTENT(OUT) :: provided + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +OUTPUT PARAMETERS +----------------- +* ``provided``: C/Fortran only: Level of thread support (integer). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +This routine returns in *provided* the current level of thread support. +If MPI was initialized by a call to :ref:`MPI_Init_thread`, *provided* will +have the same value as was returned by that function. + +The possible values of *provided* are as follows: + +MPI_THREAD_SINGLE + Only one thread may execute. + +MPI_THREAD_FUNNELED + If the process is multithreaded, only the thread that called + MPI_Init[_thread] may make MPI calls. + +MPI_THREAD_SERIALIZED + If the process is multithreaded, only one thread may make MPI library + calls at one time. + +MPI_THREAD_MULTIPLE + If the process is multithreaded, multiple threads may call MPI at + once with no restrictions. + + +NOTES +----- + +In Open MPI, *provided* is always MPI_THREAD_SINGLE, unless the program +has been linked with the multithreaded library, in which case *provided* +is MPI_THREAD_MULTIPLE. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + +See the MPI man page for a full list of MPI error codes. + + +.. 
seealso:: + :ref:`MPI_Init` :ref:`MPI_Init_thread` diff --git a/docs/man-openmpi/man3/MPI_Raccumulate.3.rst b/docs/man-openmpi/man3/MPI_Raccumulate.3.rst new file mode 100644 index 00000000000..f3698b51eec --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Raccumulate.3.rst @@ -0,0 +1,9 @@ +.. _mpi_raccumulate: + +MPI_Raccumulate +=============== + .. include_body + +.. include:: ../man3/MPI_Accumulate.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Recv.3.rst b/docs/man-openmpi/man3/MPI_Recv.3.rst new file mode 100644 index 00000000000..3959d70519b --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Recv.3.rst @@ -0,0 +1,159 @@ +.. _mpi_recv: + + +MPI_Recv +======== + +.. include_body + +:ref:`MPI_Recv` - Performs a standard-mode blocking receive. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Recv(void *buf, int count, MPI_Datatype datatype, + int source, int tag, MPI_Comm comm, MPI_Status *status) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_RECV(BUF, COUNT, DATATYPE, SOURCE, TAG, COMM, STATUS, IERROR) + BUF(*) + INTEGER COUNT, DATATYPE, SOURCE, TAG, COMM + INTEGER STATUS(MPI_STATUS_SIZE), IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Recv(buf, count, datatype, source, tag, comm, status, ierror) + TYPE(*), DIMENSION(..) :: buf + INTEGER, INTENT(IN) :: count, source, tag + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Status) :: status + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``count``: Maximum number of elements to receive (integer). +* ``datatype``: Datatype of each receive buffer entry (handle). +* ``source``: Rank of source (integer). +* ``tag``: Message tag (integer). +* ``comm``: Communicator (handle). 
+ +OUTPUT PARAMETERS +----------------- +* ``buf``: Initial address of receive buffer (choice). +* ``status``: Status object (status). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +This basic receive operation, :ref:`MPI_Recv`, is blocking: it returns only +after the receive buffer contains the newly received message. A receive +can complete before the matching send has completed (of course, it can +complete only after the matching send has started). + +The blocking semantics of this call are described in Section 3.4 of the +MPI-1 Standard, "Communication Modes." + +The receive buffer contains a number (defined by the value of *count*) +of consecutive elements. The first element in the set of elements is +located at *address_buf*. The type of each of these elements is +specified by *datatype*. + +The length of the received message must be less than or equal to the +length of the receive buffer. An MPI_ERR_TRUNCATE is returned upon the +overflow condition. + +If a message that is shorter than the length of the receive buffer +arrives, then only those locations corresponding to the (shorter) +received message are modified. + + +NOTES +----- + +The *count* argument indicates the maximum number of entries of type +*datatype* that can be received in a message. Once a message is +received, use the :ref:`MPI_Get_count` function to determine the actual number +of entries within that message. + +To receive messages of unknown length, use the :ref:`MPI_Probe` function. (For +more information about :ref:`MPI_Probe` and :ref:`MPI_Cancel`, see their respective +man pages; also, see Section 3.8 of the MPI-1 Standard, "Probe and +Cancel.") + +A message can be received by a receive operation only if it is addressed +to the receiving process, and if its source, tag, and communicator +(comm) values match the source, tag, and comm values specified by the +receive operation. 
The receive operation may specify a wildcard value +for source and/or tag, indicating that any source and/or tag are +acceptable. The wildcard value for source is source = MPI_ANY_SOURCE. +The wildcard value for tag is tag = MPI_ANY_TAG. There is no wildcard +value for comm. The scope of these wildcards is limited to the proceses +in the group of the specified communicator. + +The message tag is specified by the tag argument of the receive +operation. + +The argument source, if different from MPI_ANY_SOURCE, is specified as a +rank within the process group associated with that same communicator +(remote process group, for intercommunicators). Thus, the range of valid +values for the source argument is {0,...,n-1} {MPI_ANY_SOURCE}, where n +is the number of processes in this group. + +Note the asymmetry between send and receive operations: A receive +operation may accept messages from an arbitrary sender; on the other +hand, a send operation must specify a unique receiver. This matches a +"push" communication mechanism, where data transfer is effected by the +sender (rather than a "pull" mechanism, where data transfer is effected +by the receiver). + +Source = destination is allowed, that is, a process can send a message +to itself. However, it is not recommended for a process to send messages +to itself using the blocking send and receive operations described +above, since this may lead to deadlock. See Section 3.5 of the MPI-1 +Standard, "Semantics of Point-to-Point Communication." + +If your application does not need to examine the *status* field, you can +save resources by using the predefined constant MPI_STATUS_IGNORE as a +special value for the *status* argument. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. 
By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Irecv` :ref:`MPI_Probe` diff --git a/docs/man-openmpi/man3/MPI_Recv_init.3.rst b/docs/man-openmpi/man3/MPI_Recv_init.3.rst new file mode 100644 index 00000000000..cd0dc29b4d9 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Recv_init.3.rst @@ -0,0 +1,102 @@ +.. _mpi_recv_init: + + +MPI_Recv_init +============= + +.. include_body + +:ref:`MPI_Recv_init` - Builds a handle for a receive. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Recv_init(void *buf, int count, MPI_Datatype datatype, + int source, int tag, MPI_Comm comm, MPI_Request *request) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_RECV_INIT(BUF, COUNT, DATATYPE, SOURCE, TAG, COMM, REQUEST, + IERROR) + BUF(*) + INTEGER COUNT, DATATYPE, SOURCE, TAG, COMM, REQUEST, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Recv_init(buf, count, datatype, source, tag, comm, request, ierror) + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: buf + INTEGER, INTENT(IN) :: count, source, tag + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``count``: Maximum number of elements to receive (integer). +* ``datatype``: Type of each entry (handle). +* ``source``: Rank of source (integer). +* ``tag``: Message tag (integer). +* ``comm``: Communicator (handle). 
+ +INPUT/OUTPUT PARAMETER +---------------------- +* ``buf``: Initial address of receive buffer (choice). + +OUTPUT PARAMETERS +----------------- +* ``request``: Communication request (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +Creates a persistent communication request for a receive operation. The +argument *buf* is marked as OUT because the user gives permission to +write on the receive buffer by passing the argument to :ref:`MPI_Recv_init`. + +A persistent communication request is inactive after it is created -- no +active communication is attached to the request. + +A communication (send or receive) that uses a persistent request is +initiated by the function :ref:`MPI_Start` or :ref:`MPI_Startall`. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Bsend_init` :ref:`MPI_Rsend_init` :ref:`MPI_Send_init` MPI_Sssend_init :ref:`MPI_Start` + :ref:`MPI_Startall` :ref:`MPI_Request_free` diff --git a/docs/man-openmpi/man3/MPI_Reduce.3.rst b/docs/man-openmpi/man3/MPI_Reduce.3.rst new file mode 100644 index 00000000000..5d66aab0b75 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Reduce.3.rst @@ -0,0 +1,565 @@ +.. _mpi_reduce: + + +MPI_Reduce +========== + +.. include_body + +:ref:`MPI_Reduce`, :ref:`MPI_Ireduce`, :ref:`MPI_Reduce_init` - Reduces values on all +processes within a group. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. 
code-block:: c + + #include + + int MPI_Reduce(const void *sendbuf, void *recvbuf, int count, + MPI_Datatype datatype, MPI_Op op, int root, + MPI_Comm comm) + + int MPI_Ireduce(const void *sendbuf, void *recvbuf, int count, + MPI_Datatype datatype, MPI_Op op, int root, + MPI_Comm comm, MPI_Request *request) + + + int MPI_Reduce_init(const void *sendbuf, void *recvbuf, int count, + MPI_Datatype datatype, MPI_Op op, int root, + MPI_Comm comm, MPI_Info info, MPI_Request *request) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_REDUCE(SENDBUF, RECVBUF, COUNT, DATATYPE, OP, ROOT, COMM, + IERROR) + SENDBUF(*), RECVBUF(*) + INTEGER COUNT, DATATYPE, OP, ROOT, COMM, IERROR + + MPI_IREDUCE(SENDBUF, RECVBUF, COUNT, DATATYPE, OP, ROOT, COMM, + REQUEST, IERROR) + SENDBUF(*), RECVBUF(*) + INTEGER COUNT, DATATYPE, OP, ROOT, COMM, REQUEST, IERROR + + MPI_REDUCE_INIT(SENDBUF, RECVBUF, COUNT, DATATYPE, OP, ROOT, COMM, + INFO, REQUEST, IERROR) + SENDBUF(*), RECVBUF(*) + INTEGER COUNT, DATATYPE, OP, ROOT, COMM, INFO, REQUEST, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Reduce(sendbuf, recvbuf, count, datatype, op, root, comm, ierror) + TYPE(*), DIMENSION(..), INTENT(IN) :: sendbuf + TYPE(*), DIMENSION(..) 
:: recvbuf + INTEGER, INTENT(IN) :: count, root + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Op), INTENT(IN) :: op + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Ireduce(sendbuf, recvbuf, count, datatype, op, root, comm, request, + ierror) + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: sendbuf + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: recvbuf + INTEGER, INTENT(IN) :: count, root + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Op), INTENT(IN) :: op + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + + MPI_Reduce_init(sendbuf, recvbuf, count, datatype, op, root, comm, info, request, + ierror) + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: sendbuf + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: recvbuf + INTEGER, INTENT(IN) :: count, root + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Op), INTENT(IN) :: op + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Info), INTENT(IN) :: info + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``sendbuf``: Address of send buffer (choice). +* ``count``: Number of elements in send buffer (integer). +* ``datatype``: Data type of elements of send buffer (handle). +* ``op``: Reduce operation (handle). +* ``root``: Rank of root process (integer). +* ``comm``: Communicator (handle). +* ``info``: Info (handle, persistent). + +OUTPUT PARAMETERS +----------------- +* ``recvbuf``: Address of receive buffer (choice, significant only at root). +* ``request``: Request (handle, non-blocking only). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +The global reduce functions (:ref:`MPI_Reduce`, :ref:`MPI_Op_create`, :ref:`MPI_Op_free`, +:ref:`MPI_Allreduce`, :ref:`MPI_Reduce_scatter`, MPI_Scan) perform a global reduce +operation (such as sum, max, logical AND, etc.) 
across all the members +of a group. The reduction operation can be either one of a predefined +list of operations, or a user-defined operation. The global reduction +functions come in several flavors: a reduce that returns the result of +the reduction at one node, an all-reduce that returns this result at all +nodes, and a scan (parallel prefix) operation. In addition, a +reduce-scatter operation combines the functionality of a reduce and a +scatter operation. + +:ref:`MPI_Reduce` combines the elements provided in the input buffer of each +process in the group, using the operation op, and returns the combined +value in the output buffer of the process with rank root. The input +buffer is defined by the arguments sendbuf, count, and datatype; the +output buffer is defined by the arguments recvbuf, count, and datatype; +both have the same number of elements, with the same type. The routine +is called by all group members using the same arguments for count, +datatype, op, root, and comm. Thus, all processes provide input buffers +and output buffers of the same length, with elements of the same type. +Each process can provide one element, or a sequence of elements, in +which case the combine operation is executed element-wise on each entry +of the sequence. For example, if the operation is MPI_MAX and the send +buffer contains two elements that are floating-point numbers (count = 2 +and datatype = MPI_FLOAT), then recvbuf(1) = global max (sendbuf(1)) and +recvbuf(2) = global max(sendbuf(2)). + + +USE OF IN-PLACE OPTION +---------------------- + +When the communicator is an intracommunicator, you can perform a reduce +operation in-place (the output buffer is used as the input buffer). Use +the variable MPI_IN_PLACE as the value of the root process *sendbuf*. In +this case, the input data is taken at the root from the receive buffer, +where it will be replaced by the output data. 
+ +Note that MPI_IN_PLACE is a special kind of value; it has the same +restrictions on its use as MPI_BOTTOM. + +Because the in-place option converts the receive buffer into a +send-and-receive buffer, a Fortran binding that includes INTENT must +mark these as INOUT, not OUT. + + +WHEN COMMUNICATOR IS AN INTER-COMMUNICATOR +------------------------------------------ + +When the communicator is an inter-communicator, the root process in the +first group combines data from all the processes in the second group and +then performs the *op* operation. The first group defines the root +process. That process uses MPI_ROOT as the value of its *root* argument. +The remaining processes use MPI_PROC_NULL as the value of their *root* +argument. All processes in the second group use the rank of that root +process in the first group as the value of their *root* argument. Only +the send buffer arguments are significant in the second group, and only +the receive buffer arguments are significant in the root process of the +first group. + + +PREDEFINED REDUCE OPERATIONS +---------------------------- + +The set of predefined operations provided by MPI is listed below +(Predefined Reduce Operations). That section also enumerates the +datatypes each operation can be applied to. In addition, users may +define their own operations that can be overloaded to operate on several +datatypes, either basic or derived. This is further explained in the +description of the user-defined operations (see the man pages for +:ref:`MPI_Op_create` and MPI_Op_free). + +The operation op is always assumed to be associative. All predefined +operations are also assumed to be commutative. Users may define +operations that are assumed to be associative, but not commutative. The +\``canonical'' evaluation order of a reduction is determined by the +ranks of the processes in the group. 
However, the implementation can +take advantage of associativity, or associativity and commutativity, in +order to change the order of evaluation. This may change the result of +the reduction for operations that are not strictly associative and +commutative, such as floating point addition. + +Predefined operators work only with the MPI types listed below +(Predefined Reduce Operations, and the section MINLOC and MAXLOC, +below). User-defined operators may operate on general, derived +datatypes. In this case, each argument that the reduce operation is +applied to is one element described by such a datatype, which may +contain several basic values. This is further explained in Section 4.9.4 +of the MPI Standard, "User-Defined Operations." + +The following predefined operations are supplied for :ref:`MPI_Reduce` and +related functions :ref:`MPI_Allreduce`, :ref:`MPI_Reduce_scatter`, and :ref:`MPI_Scan`. These +operations are invoked by placing the following in op: + +:: + + Name Meaning + --------- -------------------- + MPI_MAX maximum + MPI_MIN minimum + MPI_SUM sum + MPI_PROD product + MPI_LAND logical and + MPI_BAND bit-wise and + MPI_LOR logical or + MPI_BOR bit-wise or + MPI_LXOR logical xor + MPI_BXOR bit-wise xor + MPI_MAXLOC max value and location + MPI_MINLOC min value and location + +The two operations MPI_MINLOC and MPI_MAXLOC are discussed separately +below (MINLOC and MAXLOC). For the other predefined operations, we +enumerate below the allowed combinations of op and datatype arguments. +First, define groups of MPI basic datatypes in the following way: + +.. code-block:: c + + C integer: MPI_INT, MPI_LONG, MPI_SHORT, + MPI_UNSIGNED_SHORT, MPI_UNSIGNED, + MPI_UNSIGNED_LONG + Fortran integer: MPI_INTEGER + Floating-point: MPI_FLOAT, MPI_DOUBLE, MPI_REAL, + MPI_DOUBLE_PRECISION, MPI_LONG_DOUBLE + Logical: MPI_LOGICAL + Complex: MPI_COMPLEX + Byte: MPI_BYTE + +Now, the valid datatypes for each option is specified below. 
+ +:: + + Op Allowed Types + ---------------- --------------------------- + MPI_MAX, MPI_MIN C integer, Fortran integer, + floating-point + + MPI_SUM, MPI_PROD C integer, Fortran integer, + floating-point, complex + + MPI_LAND, MPI_LOR, C integer, logical + MPI_LXOR + + MPI_BAND, MPI_BOR, C integer, Fortran integer, byte + MPI_BXOR + +**Example 1:** A routine that computes the dot product of two vectors +that are distributed across a group of processes and returns the answer +at process zero. + +:: + + SUBROUTINE PAR_BLAS1(m, a, b, c, comm) + REAL a(m), b(m) ! local slice of array + REAL c ! result (at process zero) + REAL sum + INTEGER m, comm, i, ierr + + ! local sum + sum = 0.0 + DO i = 1, m + sum = sum + a(i)*b(i) + END DO + + ! global sum + CALL MPI_REDUCE(sum, c, 1, MPI_REAL, MPI_SUM, 0, comm, ierr) + RETURN + +**Example 2:** A routine that computes the product of a vector and an +array that are distributed across a group of processes and returns the +answer at process zero. + +:: + + SUBROUTINE PAR_BLAS2(m, n, a, b, c, comm) + REAL a(m), b(m,n) ! local slice of array + REAL c(n) ! result + REAL sum(n) + INTEGER n, comm, i, j, ierr + + ! local sum + DO j= 1, n + sum(j) = 0.0 + DO i = 1, m + sum(j) = sum(j) + a(i)*b(i,j) + END DO + END DO + + ! global sum + CALL MPI_REDUCE(sum, c, n, MPI_REAL, MPI_SUM, 0, comm, ierr) + + ! return result at process zero (and garbage at the other nodes) + RETURN + + +MINLOC AND MAXLOC +----------------- + +The operator MPI_MINLOC is used to compute a global minimum and also an +index attached to the minimum value. MPI_MAXLOC similarly computes a +global maximum and index. One application of these is to compute a +global minimum (maximum) and the rank of the process containing this +value. + +The operation that defines MPI_MAXLOC is + +.. 
code-block:: c + + ( u ) ( v ) ( w ) + ( ) o ( ) = ( ) + ( i ) ( j ) ( k ) + + where + + w = max(u, v) + + and + + ( i if u > v + ( + k = ( min(i, j) if u = v + ( + ( j if u < v) + + + MPI_MINLOC is defined similarly: + + ( u ) ( v ) ( w ) + ( ) o ( ) = ( ) + ( i ) ( j ) ( k ) + + where + + w = min(u, v) + + and + + ( i if u < v + ( + k = ( min(i, j) if u = v + ( + ( j if u > v) + +Both operations are associative and commutative. Note that if MPI_MAXLOC +is applied to reduce a sequence of pairs (u(0), 0), (u(1), 1), ..., +(u(n-1), n-1), then the value returned is (u , r), where u= max(i) u(i) +and r is the index of the first global maximum in the sequence. Thus, if +each process supplies a value and its rank within the group, then a +reduce operation with op = MPI_MAXLOC will return the maximum value and +the rank of the first process with that value. Similarly, MPI_MINLOC can +be used to return a minimum and its index. More generally, MPI_MINLOC +computes a lexicographic minimum, where elements are ordered according +to the first component of each pair, and ties are resolved according to +the second component. + +The reduce operation is defined to operate on arguments that consist of +a pair: value and index. For both Fortran and C, types are provided to +describe the pair. The potentially mixed-type nature of such arguments +is a problem in Fortran. The problem is circumvented, for Fortran, by +having the MPI-provided type consist of a pair of the same type as +value, and coercing the index to this type also. In C, the MPI-provided +pair type has distinct types and the index is an int. + +In order to use MPI_MINLOC and MPI_MAXLOC in a reduce operation, one +must provide a datatype argument that represents a pair (value and +index). MPI provides nine such predefined datatypes. 
The operations +MPI_MAXLOC and MPI_MINLOC can be used with each of the following +datatypes: + +:: + + Fortran: + Name Description + MPI_2REAL pair of REALs + MPI_2DOUBLE_PRECISION pair of DOUBLE-PRECISION variables + MPI_2INTEGER pair of INTEGERs + + C: + Name Description + MPI_FLOAT_INT float and int + MPI_DOUBLE_INT double and int + MPI_LONG_INT long and int + MPI_2INT pair of ints + MPI_SHORT_INT short and int + MPI_LONG_DOUBLE_INT long double and int + +The data type MPI_2REAL is equivalent to: + +:: + + MPI_TYPE_CONTIGUOUS(2, MPI_REAL, MPI_2REAL) + +Similar statements apply for MPI_2INTEGER, MPI_2DOUBLE_PRECISION, and +MPI_2INT. + +The datatype MPI_FLOAT_INT is as if defined by the following sequence of +instructions. + +:: + + type[0] = MPI_FLOAT + type[1] = MPI_INT + disp[0] = 0 + disp[1] = sizeof(float) + block[0] = 1 + block[1] = 1 + MPI_TYPE_STRUCT(2, block, disp, type, MPI_FLOAT_INT) + +Similar statements apply for MPI_LONG_INT and MPI_DOUBLE_INT. + +**Example 3:** Each process has an array of 30 doubles, in C. For each +of the 30 locations, compute the value and rank of the process +containing the largest value. + +:: + + ... + /* each process has an array of 30 double: ain[30] + */ + double ain[30], aout[30]; + int ind[30]; + struct { + double val; + int rank; + } in[30], out[30]; + int i, myrank, root; + + MPI_Comm_rank(MPI_COMM_WORLD, &myrank); + for (i=0; i<30; ++i) { + in[i].val = ain[i]; + in[i].rank = myrank; + } + MPI_Reduce( in, out, 30, MPI_DOUBLE_INT, MPI_MAXLOC, root, comm ); + /* At this point, the answer resides on process root + */ + if (myrank == root) { + /* read ranks out + */ + for (i=0; i<30; ++i) { + aout[i] = out[i].val; + ind[i] = out[i].rank; + } + } + +**Example 4:** Same example, in Fortran. + +.. code-block:: fortran + + ... + ! 
each process has an array of 30 double: ain(30) + + DOUBLE PRECISION ain(30), aout(30) + INTEGER ind(30); + DOUBLE PRECISION in(2,30), out(2,30) + INTEGER i, myrank, root, ierr; + + MPI_COMM_RANK(MPI_COMM_WORLD, myrank); + DO I=1, 30 + in(1,i) = ain(i) + in(2,i) = myrank ! myrank is coerced to a double + END DO + + MPI_REDUCE( in, out, 30, MPI_2DOUBLE_PRECISION, MPI_MAXLOC, root, + comm, ierr ); + ! At this point, the answer resides on process root + + IF (myrank .EQ. root) THEN + ! read ranks out + DO I= 1, 30 + aout(i) = out(1,i) + ind(i) = out(2,i) ! rank is coerced back to an integer + END DO + END IF + +**Example 5:** Each process has a nonempty array of values. Find the +minimum global value, the rank of the process that holds it, and its +index on this process. + +:: + + #define LEN 1000 + + float val[LEN]; /* local array of values */ + int count; /* local number of values */ + int myrank, minrank, minindex; + float minval; + + struct { + float value; + int index; + } in, out; + + /* local minloc */ + in.value = val[0]; + in.index = 0; + for (i=1; i < count; i++) + if (in.value > val[i]) { + in.value = val[i]; + in.index = i; + } + + /* global minloc */ + MPI_Comm_rank(MPI_COMM_WORLD, &myrank); + in.index = myrank*LEN + in.index; + MPI_Reduce( in, out, 1, MPI_FLOAT_INT, MPI_MINLOC, root, comm ); + /* At this point, the answer resides on process root + */ + if (myrank == root) { + /* read answer out + */ + minval = out.value; + minrank = out.index / LEN; + minindex = out.index % LEN; + +All MPI objects (e.g., MPI_Datatype, MPI_Comm) are of type INTEGER in +Fortran. + + +NOTES ON COLLECTIVE OPERATIONS +------------------------------ + +The reduction functions ( MPI_Op ) do not return an error value. As a +result, if the functions detect an error, all they can do is either call +:ref:`MPI_Abort` or silently skip the problem. 
Thus, if you change the error +handler from MPI_ERRORS_ARE_FATAL to something else, for example, +MPI_ERRORS_RETURN , then no error may be indicated. + +The reason for this is the performance problems in ensuring that all +collective routines return the same error value. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Allreduce` :ref:`MPI_Reduce_scatter` :ref:`MPI_Scan` :ref:`MPI_Op_create` :ref:`MPI_Op_free` diff --git a/docs/man-openmpi/man3/MPI_Reduce_init.3.rst b/docs/man-openmpi/man3/MPI_Reduce_init.3.rst new file mode 100644 index 00000000000..043ad928ad0 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Reduce_init.3.rst @@ -0,0 +1,9 @@ +.. _mpi_reduce_init: + +MPI_Reduce_init +=============== + .. include_body + +.. include:: ../man3/MPI_Reduce.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Reduce_local.3.rst b/docs/man-openmpi/man3/MPI_Reduce_local.3.rst new file mode 100644 index 00000000000..6b0de5f5316 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Reduce_local.3.rst @@ -0,0 +1,334 @@ +.. _mpi_reduce_local: + + +MPI_Reduce_local +================ + +.. include_body + +:ref:`MPI_Reduce_local` - Perform a local reduction + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Reduce_local(const void *inbuf, void *inoutbuf, int count, + MPI_Datatype datatype, MPI_Op op) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! 
or the older form: INCLUDE 'mpif.h' + MPI_REDUCE_LOCAL(INBUF, INOUTBUF, COUNT, DATATYPE, OP, IERROR) + INBUF(*), INOUTBUF(*) + INTEGER COUNT, DATATYPE, OP, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Reduce_local(inbuf, inoutbuf, count, datatype, op, ierror) + TYPE(*), DIMENSION(..), INTENT(IN) :: inbuf + TYPE(*), DIMENSION(..) :: inoutbuf + INTEGER, INTENT(IN) :: count + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Op), INTENT(IN) :: op + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``inbuf``: Address of input buffer (choice). +* ``count``: Number of elements in input buffer (integer). +* ``datatype``: Data type of elements of input buffer (handle). +* ``op``: Reduce operation (handle). + +OUTPUT PARAMETERS +----------------- +* ``inoutbuf``: Address of in/out buffer (choice). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +The global reduce functions (:ref:`MPI_Reduce_local`, :ref:`MPI_Op_create`, +:ref:`MPI_Op_free`, :ref:`MPI_Allreduce`, MPI_Reduce_local_scatter, MPI_Scan) perform +a global reduce operation (such as sum, max, logical AND, etc.) across +all the members of a group. The reduction operation can be either one of +a predefined list of operations, or a user-defined operation. The global +reduction functions come in several flavors: a reduce that returns the +result of the reduction at one node, an all-reduce that returns this +result at all nodes, and a scan (parallel prefix) operation. In +addition, a reduce-scatter operation combines the functionality of a +reduce and a scatter operation. + +:ref:`MPI_Reduce_local` combines the elements provided in the input and +input/output buffers of the local process, using the operation op, and +returns the combined value in the inout/output buffer. 
The input buffer +is defined by the arguments inbuf, count, and datatype; the output +buffer is defined by the arguments inoutbuf, count, and datatype; both +have the same number of elements, with the same type. The routine is a +local call. The process can provide one element, or a sequence of +elements, in which case the combine operation is executed element-wise +on each entry of the sequence. For example, if the operation is MPI_MAX +and the input buffer contains two elements that are floating-point +numbers (count = 2 and datatype = MPI_FLOAT), then inoutbuf(1) = global +max (inbuf(1)) and inoutbuf(2) = global max(inbuf(2)). + + +USE OF IN-PLACE OPTION +---------------------- + +The use of MPI_IN_PLACE is disallowed with :ref:`MPI_Reduce_local`. + + +PREDEFINED REDUCE OPERATIONS +---------------------------- + +The set of predefined operations provided by MPI is listed below +(Predefined Reduce Operations). That section also enumerates the +datatypes each operation can be applied to. In addition, users may +define their own operations that can be overloaded to operate on several +datatypes, either basic or derived. This is further explained in the +description of the user-defined operations (see the man pages for +:ref:`MPI_Op_create` and MPI_Op_free). + +The operation op is always assumed to be associative. All predefined +operations are also assumed to be commutative. Users may define +operations that are assumed to be associative, but not commutative. The +\``canonical'' evaluation order of a reduction is determined by the +ranks of the processes in the group. However, the implementation can +take advantage of associativity, or associativity and commutativity, in +order to change the order of evaluation. This may change the result of +the reduction for operations that are not strictly associative and +commutative, such as floating point addition. 
+ +Predefined operators work only with the MPI types listed below +(Predefined Reduce Operations, and the section MINLOC and MAXLOC, +below). User-defined operators may operate on general, derived +datatypes. In this case, each argument that the reduce operation is +applied to is one element described by such a datatype, which may +contain several basic values. This is further explained in Section 4.9.4 +of the MPI Standard, "User-Defined Operations." + +The following predefined operations are supplied for :ref:`MPI_Reduce_local` +and related functions :ref:`MPI_Allreduce`, :ref:`MPI_Reduce_scatter`, and :ref:`MPI_Scan`. +These operations are invoked by placing the following in op: + +:: + + Name Meaning + --------- -------------------- + MPI_MAX maximum + MPI_MIN minimum + MPI_SUM sum + MPI_PROD product + MPI_LAND logical and + MPI_BAND bit-wise and + MPI_LOR logical or + MPI_BOR bit-wise or + MPI_LXOR logical xor + MPI_BXOR bit-wise xor + MPI_MAXLOC max value and location + MPI_MINLOC min value and location + +The two operations MPI_MINLOC and MPI_MAXLOC are discussed separately +below (MINLOC and MAXLOC). For the other predefined operations, we +enumerate below the allowed combinations of op and datatype arguments. +First, define groups of MPI basic datatypes in the following way: + +.. code-block:: c + + C integer: MPI_INT, MPI_LONG, MPI_SHORT, + MPI_UNSIGNED_SHORT, MPI_UNSIGNED, + MPI_UNSIGNED_LONG + Fortran integer: MPI_INTEGER + Floating-point: MPI_FLOAT, MPI_DOUBLE, MPI_REAL, + MPI_DOUBLE_PRECISION, MPI_LONG_DOUBLE + Logical: MPI_LOGICAL + Complex: MPI_COMPLEX + Byte: MPI_BYTE + +Now, the valid datatypes for each option is specified below. 
+ +:: + + Op Allowed Types + ---------------- --------------------------- + MPI_MAX, MPI_MIN C integer, Fortran integer, + floating-point + + MPI_SUM, MPI_PROD C integer, Fortran integer, + floating-point, complex + + MPI_LAND, MPI_LOR, C integer, logical + MPI_LXOR + + MPI_BAND, MPI_BOR, C integer, Fortran integer, byte + MPI_BXOR + + +MINLOC AND MAXLOC +----------------- + +The operator MPI_MINLOC is used to compute a global minimum and also an +index attached to the minimum value. MPI_MAXLOC similarly computes a +global maximum and index. One application of these is to compute a +global minimum (maximum) and the rank of the process containing this +value. + +The operation that defines MPI_MAXLOC is + +.. code-block:: c + + ( u ) ( v ) ( w ) + ( ) o ( ) = ( ) + ( i ) ( j ) ( k ) + + where + + w = max(u, v) + + and + + ( i if u > v + ( + k = ( min(i, j) if u = v + ( + ( j if u < v) + + + MPI_MINLOC is defined similarly: + + ( u ) ( v ) ( w ) + ( ) o ( ) = ( ) + ( i ) ( j ) ( k ) + + where + + w = min(u, v) + + and + + ( i if u < v + ( + k = ( min(i, j) if u = v + ( + ( j if u > v) + +Both operations are associative and commutative. Note that if MPI_MAXLOC +is applied to reduce a sequence of pairs (u(0), 0), (u(1), 1), ..., +(u(n-1), n-1), then the value returned is (u , r), where u= max(i) u(i) +and r is the index of the first global maximum in the sequence. Thus, if +each process supplies a value and its rank within the group, then a +reduce operation with op = MPI_MAXLOC will return the maximum value and +the rank of the first process with that value. Similarly, MPI_MINLOC can +be used to return a minimum and its index. More generally, MPI_MINLOC +computes a lexicographic minimum, where elements are ordered according +to the first component of each pair, and ties are resolved according to +the second component. + +The reduce operation is defined to operate on arguments that consist of +a pair: value and index. 
For both Fortran and C, types are provided to +describe the pair. The potentially mixed-type nature of such arguments +is a problem in Fortran. The problem is circumvented, for Fortran, by +having the MPI-provided type consist of a pair of the same type as +value, and coercing the index to this type also. In C, the MPI-provided +pair type has distinct types and the index is an int. + +In order to use MPI_MINLOC and MPI_MAXLOC in a reduce operation, one +must provide a datatype argument that represents a pair (value and +index). MPI provides nine such predefined datatypes. The operations +MPI_MAXLOC and MPI_MINLOC can be used with each of the following +datatypes: + +:: + + Fortran: + Name Description + MPI_2REAL pair of REALs + MPI_2DOUBLE_PRECISION pair of DOUBLE-PRECISION variables + MPI_2INTEGER pair of INTEGERs + + C: + Name Description + MPI_FLOAT_INT float and int + MPI_DOUBLE_INT double and int + MPI_LONG_INT long and int + MPI_2INT pair of ints + MPI_SHORT_INT short and int + MPI_LONG_DOUBLE_INT long double and int + +The data type MPI_2REAL is equivalent to: + +:: + + MPI_TYPE_CONTIGUOUS(2, MPI_REAL, MPI_2REAL) + +Similar statements apply for MPI_2INTEGER, MPI_2DOUBLE_PRECISION, and +MPI_2INT. + +The datatype MPI_FLOAT_INT is as if defined by the following sequence of +instructions. + +:: + + type[0] = MPI_FLOAT + type[1] = MPI_INT + disp[0] = 0 + disp[1] = sizeof(float) + block[0] = 1 + block[1] = 1 + MPI_TYPE_STRUCT(2, block, disp, type, MPI_FLOAT_INT) + +Similar statements apply for MPI_LONG_INT and MPI_DOUBLE_INT. + +All MPI objects (e.g., MPI_Datatype, MPI_Comm) are of type INTEGER in +Fortran. + + +NOTES ON COLLECTIVE OPERATIONS +------------------------------ + +The reduction operators ( MPI_Op ) do not return an error value. As a +result, if the functions detect an error, all they can do is either call +:ref:`MPI_Abort` or silently skip the problem. 
Thus, if you change the error +handler from MPI_ERRORS_ARE_FATAL to something else, for example, +MPI_ERRORS_RETURN , then no error may be indicated. + +The reason for this is the performance problems in ensuring that all +collective routines return the same error value. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Allreduce` :ref:`MPI_Reduce` :ref:`MPI_Reduce_scatter` :ref:`MPI_Scan` :ref:`MPI_Op_create` + :ref:`MPI_Op_free` diff --git a/docs/man-openmpi/man3/MPI_Reduce_scatter.3.rst b/docs/man-openmpi/man3/MPI_Reduce_scatter.3.rst new file mode 100644 index 00000000000..f5c83693e81 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Reduce_scatter.3.rst @@ -0,0 +1,174 @@ +.. _mpi_reduce_scatter: + + +MPI_Reduce_scatter +================== + +.. include_body + +:ref:`MPI_Reduce_scatter`, :ref:`MPI_Ireduce_scatter`, :ref:`MPI_Reduce_scatter_init` - +Combines values and scatters the results. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. 
code-block:: c + + #include + + int MPI_Reduce_scatter(const void *sendbuf, void *recvbuf, const int recvcounts[], + MPI_Datatype datatype, MPI_Op op, MPI_Comm comm) + + int MPI_Ireduce_scatter(const void *sendbuf, void *recvbuf, const int recvcounts[], + MPI_Datatype datatype, MPI_Op op, MPI_Comm comm, MPI_Request *request) + + int MPI_Reduce_scatter_init(const void *sendbuf, void *recvbuf, const int recvcounts[], + MPI_Datatype datatype, MPI_Op op, MPI_Comm comm, MPI_Info info, MPI_Request *request) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_REDUCE_SCATTER(SENDBUF, RECVBUF, RECVCOUNTS, DATATYPE, OP, + COMM, IERROR) + SENDBUF(*), RECVBUF(*) + INTEGER RECVCOUNTS(*), DATATYPE, OP, COMM, IERROR + + MPI_IREDUCE_SCATTER(SENDBUF, RECVBUF, RECVCOUNTS, DATATYPE, OP, + COMM, REQUEST, IERROR) + SENDBUF(*), RECVBUF(*) + INTEGER RECVCOUNTS(*), DATATYPE, OP, COMM, REQUEST, IERROR + + MPI_REDUCE_SCATTER_INIT(SENDBUF, RECVBUF, RECVCOUNTS, DATATYPE, OP, + COMM, INFO, REQUEST, IERROR) + SENDBUF(*), RECVBUF(*) + INTEGER RECVCOUNTS(*), DATATYPE, OP, COMM, INFO, REQUEST, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Reduce_scatter(sendbuf, recvbuf, recvcounts, datatype, op, comm, + ierror) + TYPE(*), DIMENSION(..), INTENT(IN) :: sendbuf + TYPE(*), DIMENSION(..)
:: recvbuf + INTEGER, INTENT(IN) :: recvcounts(*) + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Op), INTENT(IN) :: op + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Ireduce_scatter(sendbuf, recvbuf, recvcounts, datatype, op, comm, + request, ierror) + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: sendbuf + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: recvbuf + INTEGER, INTENT(IN), ASYNCHRONOUS :: recvcounts(*) + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Op), INTENT(IN) :: op + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Reduce_scatter_init(sendbuf, recvbuf, recvcounts, datatype, op, comm, + info, request, ierror) + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: sendbuf + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: recvbuf + INTEGER, INTENT(IN), ASYNCHRONOUS :: recvcounts(*) + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Op), INTENT(IN) :: op + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Info), INTENT(IN) :: info + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``sendbuf``: Starting address of send buffer (choice). +* ``recvcounts``: Integer array specifying the number of elements in result distributed to each process. Array must be identical on all calling processes. +* ``datatype``: Datatype of elements of input buffer (handle). +* ``op``: Operation (handle). +* ``comm``: Communicator (handle). +* ``info``: Info (handle, persistent). + +OUTPUT PARAMETERS +----------------- +* ``recvbuf``: Starting address of receive buffer (choice). +* ``request``: Request (handle, non-blocking only). +* ``IERROR``: Fortran only: Error status (integer). 
+ +DESCRIPTION +----------- + +:ref:`MPI_Reduce_scatter` first does an element-wise reduction on vector of +*count* = S(i)\ *recvcounts*\ [i] elements in the send buffer defined by +*sendbuf*, *count*, and *datatype*. Next, the resulting vector of +results is split into n disjoint segments, where n is the number of +processes in the group. Segment i contains *recvcounts*\ [i] elements. +The ith segment is sent to process i and stored in the receive buffer +defined by *recvbuf*, *recvcounts*\ [i], and *datatype*. + + +USE OF IN-PLACE OPTION +---------------------- + +When the communicator is an intracommunicator, you can perform a +reduce-scatter operation in-place (the output buffer is used as the +input buffer). Use the variable MPI_IN_PLACE as the value of the +*sendbuf*. In this case, the input data is taken from the top of the +receive buffer. The area occupied by the input data may be either longer +or shorter than the data filled by the output data. + + +WHEN COMMUNICATOR IS AN INTER-COMMUNICATOR +------------------------------------------ + +When the communicator is an inter-communicator, the reduce-scatter +operation occurs in two phases. First, the result of the reduction +performed on the data provided by the processes in the first group is +scattered among the processes in the second group. Then the reverse +occurs: the reduction performed on the data provided by the processes in +the second group is scattered among the processes in the first group. +For each group, all processes provide the same *recvcounts* argument, +and the sum of the *recvcounts* values should be the same for both +groups. + + +NOTES ON COLLECTIVE OPERATIONS +------------------------------ + +The reduction functions ( MPI_Op ) do not return an error value. As a +result, if the functions detect an error, all they can do is either call +:ref:`MPI_Abort` or silently skip the problem. 
Thus, if you change the error +handler from MPI_ERRORS_ARE_FATAL to something else, for example, +MPI_ERRORS_RETURN , then no error may be indicated. + +The reason for this is the performance problems in ensuring that all +collective routines return the same error value. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_Reduce_scatter_block.3.rst b/docs/man-openmpi/man3/MPI_Reduce_scatter_block.3.rst new file mode 100644 index 00000000000..03460c04e90 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Reduce_scatter_block.3.rst @@ -0,0 +1,181 @@ +.. _mpi_reduce_scatter_block: + + +MPI_Reduce_scatter_block +======================== + +.. include_body + +:ref:`MPI_Reduce_scatter_block`, :ref:`MPI_Ireduce_scatter_block`, +:ref:`MPI_Reduce_scatter_block_init` - Combines values and scatters the +results in blocks. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Reduce_scatter_block(const void *sendbuf, void *recvbuf, int recvcount, + MPI_Datatype datatype, MPI_Op op, MPI_Comm comm) + + int MPI_Ireduce_scatter_block(const void *sendbuf, void *recvbuf, int recvcount, + MPI_Datatype datatype, MPI_Op op, MPI_Comm comm, MPI_Request *request) + + + int MPI_Reduce_scatter_block_init(const void *sendbuf, void *recvbuf, int recvcount, + MPI_Datatype datatype, MPI_Op op, MPI_Comm comm, MPI_Info info, MPI_Request *request) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_REDUCE_SCATTER_BLOCK(SENDBUF, RECVBUF, RECVCOUNT, DATATYPE, OP, + COMM, IERROR) + SENDBUF(*), RECVBUF(*) + INTEGER RECVCOUNT, DATATYPE, OP, COMM, IERROR + + MPI_IREDUCE_SCATTER_BLOCK(SENDBUF, RECVBUF, RECVCOUNT, DATATYPE, OP, + COMM, REQUEST, IERROR) + SENDBUF(*), RECVBUF(*) + INTEGER RECVCOUNT, DATATYPE, OP, COMM, REQUEST, IERROR + + + MPI_REDUCE_SCATTER_BLOCK_INIT(SENDBUF, RECVBUF, RECVCOUNT, DATATYPE, OP, + COMM, INFO, REQUEST, IERROR) + SENDBUF(*), RECVBUF(*) + INTEGER RECVCOUNT, DATATYPE, OP, COMM, INFO, REQUEST, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Reduce_scatter_block(sendbuf, recvbuf, recvcount, datatype, op, comm, + ierror) + TYPE(*), DIMENSION(..), INTENT(IN) :: sendbuf + TYPE(*), DIMENSION(..) :: recvbuf + INTEGER, INTENT(IN) :: recvcount + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Op), INTENT(IN) :: op + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Ireduce_scatter_block(sendbuf, recvbuf, recvcount, datatype, op, comm, + request, ierror) + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: sendbuf + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: recvbuf + INTEGER, INTENT(IN) :: recvcount + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Op), INTENT(IN) :: op + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Reduce_scatter_block_init(sendbuf, recvbuf, recvcount, datatype, op, comm, + info, request, ierror) + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: sendbuf + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: recvbuf + INTEGER, INTENT(IN) :: recvcount + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Op), INTENT(IN) :: op + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Info), INTENT(IN) :: info + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER,
OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``sendbuf``: Starting address of send buffer (choice). +* ``recvcount``: Element count per block (non-negative integer). +* ``datatype``: Datatype of elements of input buffer (handle). +* ``op``: Operation (handle). +* ``comm``: Communicator (handle). +* ``info``: Info (handle, persistent only). + +OUTPUT PARAMETERS +----------------- +* ``recvbuf``: Starting address of receive buffer (choice). +* ``request``: Request (handle, non-blocking only). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Reduce_scatter_block` first does an element-wise reduction on vector +of *count* = n \* *recvcount* elements in the send buffer defined by +*sendbuf*, *count*, and *datatype*, using the operation *op*, where n is +the number of processes in the group of *comm*. Next, the resulting +vector of results is split into n disjoint segments, where n is the +number of processes in the group. Each segment contains *recvcount* +elements. The ith segment is sent to process i and stored in the receive +buffer defined by *recvbuf*, *recvcount*, and *datatype*. + + +USE OF IN-PLACE OPTION +---------------------- + +When the communicator is an intracommunicator, you can perform a +reduce-scatter operation in-place (the output buffer is used as the +input buffer). Use the variable MPI_IN_PLACE as the value of the +*sendbuf*. In this case, the input data is taken from the top of the +receive buffer. The area occupied by the input data may be either longer +or shorter than the data filled by the output data. + + +WHEN COMMUNICATOR IS AN INTER-COMMUNICATOR +------------------------------------------ + +When the communicator is an inter-communicator, the reduce-scatter +operation occurs in two phases. First, the result of the reduction +performed on the data provided by the processes in the first group is +scattered among the processes in the second group. 
Then the reverse +occurs: the reduction performed on the data provided by the processes in +the second group is scattered among the processes in the first group. +For each group, all processes provide the same *recvcounts* argument, +and the sum of the *recvcounts* values should be the same for both +groups. + + +NOTES ON COLLECTIVE OPERATIONS +------------------------------ + +The reduction functions ( MPI_Op ) do not return an error value. As a +result, if the functions detect an error, all they can do is either call +:ref:`MPI_Abort` or silently skip the problem. Thus, if you change the error +handler from MPI_ERRORS_ARE_FATAL to something else, for example, +MPI_ERRORS_RETURN , then no error may be indicated. + +The reason for this is the performance problems in ensuring that all +collective routines return the same error value. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Reduce_scatter` diff --git a/docs/man-openmpi/man3/MPI_Reduce_scatter_block_init.3.rst b/docs/man-openmpi/man3/MPI_Reduce_scatter_block_init.3.rst new file mode 100644 index 00000000000..e16c8a88d39 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Reduce_scatter_block_init.3.rst @@ -0,0 +1,9 @@ +.. _mpi_reduce_scatter_block_init: + +MPI_Reduce_scatter_block_init +============================= + .. include_body + +.. include:: ../man3/MPI_Reduce_scatter_block.3.rst + :start-after: .. 
include_body + diff --git a/docs/man-openmpi/man3/MPI_Reduce_scatter_init.3.rst b/docs/man-openmpi/man3/MPI_Reduce_scatter_init.3.rst new file mode 100644 index 00000000000..2a07a9a0c88 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Reduce_scatter_init.3.rst @@ -0,0 +1,9 @@ +.. _mpi_reduce_scatter_init: + +MPI_Reduce_scatter_init +======================= + .. include_body + +.. include:: ../man3/MPI_Reduce_scatter.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Register_datarep.3.rst b/docs/man-openmpi/man3/MPI_Register_datarep.3.rst new file mode 100644 index 00000000000..bac151657e1 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Register_datarep.3.rst @@ -0,0 +1,129 @@ +.. _mpi_register_datarep: + + +MPI_Register_datarep +==================== + +.. include_body + +:ref:`MPI_Register_datarep` - Defines data representation. + + +SYNTAX +------ + + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Register_datarep(const char *datarep, + MPI_Datarep_conversion_function *read_conversion_fn, + MPI_Datarep_conversion_function *write_conversion_fn, + MPI_Datarep_extent_function *dtype_file_extent_fn, + void *extra_state) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_REGISTER_DATAREP(DATAREP, READ_CONVERSION_FN, + WRITE_CONVERSION_FN, DTYPE_FILE_EXTENT_FN, + EXTRA_STATE, IERROR) + CHARACTER*(*) DATAREP + EXTERNAL READ_CONVERSION_FN, WRITE_CONVERSION_FN, DTYPE_FILE_EXTENT_FN + INTEGER IERROR + INTEGER(KIND=MPI_ADDRESS_KIND) EXTRA_STATE + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE mpi_f08 + MPI_Register_datarep(datarep, read_conversion_fn, write_conversion_fn, + dtype_file_extent_fn, extra_state, ierror) + CHARACTER(LEN=*), INTENT(IN) :: datarep + PROCEDURE(MPI_Datarep_conversion_function) :: read_conversion_fn + PROCEDURE(MPI_Datarep_conversion_function) :: write_conversion_fn + PROCEDURE(MPI_Datarep_extent_function) :: dtype_file_extent_fn + INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: extra_state + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``datarep``: Data representation identifier (string). +* ``read_conversion_fn``: Function invoked to convert from file representation to native representation (function). +* ``write_conversion_fn``: Function invoked to convert from native representation to file representation (function). +* ``dtype_file_extent_fn``: Function invoked to get the extent of a data type as represented in the file (function). +* ``extra_state``: Extra state. + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Register_datarep` defines a data representation. It associates the +data representation's identifier (a string) with the functions that +convert from file representation to the native representation and vice +versa, with the function that gets the extent of a data type as +represented in the file, as well as with "extra state," which is used +for passing arguments. Once a data representation has been registered +using this routine, you may specify its identifier as an argument to +:ref:`MPI_File_set_view`, causing subsequent data-access operations to call the +specified conversion functions. + +The call associates *read_conversion_fn*, *write_conversion_fn*, and +*dtype_file_extent_fn* with the data representation identifier +*datarep*. 
*datarep* can then be used as an argument to +:ref:`MPI_File_set_view`, causing subsequent data access operations to call the +conversion functions to convert all data items accessed between file +data representation and native representation. :ref:`MPI_Register_datarep` is a +local operation and only registers the data representation for the +calling MPI process. If *datarep* is already defined, an error in the +error class MPI_ERR_DUP_DATAREP is raised using the default file error +handler. The length of a data representation string is limited to the +value of MPI_MAX_DATAREP_STRING. MPI_MAX_DATAREP_STRING must have a +value of at least 64. No routines are provided to delete data +representations and free the associated resources; it is not expected +that an application will generate them in significant numbers. + + +NOTES +----- + +The Fortran version of each MPI I/O routine includes a final argument, +IERROR, which is not defined in the PARAMETERS sections. This argument +is used to return the error status of the routine in the manner typical +for Fortran library routines. + +The C version of each routine returns an error status as an integer +return value. + +Error classes are found in mpi.h (for C) and mpif.h (for Fortran). + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. For MPI I/O function errors, the default error handler is set to +MPI_ERRORS_RETURN. The error handler may be changed with +:ref:`MPI_File_set_errhandler`; the predefined error handler +MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI +does not guarantee that an MPI program can continue past an error. 
diff --git a/docs/man-openmpi/man3/MPI_Request_c2f.3.rst b/docs/man-openmpi/man3/MPI_Request_c2f.3.rst new file mode 100644 index 00000000000..f3a4841f2cd --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Request_c2f.3.rst @@ -0,0 +1,9 @@ +.. _mpi_request_c2f: + +MPI_Request_c2f +=============== + .. include_body + +.. include:: ../man3/MPI_Comm_f2c.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Request_f2c.3.rst b/docs/man-openmpi/man3/MPI_Request_f2c.3.rst new file mode 100644 index 00000000000..01c91f8da4c --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Request_f2c.3.rst @@ -0,0 +1,9 @@ +.. _mpi_request_f2c: + +MPI_Request_f2c +=============== + .. include_body + +.. include:: ../man3/MPI_Comm_f2c.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Request_free.3.rst b/docs/man-openmpi/man3/MPI_Request_free.3.rst new file mode 100644 index 00000000000..cbf52795a56 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Request_free.3.rst @@ -0,0 +1,136 @@ +.. _mpi_request_free: + + +MPI_Request_free +================ + +.. include_body + +:ref:`MPI_Request_free` - Frees a communication request object. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Request_free(MPI_Request *request) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_REQUEST_FREE(REQUEST, IERROR) + INTEGER REQUEST, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Request_free(request, ierror) + TYPE(MPI_Request), INTENT(INOUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT/OUTPUT PARAMETER +---------------------- +* ``request``: Communication request (handle). + +DESCRIPTION +----------- + +This operation allows a request object to be deallocated without waiting +for the associated communication to complete. 
+ +:ref:`MPI_Request_free` marks the request object for deallocation and sets +request to MPI_REQUEST_NULL. Any ongoing communication that is +associated with the request will be allowed to complete. The request +will be deallocated only after its completion. + + +NOTES +----- + +Once a request is freed by a call to :ref:`MPI_Request_free`, it is not +possible to check for the successful completion of the associated +communication with calls to :ref:`MPI_Wait` or :ref:`MPI_Test`. Also, if an error +occurs subsequently during the communication, an error code cannot be +returned to the user -- such an error must be treated as fatal. +Questions arise as to how one knows when the operations have completed +when using :ref:`MPI_Request_free`. Depending on the program logic, there may +be other ways in which the program knows that certain operations have +completed and this makes usage of :ref:`MPI_Request_free` practical. For +example, an active send request could be freed when the logic of the +program is such that the receiver sends a reply to the message sent -- +the arrival of the reply informs the sender that the send has completed +and the send buffer can be reused. An active receive request should +never be freed, as the receiver will have no way to verify that the +receive has completed and the receive buffer can be reused. + +**Example:** + +:: + + CALL MPI_COMM_RANK(MPI_COMM_WORLD, rank) + IF(rank.EQ.0) THEN + DO i=1, n + CALL MPI_ISEND(outval, 1, MPI_REAL, 1, 0, req, ierr) + CALL MPI_REQUEST_FREE(req, ierr) + CALL MPI_IRECV(inval, 1, MPI_REAL, 1, 0, req, ierr) + CALL MPI_WAIT(req, status, ierr) + END DO + ELSE ! 
rank.EQ.1 + CALL MPI_IRECV(inval, 1, MPI_REAL, 0, 0, req, ierr) + CALL MPI_WAIT(req, status) + DO I=1, n-1 + CALL MPI_ISEND(outval, 1, MPI_REAL, 0, 0, req, ierr) + CALL MPI_REQUEST_FREE(req, ierr) + CALL MPI_IRECV(inval, 1, MPI_REAL, 0, 0, req, ierr) + CALL MPI_WAIT(req, status, ierr) + END DO + CALL MPI_ISEND(outval, 1, MPI_REAL, 0, 0, req, ierr) + CALL MPI_WAIT(req, status) + END IF + +This routine is normally used to free persistent requests created with +either :ref:`MPI_Recv_init` or :ref:`MPI_Send_init` and friends. However, it can +be used to free a request created with :ref:`MPI_Irecv` or :ref:`MPI_Isend` and +friends; in that case the user cannot use the test/wait routines on the +request. + +It **is** permitted to free an active request. However, once freed, you +can not use the request in a wait or test routine (e.g., :ref:`MPI_Wait` ). + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. 
seealso:: + :ref:`MPI_Isend` :ref:`MPI_Irecv` :ref:`MPI_Issend` :ref:`MPI_Ibsend` :ref:`MPI_Irsend` :ref:`MPI_Recv_init` + :ref:`MPI_Send_init` :ref:`MPI_Ssend_init` :ref:`MPI_Rsend_init` :ref:`MPI_Test` :ref:`MPI_Wait` + :ref:`MPI_Waitall` :ref:`MPI_Waitany` :ref:`MPI_Waitsome` :ref:`MPI_Testall` :ref:`MPI_Testany` + :ref:`MPI_Testsome` diff --git a/docs/man-openmpi/man3/MPI_Request_get_status.3.rst b/docs/man-openmpi/man3/MPI_Request_get_status.3.rst new file mode 100644 index 00000000000..ae7aad683ce --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Request_get_status.3.rst @@ -0,0 +1,86 @@ +.. _mpi_request_get_status: + + +MPI_Request_get_status +====================== + +.. include_body + +:ref:`MPI_Request_get_status` - Access information associated with a +request without freeing the request. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Request_get_status(MPI_Request request, int *flag, MPI_Status *status) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_REQUEST_GET_STATUS(REQUEST, FLAG, STATUS, IERROR) + INTEGER REQUEST, STATUS(MPI_STATUS_SIZE), IERROR + LOGICAL FLAG + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Request_get_status(request, flag, status, ierror) + TYPE(MPI_Request), INTENT(IN) :: request + LOGICAL, INTENT(OUT) :: flag + TYPE(MPI_Status) :: status + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETER +--------------- +* ``request``: Communication request (handle). + +OUTPUT PARAMETERS +----------------- +* ``flag``: Boolean flag, same as from MPI_Test (logical). +* ``status``: MPI_Status object if flag is true (status). + +DESCRIPTION +----------- + +:ref:`MPI_Request_get_status` sets *flag*\ =\ *true* if the operation is +complete or sets *flag*\ =\ *false* if it is not complete. If the +operation is complete, it returns in *status* the request status. 
It +does not deallocate or inactivate the request; a subsequent call to +test, wait, or free should be executed with that request. + +If your application does not need to examine the *status* field, you can +save resources by using the predefined constant MPI_STATUS_IGNORE as a +special value for the *status* argument. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_Rget.3.rst b/docs/man-openmpi/man3/MPI_Rget.3.rst new file mode 100644 index 00000000000..02f3d6b5fd3 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Rget.3.rst @@ -0,0 +1,9 @@ +.. _mpi_rget: + +MPI_Rget +======== + .. include_body + +.. include:: ../man3/MPI_Get.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Rget_accumulate.3.rst b/docs/man-openmpi/man3/MPI_Rget_accumulate.3.rst new file mode 100644 index 00000000000..a72257643b1 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Rget_accumulate.3.rst @@ -0,0 +1,9 @@ +.. _mpi_rget_accumulate: + +MPI_Rget_accumulate +=================== + .. include_body + +.. include:: ../man3/MPI_Get_accumulate.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Rput.3.rst b/docs/man-openmpi/man3/MPI_Rput.3.rst new file mode 100644 index 00000000000..01fca45572f --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Rput.3.rst @@ -0,0 +1,9 @@ +.. _mpi_rput: + +MPI_Rput +======== + .. include_body + +.. include:: ../man3/MPI_Put.3.rst + :start-after: .. 
include_body + diff --git a/docs/man-openmpi/man3/MPI_Rsend.3.rst b/docs/man-openmpi/man3/MPI_Rsend.3.rst new file mode 100644 index 00000000000..1835dec1fb6 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Rsend.3.rst @@ -0,0 +1,85 @@ +.. _mpi_rsend: + + +MPI_Rsend +========= + +.. include_body + +:ref:`MPI_Rsend` - Ready send. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Rsend(const void *buf, int count, MPI_Datatype datatype, int dest, + int tag, MPI_Comm comm) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_RSEND(BUF, COUNT, DATATYPE, DEST, TAG, COMM, IERROR) + BUF(*) + INTEGER COUNT, DATATYPE, DEST, TAG, COMM, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Rsend(buf, count, datatype, dest, tag, comm, ierror) + TYPE(*), DIMENSION(..), INTENT(IN) :: buf + INTEGER, INTENT(IN) :: count, dest, tag + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``buf``: Initial address of send buffer (choice). +* ``count``: Number of elements in send buffer (nonnegative integer). +* ``datatype``: Datatype of each send buffer element (handle). +* ``dest``: Rank of destination (integer). +* ``tag``: Message tag (integer). +* ``comm``: Communicator (handle). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +A ready send may only be called if the user can guarantee that a receive +is already posted. It is an error if the receive is not posted before +the ready send is called. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. 
By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_Rsend_init.3.rst b/docs/man-openmpi/man3/MPI_Rsend_init.3.rst new file mode 100644 index 00000000000..e39ffba7833 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Rsend_init.3.rst @@ -0,0 +1,95 @@ +.. _mpi_rsend_init: + + +MPI_Rsend_init +============== + +.. include_body + +:ref:`MPI_Rsend_init` - Builds a handle for a ready send. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Rsend_init(const void *buf, int count, MPI_Datatype datatype, + int dest, int tag, MPI_Comm comm, MPI_Request *request) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_RSEND_INIT(BUF, COUNT, DATATYPE, DEST, TAG, COMM, REQUEST, + IERROR) + BUF(*) + INTEGER COUNT, DATATYPE, DEST, TAG, COMM, REQUEST, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Rsend_init(buf, count, datatype, dest, tag, comm, request, ierror) + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: buf + INTEGER, INTENT(IN) :: count, dest, tag + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``buf``: Initial address of send buffer (choice). +* ``count``: Number of elements sent (integer). +* ``datatype``: Type of each element (handle). +* ``dest``: Rank of destination (integer). +* ``tag``: Message tag (integer). +* ``comm``: Communicator (handle). + +OUTPUT PARAMETERS +----------------- +* ``request``: Communication request (handle). 
+* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +Creates a persistent communication object for a ready mode send +operation, and binds to it all the arguments of a send operation. + +A communication (send or receive) that uses a persistent request is +initiated by the function :ref:`MPI_Start`. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Bsend_init` :ref:`MPI_Send_init` :ref:`MPI_Ssend_init` :ref:`MPI_Recv_init` :ref:`MPI_Start` + :ref:`MPI_Startall` :ref:`MPI_Request_free` diff --git a/docs/man-openmpi/man3/MPI_Scan.3.rst b/docs/man-openmpi/man3/MPI_Scan.3.rst new file mode 100644 index 00000000000..63089b890d2 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Scan.3.rst @@ -0,0 +1,255 @@ +.. _mpi_scan: + + +MPI_Scan +======== + +.. include_body + +:ref:`MPI_Scan`, :ref:`MPI_Iscan`, :ref:`MPI_Scan_init` - Computes an inclusive scan +(partial reduction) + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Scan(const void *sendbuf, void *recvbuf, int count, + MPI_Datatype datatype, MPI_Op op, MPI_Comm comm) + + int MPI_Iscan(const void *sendbuf, void *recvbuf, int count, + MPI_Datatype datatype, MPI_Op op, MPI_Comm comm, + MPI_Request *request) + + int MPI_Scan_init(const void *sendbuf, void *recvbuf, int count, + MPI_Datatype datatype, MPI_Op op, MPI_Comm comm, + MPI_Info info, MPI_Request *request) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_SCAN(SENDBUF, RECVBUF, COUNT, DATATYPE, OP, COMM, IERROR) + SENDBUF(*), RECVBUF(*) + INTEGER COUNT, DATATYPE, OP, COMM, IERROR + + MPI_ISCAN(SENDBUF, RECVBUF, COUNT, DATATYPE, OP, COMM, REQUEST, IERROR) + SENDBUF(*), RECVBUF(*) + INTEGER COUNT, DATATYPE, OP, COMM, REQUEST, IERROR + + MPI_SCAN_INIT(SENDBUF, RECVBUF, COUNT, DATATYPE, OP, COMM, INFO, REQUEST, IERROR) + SENDBUF(*), RECVBUF(*) + INTEGER COUNT, DATATYPE, OP, COMM, INFO, REQUEST, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Scan(sendbuf, recvbuf, count, datatype, op, comm, ierror) + TYPE(*), DIMENSION(..), INTENT(IN) :: sendbuf + TYPE(*), DIMENSION(..) :: recvbuf + INTEGER, INTENT(IN) :: count + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Op), INTENT(IN) :: op + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Iscan(sendbuf, recvbuf, count, datatype, op, comm, request, ierror) + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: sendbuf + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: recvbuf + INTEGER, INTENT(IN) :: count + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Op), INTENT(IN) :: op + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Scan_init(sendbuf, recvbuf, count, datatype, op, comm, info, request, ierror) + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: sendbuf + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: recvbuf + INTEGER, INTENT(IN) :: count + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Op), INTENT(IN) :: op + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Info), INTENT(IN) :: info + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``sendbuf``: Send buffer (choice). +* ``count``: Number of elements in input buffer (integer). 
+* ``datatype``: Data type of elements of input buffer (handle). +* ``op``: Operation (handle). +* ``comm``: Communicator (handle). +* ``info``: Info (handle, persistent only) + +OUTPUT PARAMETERS +----------------- +* ``recvbuf``: Receive buffer (choice). +* ``request``: Request (handle, non-blocking only). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Scan` is used to perform an inclusive prefix reduction on data +distributed across the calling processes. The operation returns, in the +*recvbuf* of the process with rank i, the reduction (calculated +according to the function *op*) of the values in the *sendbuf*\ s of +processes with ranks 0, ..., i (inclusive). The type of operations +supported, their semantics, and the constraints on send and receive +buffers are as for :ref:`MPI_Reduce`. + + +EXAMPLE +------- + +This example uses a user-defined operation to produce a segmented scan. +A segmented scan takes, as input, a set of values and a set of logicals, +where the logicals delineate the various segments of the scan. For +example, + +:: + + values v1 v2 v3 v4 v5 v6 v7 v8 + logicals 0 0 1 1 1 0 0 1 + result v1 v1+v2 v3 v3+v4 v3+v4+v5 v6 v6+v7 v8 + +The result for rank j is thus the sum v(i) + ... + v(j), where i is the +lowest rank such that for all ranks n, i <= n <= j, logical(n) = +logical(j). The operator that produces this effect is + +:: + + [ u ] [ v ] [ w ] + [ ] o [ ] = [ ] + [ i ] [ j ] [ j ] + +where + +:: + + ( u + v if i = j + w = ( + ( v if i != j + +Note that this is a noncommutative operator. C code that implements it +is given below. 
+ +:: + + typedef struct { + double val; + int log; + } SegScanPair; + + /* + * the user-defined function + */ + void segScan(SegScanPair *in, SegScanPair *inout, int *len, + MPI_Datatype *dptr) + { + int i; + SegScanPair c; + + for (i = 0; i < *len; ++i) { + if (in->log == inout->log) + c.val = in->val + inout->val; + else + c.val = inout->val; + + c.log = inout->log; + *inout = c; + in++; + inout++; + } + } + +Note that the inout argument to the user-defined function corresponds to +the right-hand operand of the operator. When using this operator, we +must be careful to specify that it is noncommutative, as in the +following: + +:: + + int i, base; + SeqScanPair a, answer; + MPI_Op myOp; + MPI_Datatype type[2] = {MPI_DOUBLE, MPI_INT}; + MPI_Aint disp[2]; + int blocklen[2] = {1, 1}; + MPI_Datatype sspair; + + /* + * explain to MPI how type SegScanPair is defined + */ + MPI_Get_address(a, disp); + MPI_Get_address(a.log, disp + 1); + base = disp[0]; + for (i = 0; i < 2; ++i) + disp[i] -= base; + MPI_Type_struct(2, blocklen, disp, type, &sspair); + MPI_Type_commit(&sspair); + + /* + * create the segmented-scan user-op + * noncommutative - set commute (arg 2) to 0 + */ + MPI_Op_create((MPI_User_function *)segScan, 0, &myOp); + ... + MPI_Scan(a, answer, 1, sspair, myOp, comm); + + +USE OF IN-PLACE OPTION +---------------------- + +When the communicator is an intracommunicator, you can perform a +scanning operation in place (the output buffer is used as the input +buffer). Use the variable MPI_IN_PLACE as the value of the *sendbuf* +argument. The input data is taken from the receive buffer and replaced +by the output data. + + +NOTES ON COLLECTIVE OPERATIONS +------------------------------ + +The reduction functions of type MPI_Op do not return an error value. As +a result, if the functions detect an error, all they can do is either +call :ref:`MPI_Abort` or silently skip the problem. 
Thus, if the error handler +is changed from MPI_ERRORS_ARE_FATAL to something else (e.g., +MPI_ERRORS_RETURN), then no error may be indicated. + +The reason for this is the performance problems in ensuring that all +collective routines return the same error value. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + +See the MPI man page for a full list of MPI error codes. + + +.. seealso:: + :ref:`MPI_Exscan` :ref:`MPI_Op_create` :ref:`MPI_Reduce` diff --git a/docs/man-openmpi/man3/MPI_Scan_init.3.rst b/docs/man-openmpi/man3/MPI_Scan_init.3.rst new file mode 100644 index 00000000000..a3d53e0f7e2 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Scan_init.3.rst @@ -0,0 +1,9 @@ +.. _mpi_scan_init: + +MPI_Scan_init +============= + .. include_body + +.. include:: ../man3/MPI_Scan.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Scatter.3.rst b/docs/man-openmpi/man3/MPI_Scatter.3.rst new file mode 100644 index 00000000000..849acfa09b3 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Scatter.3.rst @@ -0,0 +1,223 @@ +.. _mpi_scatter: + + +MPI_Scatter +=========== + +.. include_body + +:ref:`MPI_Scatter`, :ref:`MPI_Iscatter`, :ref:`MPI_Scatter_init` - Sends data from one +task to all tasks in a group. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. 
code-block:: c + + #include + + int MPI_Scatter(const void *sendbuf, int sendcount, MPI_Datatype sendtype, + void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, + MPI_Comm comm) + + int MPI_Iscatter(const void *sendbuf, int sendcount, MPI_Datatype sendtype, + void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, + MPI_Comm comm, MPI_Request *request) + + int MPI_Scatter_init(const void *sendbuf, int sendcount, MPI_Datatype sendtype, + void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, + MPI_Comm comm, MPI_Info info, MPI_Request *request) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_SCATTER(SENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNT, + RECVTYPE, ROOT, COMM, IERROR) + SENDBUF(*), RECVBUF(*) + INTEGER SENDCOUNT, SENDTYPE, RECVCOUNT, RECVTYPE, ROOT + INTEGER COMM, IERROR + + MPI_ISCATTER(SENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNT, + RECVTYPE, ROOT, COMM, REQUEST, IERROR) + SENDBUF(*), RECVBUF(*) + INTEGER SENDCOUNT, SENDTYPE, RECVCOUNT, RECVTYPE, ROOT + INTEGER COMM, REQUEST, IERROR + + MPI_SCATTER_INIT(SENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNT, + RECVTYPE, ROOT, COMM, INFO, REQUEST, IERROR) + SENDBUF(*), RECVBUF(*) + INTEGER SENDCOUNT, SENDTYPE, RECVCOUNT, RECVTYPE, ROOT + INTEGER COMM, INFO, REQUEST, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Scatter(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, + root, comm, ierror) + TYPE(*), DIMENSION(..), INTENT(IN) :: sendbuf + TYPE(*), DIMENSION(..) 
:: recvbuf + INTEGER, INTENT(IN) :: sendcount, recvcount, root + TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Iscatter(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, + root, comm, request, ierror) + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: sendbuf + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: recvbuf + INTEGER, INTENT(IN) :: sendcount, recvcount, root + TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Scatter_init(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, + root, comm, info, request, ierror) + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: sendbuf + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: recvbuf + INTEGER, INTENT(IN) :: sendcount, recvcount, root + TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Info), INTENT(IN) :: info + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``sendbuf``: Address of send buffer (choice, significant only at root). +* ``sendcount``: Number of elements sent to each process (integer, significant only at root). +* ``sendtype``: Datatype of send buffer elements (handle, significant only at root). +* ``recvcount``: Number of elements in receive buffer (integer). +* ``recvtype``: Datatype of receive buffer elements (handle). +* ``root``: Rank of sending process (integer). +* ``comm``: Communicator (handle). +* ``info``: Info (handle, persistent). + +OUTPUT PARAMETERS +----------------- +* ``recvbuf``: Address of receive buffer (choice). +* ``request``: Request (handle, non-blocking only). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Scatter` is the inverse operation to :ref:`MPI_Gather`. 
+ +The outcome is as if the root executed n send operations, + +:: + + MPI_Send(sendbuf + i * sendcount * extent(sendtype), sendcount, + sendtype, i, ...) + +and each process executed a receive, + +:: + + MPI_Recv(recvbuf, recvcount, recvtype, i, ...). + +An alternative description is that the root sends a message with +MPI_Send(sendbuf, *sendcount* \* *n*, *sendtype*, ...). This message +is split into *n* equal segments, the ith segment is sent to the ith +process in the group, and each process receives this message as above. + +The send buffer is ignored for all nonroot processes. + +The type signature associated with *sendcount*, *sendtype* at the root +must be equal to the type signature associated with *recvcount*, +*recvtype* at all processes (however, the type maps may be different). +This implies that the amount of data sent must be equal to the amount of +data received, pairwise between each process and the root. Distinct type +maps between sender and receiver are still allowed. + +All arguments to the function are significant on process *root*, while +on other processes, only arguments *recvbuf*, *recvcount*, *recvtype*, +*root*, *comm* are significant. The arguments *root* and *comm* must +have identical values on all processes. + +The specification of counts and types should not cause any location on +the root to be read more than once. + +**Rationale:** Though not needed, the last restriction is imposed so as +to achieve symmetry with :ref:`MPI_Gather`, where the corresponding restriction +(a multiple-write restriction) is necessary. + +**Example:** The reverse of Example 1 in the :ref:`MPI_Gather` manpage. Scatter +sets of 100 ints from the root to each process in the group. + +:: + + MPI_Comm comm; + int gsize,*sendbuf; + int root, rbuf[100]; + ... + MPI_Comm_size(comm, &gsize); + sendbuf = (int *)malloc(gsize*100*sizeof(int)); + ... 
+ MPI_Scatter(sendbuf, 100, MPI_INT, rbuf, 100, + MPI_INT, root, comm); + + +USE OF IN-PLACE OPTION +---------------------- + +When the communicator is an intracommunicator, you can perform a scatter +operation in-place (the output buffer is used as the input buffer). Use +the variable MPI_IN_PLACE as the value of the root process *recvbuf*. In +this case, *recvcount* and *recvtype* are ignored, and the root process +sends no data to itself. + +Note that MPI_IN_PLACE is a special kind of value; it has the same +restrictions on its use as MPI_BOTTOM. + +Because the in-place option converts the receive buffer into a +send-and-receive buffer, a Fortran binding that includes INTENT must +mark these as INOUT, not OUT. + + +WHEN COMMUNICATOR IS AN INTER-COMMUNICATOR +------------------------------------------ + +When the communicator is an inter-communicator, the root process in the +first group sends data to all processes in the second group. The first +group defines the root process. That process uses MPI_ROOT as the value +of its *root* argument. The remaining processes use MPI_PROC_NULL as the +value of their *root* argument. All processes in the second group use +the rank of that root process in the first group as the value of their +*root* argument. The receive buffer argument of the root process in the +first group must be consistent with the receive buffer argument of the +processes in the second group. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. 
+ + +.. seealso:: + :ref:`MPI_Scatterv` :ref:`MPI_Gather` :ref:`MPI_Gatherv` diff --git a/docs/man-openmpi/man3/MPI_Scatter_init.3.rst b/docs/man-openmpi/man3/MPI_Scatter_init.3.rst new file mode 100644 index 00000000000..7cb2a398390 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Scatter_init.3.rst @@ -0,0 +1,9 @@ +.. _mpi_scatter_init: + +MPI_Scatter_init +================ + .. include_body + +.. include:: ../man3/MPI_Scatter.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Scatterv.3.rst b/docs/man-openmpi/man3/MPI_Scatterv.3.rst new file mode 100644 index 00000000000..6b1cab65469 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Scatterv.3.rst @@ -0,0 +1,268 @@ +.. _mpi_scatterv: + + +MPI_Scatterv +============ + +.. include_body + +:ref:`MPI_Scatterv`, :ref:`MPI_Iscatterv`, :ref:`MPI_Scatterv_init` - Scatters a buffer +in parts to all tasks in a group. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Scatterv(const void *sendbuf, const int sendcounts[], const int displs[], + MPI_Datatype sendtype, void *recvbuf, int recvcount, + MPI_Datatype recvtype, int root, MPI_Comm comm) + + int MPI_Iscatterv(const void *sendbuf, const int sendcounts[], const int displs[], + MPI_Datatype sendtype, void *recvbuf, int recvcount, + MPI_Datatype recvtype, int root, MPI_Comm comm, MPI_Request *request) + + int MPI_Scatterv_init(const void *sendbuf, const int sendcounts[], const int displs[], + MPI_Datatype sendtype, void *recvbuf, int recvcount, + MPI_Datatype recvtype, int root, MPI_Comm comm, MPI_Info info, MPI_Request *request) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! 
or the older form: INCLUDE 'mpif.h' + MPI_SCATTERV(SENDBUF, SENDCOUNTS, DISPLS, SENDTYPE, RECVBUF, + RECVCOUNT, RECVTYPE, ROOT, COMM, IERROR) + SENDBUF(*), RECVBUF(*) + INTEGER SENDCOUNTS(*), DISPLS(*), SENDTYPE + INTEGER RECVCOUNT, RECVTYPE, ROOT, COMM, IERROR + + MPI_ISCATTERV(SENDBUF, SENDCOUNTS, DISPLS, SENDTYPE, RECVBUF, + RECVCOUNT, RECVTYPE, ROOT, COMM, REQUEST, IERROR) + SENDBUF(*), RECVBUF(*) + INTEGER SENDCOUNTS(*), DISPLS(*), SENDTYPE + INTEGER RECVCOUNT, RECVTYPE, ROOT, COMM, REQUEST, IERROR + + MPI_SCATTERV_INIT(SENDBUF, SENDCOUNTS, DISPLS, SENDTYPE, RECVBUF, + RECVCOUNT, RECVTYPE, ROOT, COMM, INFO, REQUEST, IERROR) + SENDBUF(*), RECVBUF(*) + INTEGER SENDCOUNTS(*), DISPLS(*), SENDTYPE + INTEGER RECVCOUNT, RECVTYPE, ROOT, COMM, INFO, REQUEST, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Scatterv(sendbuf, sendcounts, displs, sendtype, recvbuf, recvcount, + recvtype, root, comm, ierror) + TYPE(*), DIMENSION(..), INTENT(IN) :: sendbuf + TYPE(*), DIMENSION(..) 
:: recvbuf + INTEGER, INTENT(IN) :: sendcounts(*), displs(*), recvcount, root + TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Iscatterv(sendbuf, sendcounts, displs, sendtype, recvbuf, recvcount, + recvtype, root, comm, request, ierror) + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: sendbuf + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: recvbuf + INTEGER, INTENT(IN), ASYNCHRONOUS :: sendcounts(*), displs(*) + INTEGER, INTENT(IN) :: recvcount, root + TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Scatterv_init(sendbuf, sendcounts, displs, sendtype, recvbuf, recvcount, + recvtype, root, comm, info, request, ierror) + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: sendbuf + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: recvbuf + INTEGER, INTENT(IN), ASYNCHRONOUS :: sendcounts(*), displs(*) + INTEGER, INTENT(IN) :: recvcount, root + TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Info), INTENT(IN) :: info + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``sendbuf``: Address of send buffer (choice, significant only at root). +* ``sendcounts``: Integer array (of length group size) specifying the number of elements to send to each processor. +* ``displs``: Integer array (of length group size). Entry i specifies the displacement (relative to sendbuf) from which to take the outgoing data to process i. +* ``sendtype``: Datatype of send buffer elements (handle). +* ``recvcount``: Number of elements in receive buffer (integer). +* ``recvtype``: Datatype of receive buffer elements (handle). +* ``root``: Rank of sending process (integer). +* ``comm``: Communicator (handle). +* ``info``: Info (handle, persistent only). 
+ +OUTPUT PARAMETERS +----------------- +* ``recvbuf``: Address of receive buffer (choice). +* ``request``: Request (handle, non-blocking only). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Scatterv` is the inverse operation to :ref:`MPI_Gatherv`. + +:ref:`MPI_Scatterv` extends the functionality of :ref:`MPI_Scatter` by allowing a +varying count of data to be sent to each process, since *sendcounts* is +now an array. It also allows more flexibility as to where the data is +taken from on the root, by providing the new argument, *displs*. + +The outcome is as if the root executed *n* send operations, + +:: + + MPI_Send(sendbuf + displs[i] * extent(sendtype), \ + sendcounts[i], sendtype, i, ...) + + and each process executed a receive, + + MPI_Recv(recvbuf, recvcount, recvtype, root, ...) + + The send buffer is ignored for all nonroot processes. + +The type signature implied by *sendcount*\ [*i*], *sendtype* at the root +must be equal to the type signature implied by *recvcount*, *recvtype* +at process *i* (however, the type maps may be different). This implies +that the amount of data sent must be equal to the amount of data +received, pairwise between each process and the root. Distinct type maps +between sender and receiver are still allowed. + +All arguments to the function are significant on process *root*, while +on other processes, only arguments *recvbuf*, *recvcount*, *recvtype*, +*root*, *comm* are significant. The arguments *root* and *comm* must +have identical values on all processes. + +The specification of counts, types, and displacements should not cause +any location on the root to be read more than once. + +**Example 1:** The reverse of Example 5 in the :ref:`MPI_Gatherv` manpage. We +have a varying stride between blocks at sending (root) side, at the +receiving side we receive 100 - *i* elements into the *i*\ th column of +a 100 x 150 C array at process *i*. + +.. 
code-block:: c + + MPI_Comm comm; + int gsize,recvarray[100][150],*rptr; + int root, *sendbuf, myrank, bufsize, *stride; + MPI_Datatype rtype; + int i, *displs, *scounts, offset; + ... + MPI_Comm_size( comm, &gsize); + MPI_Comm_rank( comm, &myrank ); + + stride = (int *)malloc(gsize*sizeof(int)); + ... + /* stride[i] for i = 0 to gsize-1 is set somehow + * sendbuf comes from elsewhere + */ + ... + displs = (int *)malloc(gsize*sizeof(int)); + scounts = (int *)malloc(gsize*sizeof(int)); + offset = 0; + for (i=0; i= 100. + +:: + + MPI_Comm comm; + int gsize,*sendbuf; + int root, rbuf[100], i, *displs, *scounts; + + ... + + MPI_Comm_size(comm, &gsize); + sendbuf = (int *)malloc(gsize*stride*sizeof(int)); + ... + displs = (int *)malloc(gsize*sizeof(int)); + scounts = (int *)malloc(gsize*sizeof(int)); + for (i=0; i + + int MPI_Send(const void *buf, int count, MPI_Datatype datatype, int dest, + int tag, MPI_Comm comm) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_SEND(BUF, COUNT, DATATYPE, DEST, TAG, COMM, IERROR) + BUF(*) + INTEGER COUNT, DATATYPE, DEST, TAG, COMM, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Send(buf, count, datatype, dest, tag, comm, ierror) + TYPE(*), DIMENSION(..), INTENT(IN) :: buf + INTEGER, INTENT(IN) :: count, dest, tag + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``buf``: Initial address of send buffer (choice). +* ``count``: Number of elements send (nonnegative integer). +* ``datatype``: Datatype of each send buffer element (handle). +* ``dest``: Rank of destination (integer). +* ``tag``: Message tag (integer). +* ``comm``: Communicator (handle). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). 
+ +DESCRIPTION +----------- + +:ref:`MPI_Send` performs a standard-mode, blocking send. + + +NOTE +---- + +This routine will block until the message is sent to the destination. +For an in-depth explanation of the semantics of the standard-mode send, +refer to the MPI-1 Standard. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Isend` :ref:`MPI_Bsend` :ref:`MPI_Recv` diff --git a/docs/man-openmpi/man3/MPI_Send_init.3.rst b/docs/man-openmpi/man3/MPI_Send_init.3.rst new file mode 100644 index 00000000000..32f5b60fa2a --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Send_init.3.rst @@ -0,0 +1,96 @@ +.. _mpi_send_init: + + +MPI_Send_init +============= + +.. include_body + +:ref:`MPI_Send_init` - Builds a handle for a standard send. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include <mpi.h> + + int MPI_Send_init(const void *buf, int count, MPI_Datatype datatype, + int dest, int tag, MPI_Comm comm, MPI_Request *request) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_SEND_INIT(BUF, COUNT, DATATYPE, DEST, TAG, COMM, REQUEST, + IERROR) + BUF(*) + INTEGER COUNT, DATATYPE, DEST, TAG + INTEGER COMM, REQUEST, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE mpi_f08 + MPI_Send_init(buf, count, datatype, dest, tag, comm, request, ierror) + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: buf + INTEGER, INTENT(IN) :: count, dest, tag + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``buf``: Initial address of send buffer (choice). +* ``count``: Number of elements to send (integer). +* ``datatype``: Type of each element (handle). +* ``dest``: Rank of destination (integer). +* ``tag``: Message tag (integer). +* ``comm``: Communicator (handle). + +OUTPUT PARAMETERS +----------------- +* ``request``: Communication request (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +Creates a persistent communication request for a standard mode send +operation, and binds to it all the arguments of a send operation. + +A communication (send or receive) that uses a persistent request is +initiated by the function :ref:`MPI_Start` or :ref:`MPI_Startall`. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. 
seealso:: + :ref:`MPI_Bsend_init` :ref:`MPI_Ssend_init` :ref:`MPI_Rsend_init` :ref:`MPI_Recv_init` :ref:`MPI_Start` + :ref:`MPI_Startall` :ref:`MPI_Request_free` diff --git a/docs/man-openmpi/man3/MPI_Sendrecv.3.rst b/docs/man-openmpi/man3/MPI_Sendrecv.3.rst new file mode 100644 index 00000000000..6ce37bd15da --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Sendrecv.3.rst @@ -0,0 +1,129 @@ +.. _mpi_sendrecv: + + +MPI_Sendrecv +============ + +.. include_body + +:ref:`MPI_Sendrecv` - Sends and receives a message. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Sendrecv(const void *sendbuf, int sendcount, MPI_Datatype sendtype, + int dest, int sendtag, void *recvbuf, int recvcount, + MPI_Datatype recvtype, int source, int recvtag, + MPI_Comm comm, MPI_Status *status) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_SENDRECV(SENDBUF, SENDCOUNT, SENDTYPE, DEST, SENDTAG, + RECVBUF, RECVCOUNT, RECVTYPE, SOURCE, RECVTAG, COMM, + STATUS, IERROR) + SENDBUF(*), RECVBUF(*) + INTEGER SENDCOUNT, SENDTYPE, DEST, SENDTAG + INTEGER RECVCOUNT, RECVTYPE, SOURCE, RECVTAG, COMM + INTEGER STATUS(MPI_STATUS_SIZE), IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Sendrecv(sendbuf, sendcount, sendtype, dest, sendtag, recvbuf, + recvcount, recvtype, source, recvtag, comm, status, ierror) + TYPE(*), DIMENSION(..), INTENT(IN) :: sendbuf + TYPE(*), DIMENSION(..) :: recvbuf + INTEGER, INTENT(IN) :: sendcount, dest, sendtag, recvcount, source, + recvtag + TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Status) :: status + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``sendbuf``: Initial address of send buffer (choice). +* ``sendcount``: Number of elements to send (integer). +* ``sendtype``: Type of elements in send buffer (handle). 
+* ``dest``: Rank of destination (integer). +* ``sendtag``: Send tag (integer). +* ``recvcount``: Maximum number of elements to receive (integer). +* ``recvtype``: Type of elements in receive buffer (handle). +* ``source``: Rank of source (integer). +* ``recvtag``: Receive tag (integer). +* ``comm``: Communicator (handle). + +OUTPUT PARAMETERS +----------------- +* ``recvbuf``: Initial address of receive buffer (choice). +* ``status``: Status object (status). This refers to the receive operation. +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +The send-receive operations combine in one call the sending of a message +to one destination and the receiving of another message, from another +process. The two (source and destination) are possibly the same. A +send-receive operation is useful for executing a shift operation across +a chain of processes. If blocking sends and receives are used for such a +shift, then one needs to order the sends and receives correctly (for +example, even processes send, then receive; odd processes receive first, +then send) in order to prevent cyclic dependencies that may lead to +deadlock. When a send-receive operation is used, the communication +subsystem takes care of these issues. The send-receive operation can be +used in conjunction with the functions described in Chapter 6 of the +MPI-1 Standard, "Process Topologies," in order to perform shifts on +various logical topologies. Also, a send-receive operation is useful for +implementing remote procedure calls. + +A message sent by a send-receive operation can be received by a regular +receive operation or probed by a probe operation; a send-receive +operation can receive a message sent by a regular send operation. + +:ref:`MPI_Sendrecv` executes a blocking send and receive operation. Both send +and receive use the same communicator, but possibly different tags. 
The +send buffer and receive buffers must be disjoint, and may have different +lengths and datatypes. + +If your application does not need to examine the *status* field, you can +save resources by using the predefined constant MPI_STATUS_IGNORE as a +special value for the *status* argument. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Sendrecv_replace` diff --git a/docs/man-openmpi/man3/MPI_Sendrecv_replace.3.rst b/docs/man-openmpi/man3/MPI_Sendrecv_replace.3.rst new file mode 100644 index 00000000000..a6eb215de60 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Sendrecv_replace.3.rst @@ -0,0 +1,125 @@ +.. _mpi_sendrecv_replace: + + +MPI_Sendrecv_replace +==================== + +.. include_body + +:ref:`MPI_Sendrecv_replace` - Sends and receives a message using a single +buffer. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Sendrecv_replace(void *buf, int count, MPI_Datatype datatype, + int dest, int sendtag, int source, int recvtag, MPI_Comm comm, + MPI_Status *status) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_SENDRECV_REPLACE(BUF, COUNT, DATATYPE, DEST, SENDTAG, SOURCE, + RECVTAG, COMM, STATUS, IERROR) + BUF(*) + INTEGER COUNT, DATATYPE, DEST, SENDTAG + INTEGER SOURCE, RECVTAG, COMM + INTEGER STATUS(MPI_STATUS_SIZE), IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE mpi_f08 + MPI_Sendrecv_replace(buf, count, datatype, dest, sendtag, source, recvtag, + comm, status, ierror) + TYPE(*), DIMENSION(..) :: buf + INTEGER, INTENT(IN) :: count, dest, sendtag, source, recvtag + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Status) :: status + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT/OUTPUT PARAMETER +---------------------- +* ``buf``: Initial address of send and receive buffer (choice). + +INPUT PARAMETERS +---------------- +* ``count``: Number of elements in send and receive buffer (integer). +* ``datatype``: Type of elements to send and receive (handle). +* ``dest``: Rank of destination (integer). +* ``sendtag``: Send message tag (integer). +* ``source``: Rank of source (integer). +* ``recvtag``: Receive message tag (integer). +* ``comm``: Communicator (handle). + +OUTPUT PARAMETERS +----------------- +* ``status``: Status object (status). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +The send-receive operations combine in one call the sending of a message +to one destination and the receiving of another message, from another +process. The two (source and destination) are possibly the same. A +send-receive operation is useful for executing a shift operation across +a chain of processes. If blocking sends and receives are used for such a +shift, then one needs to order the sends and receives correctly (for +example, even processes send, then receive; odd processes receive first, +then send) in order to prevent cyclic dependencies that may lead to +deadlock. When a send-receive operation is used, the communication +subsystem takes care of these issues. The send-receive operation can be +used in conjunction with the functions described in Chapter 6 of the MPI +Standard, "Process Topologies," in order to perform shifts on various +logical topologies. 
Also, a send-receive operation is useful for +implementing remote procedure calls. + +A message sent by a send-receive operation can be received by a regular +receive operation or probed by a probe operation; a send-receive +operation can receive a message sent by a regular send operation. + +:ref:`MPI_Sendrecv_replace` executes a blocking send and receive. The same +buffer is used both for the send and for the receive, so that the +message sent is replaced by the message received. + +The semantics of a send-receive operation is what would be obtained if +the caller forked two concurrent threads, one to execute the send, and +one to execute the receive, followed by a join of these two threads. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Sendrecv` diff --git a/docs/man-openmpi/man3/MPI_Session_create_errhandler.3.rst b/docs/man-openmpi/man3/MPI_Session_create_errhandler.3.rst new file mode 100644 index 00000000000..877777e9244 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Session_create_errhandler.3.rst @@ -0,0 +1,93 @@ +.. _mpi_session_create_errhandler: + +MPI_Session_create_errhandler +============================= + +.. include_body + +:ref:`MPI_Session_create_errhandler` - Creates an error handler that can be +attached to sessions + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. 
code:: c + + #include + + int MPI_Session_create_errhandler(MPI_Session_errhandler_function *function, + MPI_Errhandler *errhandler) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_SESSION_CREATE_ERRHANDLER(FUNCTION, ERRHANDLER, IERROR) + EXTERNAL FUNCTION + INTEGER ERRHANDLER, IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code:: fortran + + USE mpi_f08 + + MPI_Session_create_errhandler(session_errhandler_fn, errhandler, ierror) + PROCEDURE(MPI_Session_errhandler_function) :: session_errhandler_fn + TYPE(MPI_Errhandler), INTENT(OUT) :: errhandler + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input Parameter +--------------- + +- ``function`` : User-defined error handling procedure (function). + +Output Parameters +----------------- + +- ``errhandler`` : MPI error handler (handle). +- ``IERROR`` : Fortran only: Error status (integer). + +Description +----------- + +:ref:`MPI_Session_create_errhandler` creates an error handler that can be +attached to sessions. This ``function`` is identical to +:ref:`MPI_Errhandler_create`, the use of which is deprecated. In C, the +user routine should be a ``function`` of type +MPI_Session_errhandler_function, which is defined as + +.. code:: c + + typedef void MPI_Session_errhandler_function(MPI_Session *, int *, ...); + +The first argument is the session in use. The second is the error code +to be returned by the MPI routine that raised the error. This typedef +replaces ``MPI_Handler_function``, the use of which is deprecated. In +Fortran, the user routine should be of this form: + +.. code:: fortran + + SUBROUTINE SESSION_ERRHANDLER_FUNCTION(SESSION, ERROR_CODE, ...) + INTEGER SESSION, ERROR_CODE + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the ``function`` and Fortran routines in the last argument. Before +the error value is returned, the current MPI error handler is called. 
By +default, this error handler aborts the MPI job, except for I/O +``function`` errors. The error handler may be changed with +MPI_Session_set_errhandler; the predefined error handler +MPI_ERRORS_RETURN may be used to cause error values to be returned. +Note that MPI does not guarantee that an MPI program can continue past +an error. diff --git a/docs/man-openmpi/man3/MPI_Session_f2c.3.rst b/docs/man-openmpi/man3/MPI_Session_f2c.3.rst new file mode 100644 index 00000000000..b675d830d51 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Session_f2c.3.rst @@ -0,0 +1,53 @@ +.. _mpi_session_f2c: + +MPI_Session_f2c +=============== + +.. include_body + +MPI_Session_c2f, :ref:`MPI_Session_f2c` - Translates a C session handle into a +Fortran INTEGER-style session handle, or vice versa. + +SYNTAX +------ + +C Syntax +^^^^^^^^ + +.. code:: c + + #include + + int MPI_Session_f2c(const MPI_Fint *f_session, MPI_Session *c_session) + int MPI_Session_c2f(const MPI_Session *c_session, MPI_Fint *f_session) + +PARAMETERS +---------- + +- ``f_session``: ``mpi``-style ``INTEGER`` MPI session object +- ``c_session``: C-style MPI session object + +DESCRIPTION +----------- + +These two procedures are provided in C to convert from a Fortran session +(which is an array of integers) to a C session (which is a structure), +and vice versa. The conversion occurs on all the information in +``session``, including that which is hidden. That is, no session +information is lost in the conversion. + +When using MPI_Session_f2c(), if ``f_session`` is a valid Fortran +session, then MPI_Session_f2c() returns in ``c_session`` a valid C +session with the same content. If ``f_session`` is the Fortran value of +MPI_SESSION_NULL, or if ``f_session`` is not a valid Fortran +session, then the call is erroneous. + +When using MPI_Session_c2f(), the opposite conversion is applied. If +``c_session`` is MPI_SESSION_NULL, or if ``c_session`` is not a +valid C session, then the call is erroneous. 
+ +NOTES +----- + +These functions are only available in C; they are not available in any +of the Fortran MPI interfaces. diff --git a/docs/man-openmpi/man3/MPI_Session_finalize.3.rst b/docs/man-openmpi/man3/MPI_Session_finalize.3.rst new file mode 100644 index 00000000000..5579742e031 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Session_finalize.3.rst @@ -0,0 +1,95 @@ +.. _mpi_session_finalize: + +MPI_Session_finalize +==================== + +.. include_body + +:ref:`MPI_Session_finalize` - releases all MPI state associated with a session + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. code:: c + + #include + + int MPI_Session_finalize(MPI_Session *session) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_SESSION_FINALIZE(SESSION, IERROR) + INTEGER SESSION, IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code:: fortran + + USE mpi_f08 + + MPI_Session_finalize(session, ierror) + TYPE(MPI_Session), INTENT(IN) :: session + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input Parameters +---------------- + +- session : session to be finalized (handle) + +Output Parameters +----------------- + +- IERROR : Fortran only: Error status (integer). + +Description +----------- + +:ref:`MPI_Session_finalize` releases all MPI state associated with the supplied +session. Every instantiated session must be finalized using +:ref:`MPI_Session_finalize`. The handle session is set to MPI_SESSION_NULL by +the call. + +Notes +----- + +Before an MPI process invokes :ref:`MPI_Session_finalize`, the process must +perform all MPI calls needed to complete its involvement in MPI +communications: it must locally complete all MPI operations that it +initiated and it must execute matching calls needed to complete MPI +communications initiated by other processes. The call to +:ref:`MPI_Session_finalize` does not free objects created by MPI calls; these +objects are freed using MPI_XXX_FREE calls. 
:ref:`MPI_Session_finalize` may be +synchronizing on any or all of the groups associated with communicators, +windows, or files derived from the session and not disconnected, freed, +or closed, respectively, before the call to :ref:`MPI_Session_finalize` +procedure. :ref:`MPI_Session_finalize` behaves as if all such synchronizations +occur concurrently. As :ref:`MPI_Comm_free` may mark a communicator for freeing +later, :ref:`MPI_Session_finalize` may be synchronizing on the group associated +with a communicator that is only freed (with MPI_Comm_free) rather than +disconnected (with MPI_Comm_disconnect). + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. Before the +error value is returned, the current MPI error handler is called. By +default, this error handler aborts the MPI job, except for I/O function +errors. The error handler may be changed with +MPI_Session_set_errhandler; the predefined error handler +MPI_ERRORS_RETURN may be used to cause error values to be returned. Note +that MPI does not guarantee that an MPI program can continue past an +error. + + +.. seealso:: :ref:`MPI_Session_init` diff --git a/docs/man-openmpi/man3/MPI_Session_get_info.3.rst b/docs/man-openmpi/man3/MPI_Session_get_info.3.rst new file mode 100644 index 00000000000..210b95b9cf4 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Session_get_info.3.rst @@ -0,0 +1,88 @@ +.. _mpi_session_get_info: + +MPI_Session_get_info +==================== + +.. include_body + +:ref:`MPI_Session_get_info` - Returns an info object containing the hints of an +MPI Session + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. code:: c + + #include + + int MPI_Session_get_info(MPI_Session session, MPI_Info *info_used) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: fortran + + USE MPI + !
or the older form: INCLUDE 'mpif.h' + + MPI_SESSION_GET_INFO(SESSION, INFO_USED, IERROR) + INTEGER SESSION, INFO_USED, IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code:: fortran + + USE mpi_f08 + + MPI_Session_get_info(session, info_used) + TYPE(MPI_Session), INTENT(IN) :: session + TYPE(MPI_Info), INTENT(OUT) :: info_used + +Input Parameters +---------------- + +- session : session (handle) + +Output Parameters +----------------- + +- info_used: info object (handle) +- IERROR : Fortran only: Error status (integer). + +Description +----------- + +:ref:`MPI_Session_get_info` returns a new info object containing the hints of +the MPI Session associated with session. The current setting of all +hints related to this MPI Session is returned in info_used. An MPI +implementation is required to return all hints that are supported by the +implementation and have default values specified; any user-supplied +hints that were not ignored by the implementation; and any additional +hints that were set by the implementation. If no such hints exist, a +handle to a newly created info object is returned that contains no +key/value pair. + +Notes +----- + +The user is responsible for freeing info_used via :ref:`MPI_Info_free`. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. Before the +error value is returned, the current MPI error handler is called. By +default, this error handler aborts the MPI job, except for I/O function +errors. The error handler may be changed with +MPI_Session_set_errhandler; the predefined error handler +MPI_ERRORS_RETURN may be used to cause error values to be returned. Note +that MPI does not guarantee that an MPI program can continue past an +error. + + +..
seealso:: :ref:`MPI_Session_init` diff --git a/docs/man-openmpi/man3/MPI_Session_get_nth_pset.3.rst b/docs/man-openmpi/man3/MPI_Session_get_nth_pset.3.rst new file mode 100644 index 00000000000..d805d1439f6 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Session_get_nth_pset.3.rst @@ -0,0 +1,102 @@ +.. _mpi_session_get_nth_pset: + +MPI_Session_get_nth_pset +======================== + +.. include_body + +:ref:`MPI_Session_get_nth_pset` - Query runtime for name of the nth process set + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. code:: c + + #include + + int MPI_Session_get_nth_pset(MPI_Session session, MPI_Info info, int n, int *pset_len, char *pset_name) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_SESSION_GET_NTH_PSET(SESSION, INFO, N, PSET_LEN, PSET_NAME, IERROR) + INTEGER SESSION, INFO, N, PSET_LEN, IERROR + CHARACTER*(*) PSET_NAME + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code:: fortran + + USE mpi_f08 + + MPI_Session_get_nth_pset(session, info, n, pset_len, pset_name, ierror) + TYPE(MPI_Session), INTENT(IN) :: session + TYPE(MPI_Info), INTENT(IN) :: info + INTEGER, INTENT(IN) :: n + INTEGER, INTENT(INOUT) :: pset_len + CHARACTER(LEN=*), INTENT(OUT) :: pset_name + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input Parameters +---------------- + +- session : session (handle) +- info: info object (handle) +- n: index of the desired process set name (integer) + +Input/Output Parameter +^^^^^^^^^^^^^^^^^^^^^^ + +- pset_len: length of the pset_name argument (integer) + +Output Parameters +----------------- + +- pset_name : name of the nth process set (string) +- IERROR : Fortran only: Error status (integer). + +Description +----------- + +:ref:`MPI_Session_get_nth_pset` returns the name of the nth process set in the +supplied pset_name buffer. pset_len is the size of the buffer needed to +store the nth process set name. 
If the pset_len passed into the function +is less than the actual buffer size needed for the process set name, +then the string value returned in pset_name is truncated. If pset_len is +set to 0, pset_name is not changed. On return, the value of pset_len +will be set to the required buffer size to hold the process set name. In +C, pset_len includes the required space for the null terminator. In C, +this function returns a null terminated string in all cases where the +pset_len input value is greater than 0. + +Notes +----- + +Process set names have an implementation-defined maximum length of +MPI_MAX_PSET_NAME_LEN characters. MPI_MAX_PSET_NAME_LEN shall have a +value of at least 63. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. Before the +error value is returned, the current MPI error handler is called. By +default, this error handler aborts the MPI job, except for I/O function +errors. The error handler may be changed with +MPI_Session_set_errhandler; the predefined error handler +MPI_ERRORS_RETURN may be used to cause error values to be returned. Note +that MPI does not guarantee that an MPI program can continue past an +error. + + +.. seealso:: :ref:`MPI_Session_init` diff --git a/docs/man-openmpi/man3/MPI_Session_get_num_psets.3.rst b/docs/man-openmpi/man3/MPI_Session_get_num_psets.3.rst new file mode 100644 index 00000000000..9414bd44a87 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Session_get_num_psets.3.rst @@ -0,0 +1,95 @@ +.. _mpi_session_get_num_psets: + +MPI_Session_get_num_psets +========================= + +.. include_body + +:ref:`MPI_Session_get_num_psets` - Query runtime for number of available +process sets + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. code:: c + + #include + + int MPI_Session_get_num_psets(MPI_Session session, MPI_Info info, int *npset_names) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: fortran + + USE MPI + ! 
or the older form: INCLUDE 'mpif.h' + + MPI_SESSION_GET_NUM_PSETS(SESSION, INFO, NPSET_NAMES, IERROR) + INTEGER SESSION, INFO, NPSET_NAMES, IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code:: fortran + + USE mpi_f08 + + MPI_Session_get_num_psets(session, info, npset_names, ierror) + TYPE(MPI_Session), INTENT(IN) :: session + TYPE(MPI_Info), INTENT(IN) :: info + INTEGER, INTENT(OUT) :: npset_names + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input Parameters +---------------- + +- session : session (handle) +- info: info object (handle) + +Output Parameters +----------------- + +- npset_names : number of available process sets (non-negative integer) +- IERROR : Fortran only: Error status (integer). + +Description +----------- + +:ref:`MPI_Session_get_num_psets` is used to query the runtime for the number of +available process sets in which the calling MPI process is a member. An +MPI implementation is allowed to increase the number of available +process sets during the execution of an MPI application when new process +sets become available. However, MPI implementations are not allowed to +change the index of a particular process set name, or to change the name +of the process set at a particular index, or to delete a process set +name once it has been added. + +Notes +----- + +When a process set becomes invalid, for example, when some processes +become unreachable due to failures in the communication system, +subsequent usage of the process set name may raise an error. For +example, creating an MPI_Group from such a process set might succeed +because it is a local operation, but creating an MPI_Comm from that +group and attempting collective communication may raise an error. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. Before the +error value is returned, the current MPI error handler is called.
By +default, this error handler aborts the MPI job, except for I/O function +errors. The error handler may be changed with +MPI_Session_set_errhandler; the predefined error handler +MPI_ERRORS_RETURN may be used to cause error values to be returned. Note +that MPI does not guarantee that an MPI program can continue past an +error. + + +.. seealso:: :ref:`MPI_Session_init` diff --git a/docs/man-openmpi/man3/MPI_Session_get_pset_info.3.rst b/docs/man-openmpi/man3/MPI_Session_get_pset_info.3.rst new file mode 100644 index 00000000000..a9deb2abfea --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Session_get_pset_info.3.rst @@ -0,0 +1,89 @@ +.. _mpi_session_get_pset_info: + +MPI_Session_get_pset_info +========================= + +.. include_body + +:ref:`MPI_Session_get_pset_info` - Returns an info object containing properties +of a specific process set + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. code:: c + + #include + + int MPI_Session_get_pset_info(MPI_Session session, const char *pset_name, MPI_Info *info) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_SESSION_GET_PSET_INFO(SESSION, PSET_NAME, INFO, IERROR) + INTEGER SESSION, INFO, IERROR + CHARACTER*(*) PSET_NAME + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code:: fortran + + USE mpi_f08 + + MPI_Session_get_pset_info(session, pset_name, info, ierror) + TYPE(MPI_Session), INTENT(IN) :: session + CHARACTER(LEN=*), INTENT(IN) :: pset_name + TYPE(MPI_Info), INTENT(OUT) :: info + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input Parameters +---------------- + +- session : session (handle) +- pset_name : name of process set (string) + +Output Parameters +----------------- + +- info: info object (handle) +- IERROR : Fortran only: Error status (integer). + +Description +----------- + +:ref:`MPI_Session_get_pset_info` is used to query properties of a specific +process set. 
The returned info object can be queried with existing MPI +info object query functions. One key/value pair must be defined, +"mpi_size". The value of the "mpi_size" key specifies the number of MPI +processes in the process set. + +Notes +----- + +The user is responsible for freeing the returned info object via +:ref:`MPI_Info_free`. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. Before the +error value is returned, the current MPI error handler is called. By +default, this error handler aborts the MPI job, except for I/O function +errors. The error handler may be changed with +MPI_Session_set_errhandler; the predefined error handler +MPI_ERRORS_RETURN may be used to cause error values to be returned. Note +that MPI does not guarantee that an MPI program can continue past an +error. + + +.. seealso:: :ref:`MPI_Session_init` diff --git a/docs/man-openmpi/man3/MPI_Session_init.3.rst b/docs/man-openmpi/man3/MPI_Session_init.3.rst new file mode 100644 index 00000000000..3d66fae8bd5 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Session_init.3.rst @@ -0,0 +1,89 @@ +.. _mpi_session_init: + +MPI_Session_init +================ + +.. include_body + +:ref:`MPI_Session_init` - Creates a new session handle + +Syntax +------ + +C Syntax +^^^^^^^^ + +.. code:: c + + #include + + int MPI_Session_init(MPI_Info info, MPI_Errhandler errhandler, MPI_Session *session) + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + + MPI_SESSION_INIT(INFO, ERRHANDLER, SESSION, IERROR) + INTEGER INFO, ERRHANDLER, SESSION, IERROR + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +..
code:: fortran + + USE mpi_f08 + + MPI_Session_init(info, errhandler, session, ierror) + TYPE(MPI_Info), INTENT(IN) :: info + TYPE(MPI_Errhandler), INTENT(IN) :: errhandler + TYPE(MPI_Session), INTENT(OUT) :: session + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +Input Parameters +---------------- + +- info : info object (handle) +- errhandler : error handler to be attached to the returned session + (handle) + +Output Parameters +----------------- + +- session : New session (handle). +- IERROR : Fortran only: Error status (integer). + +Description +----------- + +:ref:`MPI_Session_init` is used to instantiate an MPI Session. The returned +session handle can be used to query the runtime system about +characteristics of the job within which the process is running, as well +as other system resources. An application can make multiple calls to +:ref:`MPI_Session_init` and the related :ref:`MPI_Session_finalize` routine. + +Notes +----- + +The info argument is used to request MPI functionality requirements and +possible MPI implementation specific capabilities. + +The errhandler argument specifies an error handler to invoke in the +event that the Session instantiation call encounters an error. + +Errors +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. Before the +error value is returned, the current MPI error handler is called. By +default, this error handler aborts the MPI job, except for I/O function +errors. The predefined error handler MPI_ERRORS_RETURN may be used to +cause error values to be returned. Note that MPI does not guarantee that +an MPI program can continue past an error. + + +.. 
seealso:: :ref:`MPI_Session_get_num_psets` MPI_Session_group_from_pset diff --git a/docs/man-openmpi/man3/MPI_Sizeof.3.rst b/docs/man-openmpi/man3/MPI_Sizeof.3.rst new file mode 100644 index 00000000000..0ad4b99a146 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Sizeof.3.rst @@ -0,0 +1,78 @@ +.. _mpi_sizeof: + + +MPI_Sizeof +========== + +.. include_body + +:ref:`MPI_Sizeof` - Returns the size, in bytes, of the given type + + +SYNTAX +------ + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_SIZEOF(X, SIZE, IERROR) + X + INTEGER SIZE, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Sizeof(x, size, ierror) + TYPE(*), DIMENSION(..) :: x + INTEGER, INTENT(OUT) :: size + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETER +--------------- +* ``X``: A Fortran variable of numeric intrinsic type (choice). + +OUTPUT PARAMETERS +----------------- +* ``SIZE``: Size of machine representation of that type (integer). +* ``IERROR``: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_SIZEOF` returns the size (in bytes) of the machine representation of +the given variable. It is a generic Fortran type and has a Fortran +binding only. This routine is similar to the sizeof builtin in C. +However, if given an array argument, it returns the size of the base +element, not the size of the whole array. + + +NOTES +----- + +This function is not available in C because it is not necessary. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. 
The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + +See the MPI man page for a full list of MPI error codes. diff --git a/docs/man-openmpi/man3/MPI_Ssend.3.rst b/docs/man-openmpi/man3/MPI_Ssend.3.rst new file mode 100644 index 00000000000..e6b0d41b3ac --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Ssend.3.rst @@ -0,0 +1,84 @@ +.. _mpi_ssend: + + +MPI_Ssend +========= + +.. include_body + +:ref:`MPI_Ssend` - Standard synchronous send. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Ssend(const void *buf, int count, MPI_Datatype datatype, int dest, + int tag, MPI_Comm comm) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_SSEND(BUF, COUNT, DATATYPE, DEST, TAG, COMM, IERROR) + BUF(*) + INTEGER COUNT, DATATYPE, DEST, TAG, COMM, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Ssend(buf, count, datatype, dest, tag, comm, ierror) + TYPE(*), DIMENSION(..), INTENT(IN) :: buf + INTEGER, INTENT(IN) :: count, dest, tag + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``buf``: Initial address of send buffer (choice). +* ``count``: Number of elements in send buffer (nonnegative integer). +* ``datatype``: Datatype of each send buffer element (handle). +* ``dest``: Rank of destination (integer). +* ``tag``: Message tag (integer). +* ``comm``: Communicator (handle). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Ssend` performs a synchronous-mode, blocking send. 
See the MPI-1 +Standard for more detailed information about such sends. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_Ssend_init.3.rst b/docs/man-openmpi/man3/MPI_Ssend_init.3.rst new file mode 100644 index 00000000000..6e610fa0006 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Ssend_init.3.rst @@ -0,0 +1,95 @@ +.. _mpi_ssend_init: + + +MPI_Ssend_init +============== + +.. include_body + +:ref:`MPI_Ssend_init` - Builds a handle for a synchronous send. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Ssend_init(const void *buf, int count, MPI_Datatype datatype, + int dest, int tag, MPI_Comm comm, MPI_Request *request) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_SSEND_INIT(BUF, COUNT, DATATYPE, DEST, TAG, COMM, REQUEST, + IERROR) + BUF(*) + INTEGER COUNT, DATATYPE, DEST, TAG, COMM, REQUEST, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE mpi_f08 + MPI_Ssend_init(buf, count, datatype, dest, tag, comm, request, ierror) + TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: buf + INTEGER, INTENT(IN) :: count, dest, tag + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Request), INTENT(OUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``buf``: Initial address of send buffer (choice). +* ``count``: Number of elements to send (integer). +* ``datatype``: Type of each element (handle). +* ``dest``: Rank of destination (integer). +* ``tag``: Message tag (integer). +* ``comm``: Communicator (handle). + +OUTPUT PARAMETERS +----------------- +* ``request``: Communication request (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +Creates a persistent communication object for a synchronous mode send +operation, and binds to it all the arguments of a send operation. + +A communication (send or receive) that uses a persistent request is +initiated by the function :ref:`MPI_Start`. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. 
seealso:: + :ref:`MPI_Bsend_init` :ref:`MPI_Send_init` :ref:`MPI_Rsend_init` :ref:`MPI_Recv_init` :ref:`MPI_Start` + :ref:`MPI_Startall` :ref:`MPI_Ssend` diff --git a/docs/man-openmpi/man3/MPI_Start.3.rst b/docs/man-openmpi/man3/MPI_Start.3.rst new file mode 100644 index 00000000000..64a98d5e9d6 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Start.3.rst @@ -0,0 +1,99 @@ +.. _mpi_start: + + +MPI_Start +========= + +.. include_body + +:ref:`MPI_Start` - Initiates a communication using a persistent request +handle. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Start(MPI_Request *request) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_START(REQUEST, IERROR) + INTEGER REQUEST, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Start(request, ierror) + TYPE(MPI_Request), INTENT(INOUT) :: request + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETER +--------------- +* ``request``: Communication request (handle). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +A communication (send or receive) that uses a persistent request is +initiated by the function :ref:`MPI_Start`. + +The argument, request, is a handle returned by one of the persistent +communication-request initialization functions (:ref:`MPI_Send_init`, +:ref:`MPI_Bsend_init`, :ref:`MPI_Ssend_init`, :ref:`MPI_Rsend_init`, MPI_Recv_init). The +associated request should be inactive and becomes active once the call +is made. + +If the request is for a send with ready mode, then a matching receive +should be posted before the call is made. From the time the call is made +until after the operation completes, the communication buffer should not +be accessed. 
+ +The call is local, with semantics similar to the nonblocking +communication operations (see Section 3.7 in the MPI-1 Standard, +"Nonblocking Communication.") That is, a call to :ref:`MPI_Start` with a +request created by :ref:`MPI_Send_init` starts a communication in the same +manner as a call to :ref:`MPI_Isend`; a call to :ref:`MPI_Start` with a request +created by :ref:`MPI_Bsend_init` starts a communication in the same manner as a +call to :ref:`MPI_Ibsend`; and so on. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Bsend_init` :ref:`MPI_Rsend_init` :ref:`MPI_Send_init` :ref:`MPI_Ssend_init` + :ref:`MPI_Recv_init` :ref:`MPI_Startall` diff --git a/docs/man-openmpi/man3/MPI_Startall.3.rst b/docs/man-openmpi/man3/MPI_Startall.3.rst new file mode 100644 index 00000000000..f53058f324f --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Startall.3.rst @@ -0,0 +1,118 @@ +.. _mpi_startall: + + +MPI_Startall +============ + +.. include_body + +:ref:`MPI_Startall` - Starts a collection of requests. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Startall(int count, MPI_Request array_of_requests[]) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_STARTALL(COUNT, ARRAY_OF_REQUESTS, IERROR) + INTEGER COUNT, ARRAY_OF_REQUESTS(*), IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +..
code-block:: fortran + + USE mpi_f08 + MPI_Startall(count, array_of_requests, ierror) + INTEGER, INTENT(IN) :: count + TYPE(MPI_Request), INTENT(INOUT) :: array_of_requests(count) + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETER +--------------- +* ``count``: List length (integer). + +INPUT/OUTPUT PARAMETER +---------------------- +* ``array_of_requests``: Array of requests (array of handle). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +Starts all communications associated with requests in array_of_requests. +A call to MPI_Startall(count, array_of_requests) has the same effect as +calls to :ref:`MPI_Start` (&array_of_requests[i]), executed for i=0 ,..., +count-1, in some arbitrary order. + +A communication started with a call to :ref:`MPI_Start` or :ref:`MPI_Startall` is +completed by a call to :ref:`MPI_Wait`, :ref:`MPI_Test`, or one of the derived +functions :ref:`MPI_Waitany`, :ref:`MPI_Testany`, :ref:`MPI_Waitall`, :ref:`MPI_Testall`, +:ref:`MPI_Waitsome`, :ref:`MPI_Testsome` (these are described in Section 3.7.5 of the +MPI-1 Standard, "Multiple Completions"). The request becomes inactive +after successful completion by such a call. The request is not +deallocated, and it can be activated anew by another :ref:`MPI_Start` or +:ref:`MPI_Startall` call. + +A persistent request is deallocated by a call to :ref:`MPI_Request_free` (see +Section 3.7.3 of the MPI-1 Standard, "Communication Completion"). + +| The call to :ref:`MPI_Request_free` can occur at any point in the program + after the persistent request was created. However, the request will be + deallocated only after it becomes inactive. Active receive requests + should not be freed. Otherwise, it will not be possible to check that + the receive has completed. It is preferable, in general, to free + requests when they are inactive. 
If this rule is followed, then the + persistent communication request functions will be invoked in a + sequence of the form, + +| Create (Start Complete)\* Free + +where \* indicates zero or more repetitions. If the same communication +object is used in several concurrent threads, it is the user's +responsibility to coordinate calls so that the correct sequence is +obeyed. + +A send operation initiated with :ref:`MPI_Start` can be matched with any +receive operation and, likewise, a receive operation initiated with +:ref:`MPI_Start` can receive messages generated by any send operation. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Bsend_init` :ref:`MPI_Rsend_init` :ref:`MPI_Send_init` :ref:`MPI_Ssend_init` :ref:`MPI_Recv_init` + :ref:`MPI_Start` :ref:`MPI_Request_free` diff --git a/docs/man-openmpi/man3/MPI_Status_c2f.3.rst b/docs/man-openmpi/man3/MPI_Status_c2f.3.rst new file mode 100644 index 00000000000..0a6cb6c1dc5 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Status_c2f.3.rst @@ -0,0 +1,9 @@ +.. _mpi_status_c2f: + +MPI_Status_c2f +============== + .. include_body + +.. include:: ../man3/MPI_Status_f2c.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Status_c2f08.3.rst b/docs/man-openmpi/man3/MPI_Status_c2f08.3.rst new file mode 100644 index 00000000000..8f14f6b0c59 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Status_c2f08.3.rst @@ -0,0 +1,9 @@ +.. 
_mpi_status_c2f08: + +MPI_Status_c2f08 +================ + .. include_body + +.. include:: ../man3/MPI_Status_f082c.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Status_f082c.3.rst b/docs/man-openmpi/man3/MPI_Status_f082c.3.rst new file mode 100644 index 00000000000..3dd388539a7 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Status_f082c.3.rst @@ -0,0 +1,64 @@ +.. _mpi_status_f082c: + +MPI_Status_f082c +================ + +.. include_body + +:ref:`MPI_Status_f082c`, :ref:`MPI_Status_c2f08` - Translates a C status into a +Fortran 2008 status, or vice versa. + +SYNTAX +------ + +C Syntax +^^^^^^^^ + +.. code:: c + + #include + + int MPI_Status_f082c(const MPI_F08_status *f08_status, MPI_Status *c_status) + int MPI_Status_c2f08(const MPI_Status *c_status, MPI_F08_status *f08_status) + +PARAMETERS +---------- + +- f08_status: mpi_f08-style MPI status object +- c_status: C-style MPI status object + +DESCRIPTION +----------- + +These two procedures are provided in C to convert from a Fortran 2008 +status (which is a derived type made of integers) to a C status (which +is a structure), and vice versa. The conversion occurs on all the +information in status, including that which is hidden. That is, no +status information is lost in the conversion. + +When using MPI_Status_f082c(), if f08_status is a valid Fortran status, +but not the Fortran value of MPI_F08_STATUS_IGNORE or +MPI_F08_STATUSES_IGNORE, then MPI_Status_f082c() returns in c_status a +valid C status with the same content. If f08_status is the Fortran value +of MPI_STATUS_IGNORE or MPI_STATUSES_IGNORE, or if f08_status is not a +valid Fortran status, then the call is erroneous. + +When using MPI_Status_c2f08(), the opposite conversion is applied. If +c_status is MPI_STATUS_IGNORE or MPI_STATUSES_IGNORE, or if c_status is +not a valid C status, then the call is erroneous. 
+ +The input status has the same source, tag and error code values as the +output status, and returns the same answers when queried for count, +elements, and cancellation. The conversion function may be called with +an input status argument that has an undefined error field, in which +case the value of the error field in the output status argument is +undefined. + +NOTES +----- + +These functions are only available in C; they are not available in any +of the Fortran MPI interfaces. + + +.. seealso:: :ref:`MPI_Status_c2f` :ref:`MPI_Status_f2f08` diff --git a/docs/man-openmpi/man3/MPI_Status_f082f.3.rst b/docs/man-openmpi/man3/MPI_Status_f082f.3.rst new file mode 100644 index 00000000000..4eca8d21143 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Status_f082f.3.rst @@ -0,0 +1,99 @@ +.. _mpi_status_f082f: + +MPI_Status_f082f +================ + +.. include_body + +:ref:`MPI_Status_f082f`, :ref:`MPI_Status_f2f08` - Translates a Fortran 2008 status +into a Fortran INTEGER-style status, or vice versa. + +SYNTAX +------ + +C Syntax +^^^^^^^^ + +.. code:: c + + #include + + int MPI_Status_f082f(const MPI_F08_status *f08_status, MPI_Fint *f_status) + int MPI_Status_f2f08(const MPI_Fint *f_status, MPI_F08_status *f08_status) + +Fortran mpi Module Syntax +^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code:: fortran + + USE MPI + + MPI_STATUS_F082F(F08_STATUS, F_STATUS, IERROR) + TYPE(MPI_Status) :: F08_STATUS + INTEGER :: F_STATUS(MPI_STATUS_SIZE), IERROR + + MPI_STATUS_F2F08(F_STATUS, F08_STATUS, IERROR) + INTEGER :: F_STATUS(MPI_STATUS_SIZE), IERROR + TYPE(MPI_Status) :: F08_STATUS + +Fortran mpi_f08 Module Syntax +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +..
code:: fortran + + USE mpi_f08 + + MPI_Status_f082f(f08_status, f_status, ierror) + TYPE(MPI_Status), INTENT(IN) :: f08_status + INTEGER, INTENT(OUT) :: f_status(MPI_STATUS_SIZE) + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Status_f2f08(f_status, f08_status, ierror) + INTEGER, INTENT(IN) :: f_status(MPI_STATUS_SIZE) + TYPE(MPI_Status), INTENT(OUT) :: f08_status + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + +PARAMETERS +---------- + +- f08_status: mpi_f08-style MPI status object +- f_status: mpi-style INTEGER MPI status object + +DESCRIPTION +----------- + +These two procedures are provided to convert from a Fortran 2008 status +(which is a derived datatype made of integers) to a Fortran status +(which is an array of integers), and vice versa. The conversion occurs +on all the information in status, including that which is hidden. That +is, no status information is lost in the conversion. + +When using MPI_Status_f082f(), if f08_status is a valid Fortran status, +but not the Fortran value of MPI_F08_STATUS_IGNORE (in C), +MPI_STATUS_IGNORE (in Fortran) or MPI_F08_STATUSES_IGNORE (in C) or +MPI_STATUSES_IGNORE (in Fortran), then MPI_Status_f082f() returns in +f_status a valid array with the same content. If f08_status is the C +value of MPI_F08_STATUS_IGNORE or MPI_F08_STATUSES_IGNORE or the Fortran +value of MPI_STATUS_IGNORE or MPI_STATUSES_IGNORE, or if f08_status is +not a valid Fortran status, then the call is erroneous. + +When using MPI_Status_f2f08(), the opposite conversion is applied. If +f_status is MPI_STATUS_IGNORE or MPI_STATUSES_IGNORE, or if f_status is +not a valid Fortran status, then the call is erroneous. + +The input status has the same source, tag and error code values as the +output status, and returns the same answers when queried for count, +elements, and cancellation. 
The conversion function may be called with +an input status argument that has an undefined error field, in which +case the value of the error field in the output status argument is +undefined. + +NOTES +----- + +The Fortran subroutines for these MPI routines are only available in the +mpi and mpi_f08 modules (including the type specification for +TYPE(MPI_Status)); they are (intentionally) not available in mpif.h. + + +.. seealso:: :ref:`MPI_Status_c2f` :ref:`MPI_Status_c2f08` diff --git a/docs/man-openmpi/man3/MPI_Status_f2c.3.rst b/docs/man-openmpi/man3/MPI_Status_f2c.3.rst new file mode 100644 index 00000000000..821e3893473 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Status_f2c.3.rst @@ -0,0 +1,64 @@ +.. _mpi_status_f2c: + +MPI_Status_f2c +============== + +.. include_body + +:ref:`MPI_Status_f2c`, :ref:`MPI_Status_c2f` - Translates a C status into a Fortran +INTEGER-style status, or vice versa. + +SYNTAX +------ + +C Syntax +^^^^^^^^ + +.. code:: c + + #include <mpi.h> + + int MPI_Status_f2c(const MPI_Fint *f_status, MPI_Status *c_status) + int MPI_Status_c2f(const MPI_Status *c_status, MPI_Fint *f_status) + +PARAMETERS +---------- + +- f_status: mpi-style INTEGER MPI status object +- c_status: C-style MPI status object + +DESCRIPTION +----------- + +These two procedures are provided in C to convert from a Fortran status +(which is an array of integers) to a C status (which is a structure), +and vice versa. The conversion occurs on all the information in status, +including that which is hidden. That is, no status information is lost +in the conversion. + +When using MPI_Status_f2c(), if f_status is a valid Fortran status, but +not the Fortran value of MPI_STATUS_IGNORE or MPI_STATUSES_IGNORE, then +MPI_Status_f2c() returns in c_status a valid C status with the same +content. If f_status is the Fortran value of MPI_STATUS_IGNORE or +MPI_STATUSES_IGNORE, or if f_status is not a valid Fortran status, then +the call is erroneous.
+ +When using MPI_Status_c2f(), the opposite conversion is applied. If +c_status is MPI_STATUS_IGNORE or MPI_STATUSES_IGNORE, or if c_status is +not a valid C status, then the call is erroneous. + +The input status has the same source, tag and error code values as the +output status, and returns the same answers when queried for count, +elements, and cancellation. The conversion function may be called with +an input status argument that has an undefined error field, in which +case the value of the error field in the output status argument is +undefined. + +NOTES +----- + +These functions are only available in C; they are not available in any +of the Fortran MPI interfaces. + + +.. seealso:: :ref:`MPI_Status_f082c` :ref:`MPI_Status_f2f08` diff --git a/docs/man-openmpi/man3/MPI_Status_f2f08.3.rst b/docs/man-openmpi/man3/MPI_Status_f2f08.3.rst new file mode 100644 index 00000000000..e597beb24f5 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Status_f2f08.3.rst @@ -0,0 +1,9 @@ +.. _mpi_status_f2f08: + +MPI_Status_f2f08 +================ + .. include_body + +.. include:: ../man3/MPI_Status_f082f.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Status_set_cancelled.3.rst b/docs/man-openmpi/man3/MPI_Status_set_cancelled.3.rst new file mode 100644 index 00000000000..805d107f223 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Status_set_cancelled.3.rst @@ -0,0 +1,96 @@ +.. _mpi_status_set_cancelled: + + +MPI_Status_set_cancelled +======================== + +.. include_body + +:ref:`MPI_Status_set_cancelled` - Sets *status* to indicate a request has +been canceled. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Status_set_cancelled(MPI_Status *status, int flag) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! 
or the older form: INCLUDE 'mpif.h' + MPI_STATUS_SET_CANCELLED(STATUS, FLAG, IERROR) + INTEGER STATUS(MPI_STATUS_SIZE), IERROR + LOGICAL FLAG + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Status_set_cancelled(status, flag, ierror) + TYPE(MPI_Status), INTENT(INOUT) :: status + LOGICAL, INTENT(OUT) :: flag + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT/OUTPUT PARAMETER +---------------------- +* ``status``: Status with which to associate cancel flag (status). + +INPUT PARAMETER +--------------- +* ``flag``: If true, indicates request was canceled (logical). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +If *flag* is set to true, then a subsequent call to +MPI_Test_cancelled(status, flag*) will also return *flag* = true; +otherwise it will return false. + + +NOTES +----- + +Users are advised not to reuse the status fields for values other than +those for which they were intended. Doing so may lead to unexpected +results when using the status object. For example, calling +:ref:`MPI_Get_elements` may cause an error if the value is out of range, or it +may be impossible to detect such an error. The *extra_state* argument +provided with a generalized request can be used to return information +that does not logically belong in *status*. Furthermore, modifying the +values in a status set internally by MPI, such as :ref:`MPI_Recv`, may lead to +unpredictable results and is strongly discouraged. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. 
The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_Status_set_elements.3.rst b/docs/man-openmpi/man3/MPI_Status_set_elements.3.rst new file mode 100644 index 00000000000..0772ae76815 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Status_set_elements.3.rst @@ -0,0 +1,127 @@ +.. _mpi_status_set_elements: + + +MPI_Status_set_elements +======================= + +.. include_body + +:ref:`MPI_Status_set_elements`, :ref:`MPI_Status_set_elements_x` - Modifies +opaque part of *status* to allow :ref:`MPI_Get_elements` to return *count*. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Status_set_elements(MPI_Status *status, MPI_Datatype datatype, int count) + int MPI_Status_set_elements_x(MPI_Status *status, MPI_Datatype datatype, MPI_Count count) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_STATUS_SET_ELEMENTS(STATUS, DATATYPE, COUNT, IERROR) + INTEGER STATUS(MPI_STATUS_SIZE), DATATYPE, COUNT, IERROR + MPI_STATUS_SET_ELEMENTS_X(STATUS, DATATYPE, COUNT, IERROR) + INTEGER STATUS(MPI_STATUS_SIZE), DATATYPE + INTEGER(KIND=MPI_COUNT_KIND) COUNT + INTEGER IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE mpi_f08 + MPI_Status_set_elements(status, datatype, count, ierror) + TYPE(MPI_Status), INTENT(INOUT) :: status + TYPE(MPI_Datatype), INTENT(IN) :: datatype + INTEGER, INTENT(IN) :: count + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + MPI_Status_set_elements_x(status, datatype, count, ierror) + TYPE(MPI_Status), INTENT(INOUT) :: status + TYPE(MPI_Datatype), INTENT(IN) :: datatype + INTEGER(KIND = MPI_COUNT_KIND), INTENT(IN) :: count + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT/OUTPUT PARAMETER +---------------------- +* ``status``: Status to associate with *count (status).* + +INPUT PARAMETERS +---------------- +* ``datatype``: Data type associated with *count (handle).* +* ``count``: Number of elements to associate with *status (integer).* + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Status_set_elements` modifies the opaque part of *status so that a +call to :ref:`MPI_Get_elements` or :ref:`MPI_Get_elements_x` will return count. +:ref:`MPI_Get_count` will return a compatible value.* + +A subsequent call to MPI_Get_count(status, datatype, count), to +MPI_Get_elements(status, datatype, count), or to +MPI_Get_elements_x(status, datatype, count) must use a data-type +argument that has the same type signature as the data-type argument that +was used in the call to :ref:`MPI_Status_set_elements`.* + + +NOTES +----- + +Users are advised not to reuse the status fields for values other than +those for which they were intended. Doing so may lead to unexpected +results when using the status object. For example, calling +:ref:`MPI_Get_elements` may cause an error if the value is out of range, or it +may be impossible to detect such an error. The *extra_state argument +provided with a generalized request can be used to return information +that does not logically belong in status. 
Furthermore, modifying the +values in a status set internally by MPI, such as :ref:`MPI_Recv`, may lead to +unpredictable results and is strongly discouraged.* + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +FORTRAN 77 NOTES +---------------- + +The MPI standard prescribes portable Fortran syntax for the *COUNT +argument of :ref:`MPI_Status_set_elements_x` only for Fortran 90. FORTRAN 77 +users may use the non-portable syntax* + +:: + + INTEGER*MPI_COUNT_KIND COUNT + + where MPI_COUNT_KIND is a constant defined in mpif.h and gives the length of the declared integer in bytes. diff --git a/docs/man-openmpi/man3/MPI_Status_set_elements_x.3.rst b/docs/man-openmpi/man3/MPI_Status_set_elements_x.3.rst new file mode 100644 index 00000000000..c7f0a34bb5c --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Status_set_elements_x.3.rst @@ -0,0 +1,9 @@ +.. _mpi_status_set_elements_x: + +MPI_Status_set_elements_x +========================= + .. include_body + +.. include:: ../man3/MPI_Status_set_elements.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_T_category_changed.3.rst b/docs/man-openmpi/man3/MPI_T_category_changed.3.rst new file mode 100644 index 00000000000..1628c13d9ae --- /dev/null +++ b/docs/man-openmpi/man3/MPI_T_category_changed.3.rst @@ -0,0 +1,44 @@ +.. _mpi_t_category_changed: + + +MPI_T_category_changed +====================== + +.. 
include_body + +:ref:`MPI_T_category_changed` - Get a timestamp for the categories + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include <mpi.h> + + int MPI_T_category_changed(int *stamp) + + +INPUT PARAMETERS +---------------- +* ``stamp``: A virtual time stamp to indicate the last change to the categories. + +DESCRIPTION +----------- + +If two subsequent calls to this routine return the same timestamp, it is +guaranteed that no categories have been changed or added. If the +timestamp from the second call is higher, then some categories have been +added or changed. + + +ERRORS +------ + +:ref:`MPI_T_category_changed` will fail if: + +* ``MPI_T_ERR_NOT_INITIALIZED``: The MPI Tools interface not initialized diff --git a/docs/man-openmpi/man3/MPI_T_category_get_categories.3.rst b/docs/man-openmpi/man3/MPI_T_category_get_categories.3.rst new file mode 100644 index 00000000000..843066102a3 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_T_category_get_categories.3.rst @@ -0,0 +1,50 @@ +.. _mpi_t_category_get_categories: + + +MPI_T_category_get_categories +============================= + +.. include_body + +:ref:`MPI_T_category_get_categories` - Query which categories are in a +category + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include <mpi.h> + + int MPI_T_category_get_categories(int cat_index, int len, int indices[]) + + +INPUT PARAMETERS +---------------- +* ``cat_index``: Index of the category to be queried. +* ``len``: The length of the indices array. + +OUTPUT PARAMETERS +----------------- +* ``indices``: An integer array of size len, indicating category indices. + +DESCRIPTION +----------- + +:ref:`MPI_T_category_get_categories` can be used to query which other +categories are in a category.
+ + +ERRORS +------ + +:ref:`MPI_T_category_get_categories` will fail if: + +* ``MPI_T_ERR_NOT_INITIALIZED``: The MPI Tools interface not initialized + +* ``MPI_T_ERR_INVALID_INDEX``: The category index is invalid diff --git a/docs/man-openmpi/man3/MPI_T_category_get_cvars.3.rst b/docs/man-openmpi/man3/MPI_T_category_get_cvars.3.rst new file mode 100644 index 00000000000..3a389dfa707 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_T_category_get_cvars.3.rst @@ -0,0 +1,50 @@ +.. _mpi_t_category_get_cvars: + + +MPI_T_category_get_cvars +======================== + +.. include_body + +:ref:`MPI_T_category_get_cvars` - Query which control variables are in a +category + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_T_category_get_cvars(int cat_index, int len, int indices[]) + + +INPUT PARAMETERS +---------------- +* ``cat_index``: Index of the category to be queried. +* ``len``: The length of the indices array. + +OUTPUT PARAMETERS +----------------- +* ``indices``: An integer array of size len, indicating control variable indices. + +DESCRIPTION +----------- + +:ref:`MPI_T_category_get_cvars` can be used to query which control variables +are contained in a particular category. + + +ERRORS +------ + +:ref:`MPI_T_category_get_cvars` will fail if: + +* ``MPI_T_ERR_NOT_INITIALIZED``: The MPI Tools interface not initialized + +* ``MPI_T_ERR_INVALID_INDEX``: The category index is invalid diff --git a/docs/man-openmpi/man3/MPI_T_category_get_info.3.rst b/docs/man-openmpi/man3/MPI_T_category_get_info.3.rst new file mode 100644 index 00000000000..389d765411c --- /dev/null +++ b/docs/man-openmpi/man3/MPI_T_category_get_info.3.rst @@ -0,0 +1,74 @@ +.. _mpi_t_category_get_info: + + +MPI_T_category_get_info +======================= + +.. include_body + +:ref:`MPI_T_category_get_info` - Query information from a category + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. 
code-block:: c + + #include + + int MPI_T_category_get_info(int cat_index, char *name, int *name_len, + char *desc, int *desc_len, int *num_cvars, int *num_pvars, + int *num_categories) + + +INPUT PARAMETERS +---------------- +* ``cat_index``: Index of the category to be queried. + +INPUT/OUTPUT PARAMETERS +----------------------- +* ``name_len``: Length of the string and/or buffer for name. +* ``desc_len``: Length of the string and/or buffer for desc. + +OUTPUT PARAMETERS +----------------- +* ``name``: Buffer to return the string containing the name of the category. +* ``desc``: Buffer to return the string containing the description of the category. +* ``num_cvars``: Number of control variables in the category. +* ``num_pvars``: Number of performance variables in the category. +* ``num_categories``: Number of categories contained in the category. + +DESCRIPTION +----------- + +:ref:`MPI_T_category_get_info` can be used to query information from a +category. The function returns the number of control variables, +performance variables, and sub-categories in the queried category in the +arguments *num_cvars*, *num_pvars*, and *num_categories*, respectively. + + +NOTES +----- + +This MPI tool interface function returns two strings. This function +takes two argument for each string: a buffer to store the string, and a +length which must initially specify the size of the buffer. If the +length passed is n then this function will copy at most n - 1 characters +of the string into the corresponding buffer and set the length to the +number of characters copied - 1. If the length argument is NULL or the +value specified in the length is 0 the corresponding string buffer is +ignored and the string is not returned. 
+ + +ERRORS +------ + +:ref:`MPI_T_category_get_info` will fail if: + +* ``MPI_T_ERR_NOT_INITIALIZED``: The MPI Tools interface not initialized + +* ``MPI_T_ERR_INVALID_INDEX``: The category index is invalid diff --git a/docs/man-openmpi/man3/MPI_T_category_get_num.3.rst b/docs/man-openmpi/man3/MPI_T_category_get_num.3.rst new file mode 100644 index 00000000000..2033a69d396 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_T_category_get_num.3.rst @@ -0,0 +1,42 @@ +.. _mpi_t_category_get_num: + + +MPI_T_category_get_num +====================== + +.. include_body + +:ref:`MPI_T_category_get_num` - Query the number of categories + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_T_category_get_num(int *num_cat) + + +OUTPUT PARAMETERS +----------------- +* ``num_cat``: Current number of categories + +DESCRIPTION +----------- + +:ref:`MPI_T_category_get_num` can be used to query the current number of +categories. + + +ERRORS +------ + +:ref:`MPI_T_category_get_num` will fail if: + +* ``MPI_T_ERR_NOT_INITIALIZED``: The MPI Tools interface not initialized diff --git a/docs/man-openmpi/man3/MPI_T_category_get_pvars.3.rst b/docs/man-openmpi/man3/MPI_T_category_get_pvars.3.rst new file mode 100644 index 00000000000..814783ae19f --- /dev/null +++ b/docs/man-openmpi/man3/MPI_T_category_get_pvars.3.rst @@ -0,0 +1,51 @@ +.. _mpi_t_category_get_pvars: + + +MPI_T_category_get_pvars +======================== + +.. include_body + +:ref:`MPI_T_category_get_pvars` - Query which performance variables are in +a category + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_T_category_get_pvars(int cat_index, int len, int indices[]) + + +INPUT PARAMETERS +---------------- +* ``cat_index``: Index of the category to be queried. +* ``len``: The length of the indices array. + +OUTPUT PARAMETERS +----------------- +* ``indices``: An integer array of size len, indicating performance variable indices. 
+ +DESCRIPTION +----------- + +:ref:`MPI_T_category_get_pvars` can be used to query which performance +variables are contained in a particular category. A category contains +zero or more performance variables. + + +ERRORS +------ + +:ref:`MPI_T_category_get_pvars` will fail if: + +* ``MPI_T_ERR_NOT_INITIALIZED``: The MPI Tools interface not initialized + +* ``MPI_T_ERR_INVALID_INDEX``: The category index is invalid diff --git a/docs/man-openmpi/man3/MPI_T_cvar_get_info.3.rst b/docs/man-openmpi/man3/MPI_T_cvar_get_info.3.rst new file mode 100644 index 00000000000..1f34cc0e505 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_T_cvar_get_info.3.rst @@ -0,0 +1,151 @@ +.. _mpi_t_cvar_get_info: + + +MPI_T_cvar_get_info +=================== + +.. include_body + +:ref:`MPI_T_cvar_get_info` - Query information from a control variable + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include <mpi.h> + + int MPI_T_cvar_get_info(int cvar_index, char *name, int *name_len, + int *verbosity, MPI_Datatype *datatype, MPI_T_enum *enumtype, + char *desc, int *desc_len, int *bind, int *scope) + + +INPUT PARAMETERS +---------------- +* ``cvar_index``: Index of the control variable to be queried. + +INPUT/OUTPUT PARAMETERS +----------------------- +* ``name_len``: Length of the string and/or buffer for name. +* ``desc_len``: Length of the string and/or buffer for desc. + +OUTPUT PARAMETERS +----------------- +* ``name``: Buffer to return the string containing the name of the control variable. +* ``verbosity``: Verbosity level of this variable. +* ``datatype``: MPI datatype of the information stored in the control variable. +* ``enumtype``: Optional descriptor for enumeration information. +* ``desc``: Buffer to return the string containing the description of the control variable. +* ``bind``: Type of MPI object to which this variable must be bound. +* ``scope``: Scope of when changes to this variable are possible.
+ +DESCRIPTION +----------- + +:ref:`MPI_T_cvar_get_info` can be used to query information about a control +variable. The function returns the verbosity, datatype, enumeration +type, binding, and scope of the queried control variable in the +arguments *verbosity*, *datatype*, *enumtype*, *bind*, and *scope*, +respectively. Control variables in Open MPI are the same as MCA +parameters. + + +VERBOSITY +--------- + +As Open MPI exposes a very large number of MCA parameters (control +variables), control variables are categorized into nine verbosity +levels corresponding to the equivalent :ref:`ompi_info +` level. The nine levels are (in increasing order): + +#. ``MPI_T_VERBOSITY_USER_BASIC``: Basic information of interest to users + +#. ``MPI_T_VERBOSITY_USER_DETAIL``: Detailed information of interest to users + +#. ``MPI_T_VERBOSITY_USER_ALL``: All remaining information of interest to users + +#. ``MPI_T_VERBOSITY_TUNER_BASIC``: Basic information required for tuning + +#. ``MPI_T_VERBOSITY_TUNER_DETAIL``: Detailed information required for tuning + +#. ``MPI_T_VERBOSITY_TUNER_ALL``: All remaining information required for tuning + +#. ``MPI_T_VERBOSITY_MPIDEV_BASIC``: Basic information for MPI implementors + +#. ``MPI_T_VERBOSITY_MPIDEV_DETAIL``: Detailed information for MPI implementors + +#. ``MPI_T_VERBOSITY_MPIDEV_ALL``: All remaining information for MPI implementors + +For more information see MPI-3 section 14.3.1. + + +DATATYPE +-------- + +The datatype returned by :ref:`MPI_T_cvar_get_info` is restricted to +one of the following datatypes: ``MPI_INT``, ``MPI_UNSIGNED``, +``MPI_UNSIGNED_LONG``, ``MPI_UNSIGNED_LONG_LONG``, ``MPI_COUNT``, +``MPI_CHAR``, and ``MPI_DOUBLE``. For more information on datatypes in +MPI_T see MPI-3 section 14.3.5. + + +SCOPE +----- + +The scope describes when and how changes can be made to a control +variable. 
From MPI-3 section 14.3.6, the scope may be any of the following: + +* ``MPI_T_SCOPE_CONSTANT``: read-only, value is constant + +* ``MPI_T_SCOPE_READONLY``: read-only, cannot be written, but can change + +* ``MPI_T_SCOPE_LOCAL``: may be writeable, writing is a local operation + +* ``MPI_T_SCOPE_GROUP``: may be writeable, must be done to a group of + processes, all processes in a group must be set to consistent + values + +* ``MPI_T_SCOPE_GROUP_EQ``: may be writeable, must be done to a group + of processes, all processes in a group must be set to the same value + +* ``MPI_T_SCOPE_ALL``: may be writeable, must be done to all + processes, all connected processes must be set to consistent values + +* ``MPI_T_SCOPE_ALL_EQ``: may be writeable, must be done to all + processes, all connected processes must be set to the same value + +For more information see MPI-3 section 14.3.6 Table 14.4. + + +NOTES +----- + +This MPI tool interface function returns two strings. This function +takes two argument for each string: a buffer to store the string, and a +length which must initially specify the size of the buffer. If the +length passed is n then this function will copy at most n - 1 characters +of the string into the corresponding buffer and set the length to the +number of characters copied - 1. If the length argument is NULL or the +value specified in the length is 0 the corresponding string buffer is +ignored and the string is not returned. + +Open MPI does not currently support binding control variables to MPI +objects. + + +ERRORS +------ + +:ref:`MPI_T_cvar_get_info` will fail if: + +* ``MPI_T_ERR_NOT_INITIALIZED``: The MPI Tools interface not initialized + +* ``MPI_T_ERR_INVALID_INDEX``: The control variable index is invalid + + +.. 
seealso:: :ref:`ompi_info <man1-ompi_info>` diff --git a/docs/man-openmpi/man3/MPI_T_cvar_get_num.3.rst b/docs/man-openmpi/man3/MPI_T_cvar_get_num.3.rst new file mode 100644 index 00000000000..978bf19a2d5 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_T_cvar_get_num.3.rst @@ -0,0 +1,43 @@ +.. _mpi_t_cvar_get_num: + + +MPI_T_cvar_get_num +================== + +.. include_body + +:ref:`MPI_T_cvar_get_num` - Query the number of control variables + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include <mpi.h> + + int MPI_T_cvar_get_num(int *num_cvar) + + +OUTPUT PARAMETERS +----------------- +* ``num_cvar``: Current number of control variables. + +DESCRIPTION +----------- + +:ref:`MPI_T_cvar_get_num` can be used to query the current number of control +variables. The number of control variables may increase throughout the +execution of the process but will never decrease. + + +ERRORS +------ + +:ref:`MPI_T_cvar_get_num` will fail if: + +* ``MPI_T_ERR_NOT_INITIALIZED``: The MPI Tools interface not initialized diff --git a/docs/man-openmpi/man3/MPI_T_cvar_handle_alloc.3.rst b/docs/man-openmpi/man3/MPI_T_cvar_handle_alloc.3.rst new file mode 100644 index 00000000000..3cec41630b1 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_T_cvar_handle_alloc.3.rst @@ -0,0 +1,72 @@ +.. _mpi_t_cvar_handle_alloc: + + +MPI_T_cvar_handle_alloc +======================= + +.. include_body + +:ref:`MPI_T_cvar_handle_alloc`, :ref:`MPI_T_cvar_handle_free` - Allocate/free +control variable handles + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include <mpi.h> + + int MPI_T_cvar_handle_alloc(int cvar_index, void *obj_handle, + MPI_T_cvar_handle *handle, int *count) + + int MPI_T_cvar_handle_free(MPI_T_cvar_handle *handle) + + +DESCRIPTION +----------- + +:ref:`MPI_T_cvar_handle_alloc` binds the control variable specified in +*cvar_index* to the MPI object specified in *obj_handle*.
If +:ref:`MPI_T_cvar_get_info` returns MPI_T_BIND_NO_OBJECT as the binding of the +variable the *obj_handle* argument is ignored. The number of values +represented by this control variable is returned in the *count* +parameter. If the control variable represents a string then *count* will +be the maximum length of the string. + +:ref:`MPI_T_cvar_handle_free` frees a handle allocated by +:ref:`MPI_T_cvar_handle_alloc` and sets the *handle* argument to +MPI_T_CVAR_HANDLE_NULL. + + +NOTES +----- + +Open MPI does not currently support binding MPI objects to control +variables so the *obj_handle* argument is always ignored. + + +ERRORS +------ + +:ref:`MPI_T_cvar_handle_alloc` will fail if: + +* ``MPI_T_ERR_NOT_INITIALIZED``: The MPI Tools interface not initialized + +* ``MPI_T_ERR_INVALID_INDEX``: The control variable index is invalid + +* ``MPI_T_ERR_OUT_OF_HANDLES``: No more handles available + +:ref:`MPI_T_cvar_handle_free` will fail if: + +* ``MPI_T_ERR_NOT_INITIALIZED``: The MPI Tools interface not initialized + +* ``MPI_T_ERR_INVALID_HANDLE``: The handle is invalid + + +.. seealso:: + :ref:`MPI_T_cvar_get_info` diff --git a/docs/man-openmpi/man3/MPI_T_cvar_handle_free.3.rst b/docs/man-openmpi/man3/MPI_T_cvar_handle_free.3.rst new file mode 100644 index 00000000000..3b6647015a1 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_T_cvar_handle_free.3.rst @@ -0,0 +1,9 @@ +.. _mpi_t_cvar_handle_free: + +MPI_T_cvar_handle_free +====================== + .. include_body + +.. include:: ../man3/MPI_T_cvar_handle_alloc.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_T_cvar_read.3.rst b/docs/man-openmpi/man3/MPI_T_cvar_read.3.rst new file mode 100644 index 00000000000..68801b6ff07 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_T_cvar_read.3.rst @@ -0,0 +1,52 @@ +.. _mpi_t_cvar_read: + + +MPI_T_cvar_read +=============== + +.. 
include_body + +:ref:`MPI_T_cvar_read` - Read the value of a control variable + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_T_cvar_read(MPI_T_cvar_handle handle, const void *buf) + + +INPUT PARAMETERS +---------------- +* ``handle``: Handle of the control variable to be read. +* ``buf``: Initial address of storage location for variable value. + +DESCRIPTION +----------- + +:ref:`MPI_T_cvar_read` reads the value of the control variable identified by +the handle specified in *handle* and stores the value in the buffer +pointed to by *buf*. The caller must ensure that the buffer pointed to +by *buf* is large enough to hold the entire value of the control +variable. + + +ERRORS +------ + +:ref:`MPI_T_cvar_read` will fail if: + +* ``MPI_T_ERR_NOT_INITIALIZED``: The MPI Tools interface not initialized + +* ``MPI_T_ERR_INVALID_HANDLE``: The handle is invalid + + +.. seealso:: + :ref:`MPI_T_cvar_handle_alloc` :ref:`MPI_T_cvar_get_info` diff --git a/docs/man-openmpi/man3/MPI_T_cvar_write.3.rst b/docs/man-openmpi/man3/MPI_T_cvar_write.3.rst new file mode 100644 index 00000000000..8a0a64a3644 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_T_cvar_write.3.rst @@ -0,0 +1,58 @@ +.. _mpi_t_cvar_write: + + +MPI_T_cvar_write +================ + +.. include_body + +:ref:`MPI_T_cvar_write` - Write the value of a bound control variable + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_T_cvar_write(MPI_T_cvar_handle handle, const void *buf) + + +INPUT PARAMETERS +---------------- +* ``handle``: Handle of the control variable to be written. +* ``buf``: Initial address of storage location for variable value. + +DESCRIPTION +----------- + +:ref:`MPI_T_cvar_write` sets the value the control variable identified +by the handle specified in *handle* from the buffer provided in +*buf*. The caller must ensure that the buffer specified in *buf* is +large enough to hold the entire value of the control variable. 
If the +variable has global scope, any write call must be issued on all +connected MPI processes. For more information see MPI-3 section +14.3.6. + + +ERRORS +------ + +:ref:`MPI_T_cvar_write` will fail if: + +* ``MPI_T_ERR_NOT_INITIALIZED``: The MPI Tools interface not initialized + +* ``MPI_T_ERR_INVALID_HANDLE``: The handle is invalid + +* ``MPI_T_ERR_CVAR_SET_NOT_NOW``: Variable cannot be set at this moment + +* ``MPI_T_ERR_CVAR_SET_NEVER``: Variable cannot be set until end of execution + + +.. seealso:: + :ref:`MPI_T_cvar_handle_alloc` :ref:`MPI_T_cvar_get_info` diff --git a/docs/man-openmpi/man3/MPI_T_enum_get_info.3.rst b/docs/man-openmpi/man3/MPI_T_enum_get_info.3.rst new file mode 100644 index 00000000000..094b246a0df --- /dev/null +++ b/docs/man-openmpi/man3/MPI_T_enum_get_info.3.rst @@ -0,0 +1,68 @@ +.. _mpi_t_enum_get_info: + + +MPI_T_enum_get_info +=================== + +.. include_body + +:ref:`MPI_T_enum_get_info` - Query information about an enumerator + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_T_enum_get_info(MPI_T_enum enumtype, int *num, char *name, int *name_len) + + +INPUT PARAMETERS +---------------- +* ``enumtype``: Enumerator to be queried. + +INPUT/OUTPUT PARAMETERS +----------------------- +* ``name_len``: Length of the string and/or buffer for name. + +OUTPUT PARAMETERS +----------------- +* ``num``: number of discrete values represented by this enumeration. +* ``name``: Buffer to return the string containing the name of the category. + +DESCRIPTION +----------- + +:ref:`MPI_T_enum_get_info` can be used to query information about an +enumerator. The function returns the number of discrete values +represented by this enumerator in the *num* parameter. + + +NOTES +----- + +This MPI tool interface function returns the name of the enumeration as +a string. 
This function takes two arguments for the string: *name*, which
specifies a buffer where the name of the enumeration should be stored, and
*name_len*, which must initially specify the size of the buffer pointed
to by *name*.
+ + +NOTES +----- + +This MPI tool interface function returns the name of the item as a +string. This function takes two arguments for the string: a buffer to +store the string, and a length which must initially specify the size +of the buffer. If the length passed is n then this function will copy +at most n - 1 characters of the string into the buffer and sets the +length to the number of characters copied - 1. If the length argument +is NULL or the value specified in the length is 0 the string buffer is +ignored and the string is not returned. For more information see MPI-3 +section 14.3.3. + + +ERRORS +------ + +:ref:`MPI_T_enum_get_item` will fail if: + +* ``MPI_T_ERR_NOT_INITIALIZED``: The MPI Tools interface not initialized + +* ``MPI_T_ERR_INVALID_INDEX``: The enumeration is invalid or has been deleted diff --git a/docs/man-openmpi/man3/MPI_T_finalize.3.rst b/docs/man-openmpi/man3/MPI_T_finalize.3.rst new file mode 100644 index 00000000000..f26c0812490 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_T_finalize.3.rst @@ -0,0 +1,56 @@ +.. _mpi_t_finalize: + + +MPI_T_finalize +============== + +.. include_body + +:ref:`MPI_T_finalize` - Finalize the MPI tool information interface + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_T_finalize(void) + + +DESCRIPTION +----------- + +MPI_T_finalize() finalizes the MPI tool information interface and must +be called the same number of times as MPI_T_init_thread() by the end of +execution. Calls to MPI tool functions are allowed at any point in +execution as long as MPI_T_init_thread() has been called at least once +and the number of calls to MPI_T_init_thread() is greater than the +number of calls to MPI_T_finalize(). If at any point in execution the +number of calls to MPI_T_finalize() equals the number of calls to +MPI_T_init_thread() the MPI tool interface will no longer be available +until another call to MPI_T_init_thread(). 
+ + +NOTES +----- + +Before the end of execution the number of calls to MPI_T_init_thread() +and :ref:`MPI_T_finalize` must be the same. + + +ERRORS +------ + +:ref:`MPI_T_finalize` will fail if: + +* ``MPI_T_ERR_NOT_INITIALIZED``: The MPI Tools interface not initialized + + +.. seealso:: + :ref:`MPI_T_init_thread` diff --git a/docs/man-openmpi/man3/MPI_T_init_thread.3.rst b/docs/man-openmpi/man3/MPI_T_init_thread.3.rst new file mode 100644 index 00000000000..c353e6f5931 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_T_init_thread.3.rst @@ -0,0 +1,77 @@ +.. _mpi_t_init_thread: + +MPI_T_init_thread +================= + +.. include_body + +:ref:`MPI_T_init_thread` - Initializes the MPI Tool information interface + +SYNTAX +------ + +C Syntax +^^^^^^^^ + +.. code:: c + + #include + + int MPI_T_init_thread(int required, int *provided) + +INPUT PARAMETERS +---------------- + +- required: Desired level of thread support (integer). + +OUTPUT PARAMETERS +----------------- + +- provided: Available level of thread support (integer). + +DESCRIPTION +----------- + +MPI_T_init_thread() initializes the MPI tool information interface. +Calls to MPI tool functions are allowed at any point in execution +(including before MPI_Init() and after MPI_Finalize()) as long +as MPI_T_init_thread() has been called at least once and the number +of calls to MPI_T_init_thread() is greater than the number of calls +to MPI_T_finalize(). If at any point in execution the number of +calls to MPI_T_finalize() equals the number of calls to +MPI_T_init_thread() the MPI tool interface will no longer be +available until another call to MPI_T_init_thread(). + +MPI_T_init_thread(), like MPI_Init_thread(), has a provision to +request a certain level of thread support in ``required``: + +* ``MPI_THREAD_SINGLE``: Only one thread will execute. +* ``MPI_THREAD_FUNNELED``: If the process is multithreaded, only the + thread that called :ref:`MPI_Init_thread` will make MPI calls. 
+* ``MPI_THREAD_SERIALIZED``: If the process is multithreaded, only one + thread will make MPI library calls at one time. +* ``MPI_THREAD_MULTIPLE``: If the process is multithreaded, multiple + threads may call MPI at once with no restrictions. + +The level of thread support available to the program is set in +``provided``. In Open MPI, the value is dependent on how the library was +configured and built. Note that there is no guarantee that ``provided`` +will be greater than or equal to ``required``. + +NOTES +----- + +It is the caller's responsibility to check the value of ``provided``, as +it may be less than what was requested in ``required``. + +ERRORS +------ + +:ref:`MPI_T_init_thread` will fail if: + +* ``MPI_T_ERR_MEMORY``: Out of memory +* ``MPI_T_ERR_CANNOT_INIT``: Interface not in the state to be + initialized + + +.. seealso:: :ref:`MPI_T` :ref:`MPI_Init` :ref:`MPI_Init_thread` diff --git a/docs/man-openmpi/man3/MPI_T_pvar_get_info.3.rst b/docs/man-openmpi/man3/MPI_T_pvar_get_info.3.rst new file mode 100644 index 00000000000..d05bee4b226 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_T_pvar_get_info.3.rst @@ -0,0 +1,197 @@ +.. _mpi_t_pvar_get_info: + + +MPI_T_pvar_get_info +=================== + +.. include_body + +:ref:`MPI_T_pvar_get_info` - Query information from a performance variable + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_T_pvar_get_info(int pvar_index, char *name, int *name_len, + int *verbosity, int *var_class, MPI_Datatype *datatype, MPI_T_enum *enumtype, + char *desc, int *desc_len, int *bind, int *readonly, int *continuous, + int *atomic) + + +INPUT PARAMETERS +---------------- +* ``pvar_index``: Index of the performance variable to be queried. + +INPUT/OUTPUT PARAMETERS +----------------------- +* ``name_len``: Length of the string and/or buffer for name. +* ``desc_len``: Length of the string and/or buffer for desc. 
+ +OUTPUT PARAMETERS +----------------- +* ``name``: Buffer to return the string containing the name of the performance variable. +* ``verbosity``: Verbosity level of this variable. +* ``var_class``: Class of performance variable. +* ``datatype``: MPI datatype of the information stored in the performance variable. +* ``enumtype``: Optional descriptor for enumeration information. +* ``desc``: Buffer to return the string containing the description of the performance variable. +* ``bind``: Type of MPI object to which this variable must be bound. +* ``readonly``: Flag indicating whether the variable can be written/reset. +* ``continuous``: Flag indicating whether the variable can be started and stopped or is continuously active. +* ``atomic``: Flag indicating whether the variable can be atomically read and reset. + +DESCRIPTION +----------- + +:ref:`MPI_T_pvar_get_info` can be used to query information from a performance +variable. The function returns the verbosity, class, datatype, +enumeration type, and binding of the queried control variable in the +arguments *verbosity*, *var_class*, *datatype*, *enumtype*, and *bind* +respectively. Flags indicating whether the variable is read-only, +continuous, or atomic are returns in *readonly*, *continuous*, and +*atomic* accordingly. See MPI-3 section 14.3.7 for more information. See the +man page for :ref:`MPI_T_cvar_get_info` for information on variable verbosity. + + +VARIABLE CLASS +-------------- + +Performance variables are categorized into classes which describe their +initial value, valid types, and behavior. The class returned in the +*var_class* parameter may be one of the following: + +* ``MPI_T_PVAR_CLASS_STATE``: Variable represents a set of discrete + states that may be described by an enumerator. Variables of this + class must be represented by an MPI_INT. The starting value is the + current state of the variable. 
+ +* ``MPI_T_PVAR_CLASS_LEVEL``: Variable represents the current + utilization level of a resource. Variables of this class must be + represented by an MPI_UNSIGNED, MPI_UNSIGNED_LONG, + MPI_UNSIGNED_LONG_LONG, or MPI_DOUBLE. The starting value is the + current utilization level of the resource. + +* ``MPI_T_PVAR_CLASS_SIZE``: Variable represents the fixed size of a + resource. Variables of this class are represented by an + MPI_UNSIGNED, MPI_UNSIGNED_LONG, MPI_UNSIGNED_LONG_LONG, or + MPI_DOUBLE. The starting value is the current size of the resource. + +* ``MPI_T_PVAR_CLASS_PERCENTAGE``: Variable represents the current + precentage utilization level of a resource. Variables of this class + are represented by an MPI_DOUBLE. The starting value is the current + percentage utilization of the resource. + +* ``MPI_T_PVAR_CLASS_HIGHWATERMARK``: Variable represents the high + watermark of the utilization of a resource. Variables of this class + are represented by an MPI_UNSIGNED, MPI_UNSIGNED_LONG, + MPI_UNSIGNED_LONG_LONG, or MPI_DOUBLE. The starting value is the + current utilization of the resource. + +* ``MPI_T_PVAR_CLASS_HIGHWATERMARK``: Variable represents the low + watermark of the utilization of a resource. Variables of this class + are represented by an MPI_UNSIGNED, MPI_UNSIGNED_LONG, + MPI_UNSIGNED_LONG_LONG, or MPI_DOUBLE. The starting value is the + current utilization of the resource. + +* ``MPI_T_PVAR_CLASS_COUNTER``: Variable represents a count of the + number of occurrences of a specific event. Variables of this class + are represented by an MPI_UNSIGNED, MPI_UNSIGNED_LONG, or + MPI_UNSIGNED_LONG_LONG. The starting value is 0. + +* ``MPI_T_PVAR_CLASS_COUNTER``: Variable represents an aggregated + value that represents a sum of arguments processed during a specific + event. Variables of this class are represented by an MPI_UNSIGNED, + MPI_UNSIGNED_LONG, MPI_UNSIGNED_LONG_LONG, or MPI_DOUBLE. The + starting value is 0. 
+ +* ``MPI_T_PVAR_CLASS_TIMER``: Variable represents the aggregated time + spent by the MPI implementation while processing an event, type of + event, or section of code. Variables of this class are represented + by an MPI_UNSIGNED, MPI_UNSIGNED_LONG, MPI_UNSIGNED_LONG_LONG, or + MPI_DOUBLE. If the variable is represented by an MPI_DOUBLE the + units will be the same as those used by MPI_Wtime(). The starting + value is 0. + +* ``MPI_T_PVAR_CLASS_GENERIC``: Variable does not fit into any other + class. Can by represented by an type supported by the MPI tool + information interface (see DATATYPE). Starting value is variable + specific. + +For more information see MPI-3 section 14.3.7. + + +DATATYPE +-------- + +The datatype returned by :ref:`MPI_T_pvar_get_info` is restricted to one of the +following datatypes: MPI_INT, MPI_UNSIGNED, MPI_UNSIGNED_LONG, +MPI_UNSIGNED_LONG_LONG, MPI_COUNT, MPI_CHAR, and MPI_DOUBLE. For more +information on datatypes in the MPI Tool information interface see MPI-3 +section 14.3.5. + + +BINDING +------- + +Performance variables may be bound to an MPI object. The binding +returned in the *bind* parameter may be one of the following: + +* ``MPI_T_BIND_NO_OBJECT``: No object + +* ``MPI_T_BIND_MPI_COMM``: MPI communicator + +* ``MPI_T_BIND_MPI_DATATYPE``: MPI datatype + +* ``MPI_T_BIND_MPI_ERRHANDLER``: MPI error handler + +* ``MPI_T_BIND_MPI_FILE``: MPI file handle + +* ``MPI_T_BIND_MPI_GROUP``: MPI group + +* ``MPI_T_BIND_MPI_OP``: MPI reduction operator + +* ``MPI_T_BIND_MPI_REQUEST``: MPI request + +* ``MPI_T_BIND_MPI_WIN``: MPI window for one-sided communication + +* ``MPI_T_BIND_MPI_MESSAGE``: MPI message object + +* ``MPI_T_BIND_MPI_INFO``: MPI info object + +For more information see MPI-3 section 14.3.2. + + +NOTES +----- + +This MPI tool interface function returns two strings. This function +takes two argument for each string: a buffer to store the string, and +a length which must initially specify the size of the buffer. 
If the +length passed is n then this function will copy at most n - 1 +characters of the string into the corresponding buffer and set the +length to the number of characters copied - 1. If the length argument +is NULL or the value specified in the length is 0 the corresponding +string buffer is ignored and the string is not returned. For more +information see MPI-3 section 14.3.3. + + +ERRORS +------ + +:ref:`MPI_T_pvar_get_info` will fail if: + +* ``MPI_T_ERR_NOT_INITIALIZED``: The MPI Tools interface not initialized + +* ``MPI_T_ERR_INVALID_INDEX``: The performance variable index is invalid + + +.. seealso:: + :ref:`MPI_T_cvar_get_info` diff --git a/docs/man-openmpi/man3/MPI_T_pvar_get_num.3.rst b/docs/man-openmpi/man3/MPI_T_pvar_get_num.3.rst new file mode 100644 index 00000000000..a2211b36bdc --- /dev/null +++ b/docs/man-openmpi/man3/MPI_T_pvar_get_num.3.rst @@ -0,0 +1,43 @@ +.. _mpi_t_pvar_get_num: + + +MPI_T_pvar_get_num +================== + +.. include_body + +:ref:`MPI_T_pvar_get_num` - Query the number of performance variables + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_T_pvar_get_num(int *num_pvar) + + +OUTPUT PARAMETERS +----------------- +* ``num_pvar``: Current number of performance variables. + +DESCRIPTION +----------- + +:ref:`MPI_T_pvar_get_num` can be used to query the current number of +performance variables. The number of performance variables may increase +throughout the exection of the process but will never decrease. + + +ERRORS +------ + +:ref:`MPI_T_pvar_get_num` will fail if: + +* ``MPI_T_ERR_NOT_INITIALIZED``: The MPI Tools interface not initialized diff --git a/docs/man-openmpi/man3/MPI_T_pvar_handle_alloc.3.rst b/docs/man-openmpi/man3/MPI_T_pvar_handle_alloc.3.rst new file mode 100644 index 00000000000..a5fe1710935 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_T_pvar_handle_alloc.3.rst @@ -0,0 +1,75 @@ +.. _mpi_t_pvar_handle_alloc: + + +MPI_T_pvar_handle_alloc +======================= + +.. 
include_body + +:ref:`MPI_T_pvar_handle_alloc`, :ref:`MPI_T_pvar_handle_free` - Allocate/free +MPI performance variable handles + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_T_pvar_handle_alloc(int session, int pvar_index, void *obj_handle, + MPI_T_pvar_handle *handle, int *count) + + int MPI_T_pvar_handle_free(int session, MPI_T_pvar_handle *handle) + + +DESCRIPTION +----------- + +:ref:`MPI_T_pvar_handle_alloc` binds the performance variable specified in +*pvar_index* to the MPI object specified in *obj_handle* in the session +identified by the parameter *session*. The object is passed in the +argument *obj_handle* as an address to a local variable that stores the +object’s handle. If :ref:`MPI_T_pvar_get_info` returns MPI_T_BIND_NO_OBJECT as +the binding for the variable the *obj_handle* argument is ignored. The +handle allocated to reference the variable is returned in the argument +*handle*. Upon successful return, *count* contains the number of +elements (of the datatype returned by a previous :ref:`MPI_T_PVAR_GET_INFO` +call) used to represent this variable. + +The value of *pvar_index* should be in the range 0 to *num_pvar - 1*, +where *num_pvar* is the number of available performance variables as +determined from a prior call to :ref:`MPI_T_PVAR_GET_NUM`. The type of the +MPI object it references must be consistent with the type returned in +the bind argument in a prior call to :ref:`MPI_T_PVAR_GET_INFO`. + +:ref:`MPI_T_pvar_handle_free` frees a handle allocated by +:ref:`MPI_T_pvar_handle_alloc` and sets the *handle* argument to +MPI_T_PVAR_HANDLE_NULL. 
+ + +ERRORS +------ + +:ref:`MPI_T_pvar_handle_alloc` will fail if: + +* ``MPI_T_ERR_NOT_INITIALIZED``: The MPI Tools interface not initialized + +* ``MPI_T_ERR_INVALID_INDEX``: The performance variable index is invalid + +* ``MPI_T_ERR_OUT_OF_HANDLES``: No more handles available + +:ref:`MPI_T_pvar_handle_free` will fail if: + +* ``MPI_T_ERR_NOT_INITIALIZED``: The MPI Tools interface not initialized + +* ``MPI_T_ERR_INVALID_HANDLE``: The handle is invalid or the handle + argument passed in is not associated with the session argument + + +.. seealso:: + :ref:`MPI_T_pvar_get_info` :ref:`MPI_T_pvar_get_num` diff --git a/docs/man-openmpi/man3/MPI_T_pvar_handle_free.3.rst b/docs/man-openmpi/man3/MPI_T_pvar_handle_free.3.rst new file mode 100644 index 00000000000..3d0f94b2e96 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_T_pvar_handle_free.3.rst @@ -0,0 +1,9 @@ +.. _mpi_t_pvar_handle_free: + +MPI_T_pvar_handle_free +====================== + .. include_body + +.. include:: ../man3/MPI_T_pvar_handle_alloc.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_T_pvar_read.3.rst b/docs/man-openmpi/man3/MPI_T_pvar_read.3.rst new file mode 100644 index 00000000000..f44c4ec9045 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_T_pvar_read.3.rst @@ -0,0 +1,55 @@ +.. _mpi_t_pvar_read: + + +MPI_T_pvar_read +=============== + +.. include_body + +:ref:`MPI_T_pvar_read` - Read the value of a performance variable + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_T_pvar_read(MPI_T_pvar_session session, MPI_T_pvar_handle handle, const void *buf) + + +INPUT PARAMETERS +---------------- +* ``session``: Performance experiment session. +* ``handle``: Performance variable handle. +* ``buf``: Initial address of storage location for variable value. + +DESCRIPTION +----------- + +:ref:`MPI_T_pvar_read` queries the value of a performance variable identified +by the handle specified in *handle* in the session specified in +*session*. 
The result is stored in the buffer pointed to by *buf*. The +caller must ensure that the buffer pointed to by *buf* is large enough +to hold the entire value of the performance variable. + + +ERRORS +------ + +:ref:`MPI_T_pvar_read` will fail if: + +* ``MPI_T_ERR_NOT_INITIALIZED``: The MPI Tools interface not initialized + +* ``MPI_T_ERR_INVALID_HANDLE``: The handle is invalid or not associated with the session + +* ``MPI_T_ERR_INVALID_SESSION``: Session argument is not a valid session + + +.. seealso:: + :ref:`MPI_T_pvar_handle_alloc` :ref:`MPI_T_pvar_get_info` :ref:`MPI_T_pvar_session_create` diff --git a/docs/man-openmpi/man3/MPI_T_pvar_readreset.3.rst b/docs/man-openmpi/man3/MPI_T_pvar_readreset.3.rst new file mode 100644 index 00000000000..9963d40038c --- /dev/null +++ b/docs/man-openmpi/man3/MPI_T_pvar_readreset.3.rst @@ -0,0 +1,63 @@ +.. _mpi_t_pvar_readreset: + + +MPI_T_pvar_readreset +==================== + +.. include_body + +:ref:`MPI_T_pvar_readreset` - Atomically read and reset the value of a +performance variable + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_T_pvar_readreset(MPI_T_pvar_session session, MPI_T_pvar_handle handle, const void *buf) + + +INPUT PARAMETERS +---------------- +* ``session``: Performance experiment session. +* ``handle``: Performance variable handle. +* ``buf``: Initial address of storage location for variable value. + +DESCRIPTION +----------- + +:ref:`MPI_T_pvar_readreset` atomically queries and resets the value of a +performance variable bound to the handle specified by *handle* in the +session specified by *session*. The result is stored in the buffer +pointed to by *buf*. This function can only be used with performance +variables that are atomic and not readonly. The caller must ensure that +the buffer pointed to by *buf* is large enough to hold the entire value +of the performance variable. 
+ + +ERRORS +------ + +:ref:`MPI_T_pvar_readreset` will fail if: + +* ``MPI_T_ERR_NOT_INITIALIZED``: The MPI Tools interface not initialized + +* ``MPI_T_ERR_INVALID_HANDLE``: The handle is invalid or not associated with the session + +* ``MPI_T_ERR_INVALID_SESSION``: Session argument is not a valid session + +* ``MPI_T_ERR_PVAR_NO_ATOMIC``: Variable cannot be read and written atomically + +* ``MPI_T_ERR_PVAR_NO_WRITE``: Variable cannot be reset + + +.. seealso:: + :ref:`MPI_T_pvar_handle_alloc` :ref:`MPI_T_pvar_get_info` :ref:`MPI_T_pvar_session_create` + :ref:`MPI_T_pvar_read` :ref:`MPI_T_pvar_reset` diff --git a/docs/man-openmpi/man3/MPI_T_pvar_reset.3.rst b/docs/man-openmpi/man3/MPI_T_pvar_reset.3.rst new file mode 100644 index 00000000000..0a4bc106501 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_T_pvar_reset.3.rst @@ -0,0 +1,56 @@ +.. _mpi_t_pvar_reset: + + +MPI_T_pvar_reset +================ + +.. include_body + +:ref:`MPI_T_pvar_reset` - Reset the value of a performance variable + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_T_pvar_reset(MPI_T_pvar_session session, MPI_T_pvar_handle handle) + + +INPUT PARAMETERS +---------------- +* ``session``: Performance experiment session. +* ``handle``: Performance variable handle or MPI_T_PVAR_ALL_HANDLES. + +DESCRIPTION +----------- + +:ref:`MPI_T_pvar_reset` sets the performance variable specified by the handle +in *handle* to its initial value. The special value +MPI_T_PVAR_ALL_HANDLES can be passed in *handle* to reset all read-write +handles in the session specified in *session*. + + +ERRORS +------ + +:ref:`MPI_T_pvar_reset` will fail if: + +* ``MPI_T_ERR_NOT_INITIALIZED``: The MPI Tools interface not initialized + +* ``MPI_T_ERR_INVALID_HANDLE``: The handle is invalid + +* ``MPI_T_ERR_INVALID_SESSION``: Session argument is not a valid session + +* ``MPI_T_ERR_PVAR_NO_WRITE``: Variable cannot be reset + + +.. 
seealso:: + :ref:`MPI_T_pvar_handle_alloc` :ref:`MPI_T_pvar_get_info` :ref:`MPI_T_pvar_session_create` + :ref:`MPI_T_pvar_write` diff --git a/docs/man-openmpi/man3/MPI_T_pvar_session_create.3.rst b/docs/man-openmpi/man3/MPI_T_pvar_session_create.3.rst new file mode 100644 index 00000000000..2c07cea4937 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_T_pvar_session_create.3.rst @@ -0,0 +1,55 @@ +.. _mpi_t_pvar_session_create: + + +MPI_T_pvar_session_create +========================= + +.. include_body + +:ref:`MPI_T_pvar_session_create`, :ref:`MPI_T_pvar_session_free` - Create/free +performance variable session + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_T_pvar_session_create(MPI_T_pvar_session *session) + + int MPI_T_pvar_session_free(MPI_T_pvar_session *session) + + +DESCRIPTION +----------- + +:ref:`MPI_T_pvar_session_create` creates a session for accessing performance +variables. The new session is returned in the *session* parameter. + +:ref:`MPI_T_pvar_session_free` releases a session allocated by +:ref:`MPI_T_pvar_session_create` and sets the *session* parameter to +MPI_T_PVAR_SESSION_NULL. + + +ERRORS +------ + +:ref:`MPI_T_pvar_session_create` will fail if: + +* ``MPI_T_ERR_NOT_INITIALIZED``: The MPI Tools interface not initialized + +* ``MPI_T_ERR_MEMORY``: Out of memory + +* ``MPI_T_ERR_OUT_OF_SESSIONS``: No more sessions available + +:ref:`MPI_T_pvar_session_free` will fail if: + +* ``MPI_T_ERR_NOT_INITIALIZED``: The MPI Tools interface not initialized + +* ``MPI_T_ERR_INVALID_SESSION``: The session parameter is not a valid session diff --git a/docs/man-openmpi/man3/MPI_T_pvar_session_free.3.rst b/docs/man-openmpi/man3/MPI_T_pvar_session_free.3.rst new file mode 100644 index 00000000000..45caea61ed7 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_T_pvar_session_free.3.rst @@ -0,0 +1,9 @@ +.. _mpi_t_pvar_session_free: + +MPI_T_pvar_session_free +======================= + .. include_body + +.. 
include:: ../man3/MPI_T_pvar_session_create.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_T_pvar_start.3.rst b/docs/man-openmpi/man3/MPI_T_pvar_start.3.rst new file mode 100644 index 00000000000..ee8ab5aaaf1 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_T_pvar_start.3.rst @@ -0,0 +1,65 @@ +.. _mpi_t_pvar_start: + + +MPI_T_pvar_start +================ + +.. include_body + +:ref:`MPI_T_pvar_start`, :ref:`MPI_T_pvar_stop` - Start/stop a performance +variable + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_T_pvar_start(MPI_T_pvar_session session, MPI_T_pvar_handle handle) + + int MPI_T_pvar_stop(MPI_T_pvar_session session, MPI_T_pvar_handle handle) + + +INPUT PARAMETERS +---------------- +* ``session``: Performance experiment session. +* ``handle``: Performance variable handle. + +DESCRIPTION +----------- + +:ref:`MPI_T_pvar_start` starts the performance variable with the handle +specified in *handle*. The special value MPI_T_PVAR_ALL_HANDLES can be +passed in *handle* to start all non-continuous handles in the session +specified in *session*. + +:ref:`MPI_T_pvar_stop` stops the performance variable with the handle specified +in *handle*. The special value MPI_T_PVAR_ALL_HANDLES can be passed in +*handle* to stop all non-continuous handles in the session specified in +*session*. + +Continuous performance variables can neither be started nor stopped. + + +ERRORS +------ + +:ref:`MPI_T_pvar_start` and MPI_T_pvar_stop() will fail if: + +* ``MPI_T_ERR_NOT_INITIALIZED``: The MPI Tools interface not initialized + +* ``MPI_T_ERR_INVALID_SESSION``: Session parameter is not a valid session + +* ``MPI_T_ERR_INVALID_HANDLE``: Invalid handle or handle not associated with the session + +* ``MPI_T_ERR_PVAR_NO_STARTSTOP``: The variable cannot be started or stopped + + +.. 
seealso:: + :ref:`MPI_T_pvar_get_info` diff --git a/docs/man-openmpi/man3/MPI_T_pvar_stop.3.rst b/docs/man-openmpi/man3/MPI_T_pvar_stop.3.rst new file mode 100644 index 00000000000..1ef3f878c70 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_T_pvar_stop.3.rst @@ -0,0 +1,9 @@ +.. _mpi_t_pvar_stop: + +MPI_T_pvar_stop +=============== + .. include_body + +.. include:: ../man3/MPI_T_pvar_start.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_T_pvar_write.3.rst b/docs/man-openmpi/man3/MPI_T_pvar_write.3.rst new file mode 100644 index 00000000000..acfcffe50fc --- /dev/null +++ b/docs/man-openmpi/man3/MPI_T_pvar_write.3.rst @@ -0,0 +1,57 @@ +.. _mpi_t_pvar_write: + + +MPI_T_pvar_write +================ + +.. include_body + +:ref:`MPI_T_pvar_write` - Write the value of a control variable + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_T_pvar_write(MPI_T_pvar_session session, MPI_T_pvar_handle handle, const void *buf) + + +INPUT PARAMETERS +---------------- +* ``session``: Performance experiment session. +* ``handle``: Performance variable handle. +* ``buf``: Initial address of storage location for variable value. + +DESCRIPTION +----------- + +:ref:`MPI_T_pvar_write` attempts to set the value of the performance variable +identified by the handle specified in *handle* in the session specified +in *session*. The value to be written is specified in *buf*. The caller +must ensure that the buffer specified in *buf* is large enough to hold +the entire value of the performance variable. + + +ERRORS +------ + +:ref:`MPI_T_pvar_write` will fail if: + +* ``MPI_T_ERR_NOT_INITIALIZED``: The MPI Tools interface not initialized + +* ``MPI_T_ERR_INVALID_HANDLE``: The handle is invalid or not associated with the session + +* ``MPI_T_ERR_INVALID_SESSION``: Session argument is not a valid session + +* ``MPI_T_ERR_PVAR_NO_WRITE``: Variable cannot be written + + +.. 
seealso:: + :ref:`MPI_T_pvar_handle_alloc` :ref:`MPI_T_pvar_get_info` :ref:`MPI_T_pvar_session_create` diff --git a/docs/man-openmpi/man3/MPI_Test.3.rst b/docs/man-openmpi/man3/MPI_Test.3.rst new file mode 100644 index 00000000000..21e3a970b14 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Test.3.rst @@ -0,0 +1,125 @@ +.. _mpi_test: + + +MPI_Test +======== + +.. include_body + +:ref:`MPI_Test` - Tests for the completion of a specific send or receive. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Test(MPI_Request *request, int *flag, MPI_Status *status) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_TEST(REQUEST, FLAG, STATUS, IERROR) + LOGICAL FLAG + INTEGER REQUEST, STATUS(MPI_STATUS_SIZE), IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Test(request, flag, status, ierror) + TYPE(MPI_Request), INTENT(INOUT) :: request + LOGICAL, INTENT(OUT) :: flag + TYPE(MPI_Status) :: status + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETER +--------------- +* ``request``: Communication request (handle). + +OUTPUT PARAMETERS +----------------- +* ``flag``: True if operation completed (logical). +* ``status``: Status object (status). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +A call to :ref:`MPI_Test` returns flag = true if the operation identified by +request is complete. In such a case, the status object is set to contain +information on the completed operation; if the communication object was +created by a nonblocking send or receive, then it is deallocated and the +request handle is set to MPI_REQUEST_NULL. The call returns flag = +false, otherwise. In this case, the value of the status object is +undefined. :ref:`MPI_Test` is a local operation. 
+ +The return status object for a receive operation carries information +that can be accessed as described in Section 3.2.5 of the MPI-1 +Standard, "Return Status." The status object for a send operation +carries information that can be accessed by a call to :ref:`MPI_Test_cancelled` +(see Section 3.8 of the MPI-1 Standard, "Probe and Cancel"). + +If your application does not need to examine the *status* field, you can +save resources by using the predefined constant MPI_STATUS_IGNORE as a +special value for the *status* argument. + +One is allowed to call :ref:`MPI_Test` with a null or inactive *request* +argument. In such a case the operation returns with *flag* = true and +empty *status*. + +The functions :ref:`MPI_Wait` and :ref:`MPI_Test` can be used to complete both sends +and receives. + + +NOTES +----- + +The use of the nonblocking :ref:`MPI_Test` call allows the user to schedule +alternative activities within a single thread of execution. An +event-driven thread scheduler can be emulated with periodic calls to +:ref:`MPI_Test`. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`, :ref:`MPI_File_set_errhandler`, or +:ref:`MPI_Win_set_errhandler` (depending on the type of MPI handle that +generated the request); the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + +Note that per MPI-1 section 3.2.5, MPI errors on requests passed to +:ref:`MPI_TEST` do not set the status.MPI_ERROR field in the returned status. 
+The error code is passed to the back-end error handler and may be passed +back to the caller through the return value of :ref:`MPI_TEST` if the back-end +error handler returns it. The pre-defined MPI error handler +MPI_ERRORS_RETURN exhibits this behavior, for example. + + +.. seealso:: + :ref:`MPI_Comm_set_errhandler` :ref:`MPI_File_set_errhandler` :ref:`MPI_Testall` :ref:`MPI_Testany` + :ref:`MPI_Testsome` :ref:`MPI_Wait` :ref:`MPI_Waitall` :ref:`MPI_Waitany` :ref:`MPI_Waitsome` + :ref:`MPI_Win_set_errhandler` diff --git a/docs/man-openmpi/man3/MPI_Test_cancelled.3.rst b/docs/man-openmpi/man3/MPI_Test_cancelled.3.rst new file mode 100644 index 00000000000..e620a289309 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Test_cancelled.3.rst @@ -0,0 +1,88 @@ +.. _mpi_test_cancelled: + + +MPI_Test_cancelled +================== + +.. include_body + +:ref:`MPI_Test_cancelled` - Tests whether a request was canceled. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Test_cancelled(const MPI_Status *status, int *flag) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_TEST_CANCELLED(STATUS, FLAG, IERROR) + LOGICAL FLAG + INTEGER STATUS(MPI_STATUS_SIZE), IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Test_cancelled(status, flag, ierror) + TYPE(MPI_Status), INTENT(IN) :: status + LOGICAL, INTENT(OUT) :: flag + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETER +--------------- +* ``status``: Status object (status). + +OUTPUT PARAMETERS +----------------- +* ``flag``: True if operation was cancelled (logical). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +Returns *flag* = true if the communication associated with the status +object was canceled successfully. In such a case, all other fields of +status (such as *count* or *tag*) are undefined. 
Otherwise, returns +*flag* = false. If a receive operation might be canceled, one should +call :ref:`MPI_Test_cancelled` first, to check whether the operation was +canceled, before checking on the other fields of the return status. + + +NOTES +----- + +Cancel can be an expensive operation that should be used only +exceptionally. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_Testall.3.rst b/docs/man-openmpi/man3/MPI_Testall.3.rst new file mode 100644 index 00000000000..b6f8c5d9989 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Testall.3.rst @@ -0,0 +1,126 @@ +.. _mpi_testall: + + +MPI_Testall +=========== + +.. include_body + +:ref:`MPI_Testall` - Tests for the completion of all previously initiated +communications in a list. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Testall(int count, MPI_Request array_of_requests[], + int *flag, MPI_Status array_of_statuses[]) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_TESTALL(COUNT, ARRAY_OF_REQUESTS, FLAG, ARRAY_OF_STATUSES, + IERROR) + LOGICAL FLAG + INTEGER COUNT, ARRAY_OF_REQUESTS(*) + INTEGER ARRAY_OF_STATUSES(MPI_STATUS_SIZE,*), IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE mpi_f08 + MPI_Testall(count, array_of_requests, flag, array_of_statuses, ierror) + INTEGER, INTENT(IN) :: count + TYPE(MPI_Request), INTENT(INOUT) :: array_of_requests(count) + LOGICAL, INTENT(OUT) :: flag + TYPE(MPI_Status) :: array_of_statuses(*) + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``count``: Lists length (integer). +* ``array_of_requests``: Array of requests (array of handles). + +OUTPUT PARAMETERS +----------------- +* ``flag``: True if previously initiated communications are complete (logical.) +* ``array_of_statuses``: Array of status objects (array of status). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +Returns *flag* = true if all communications associated with active +handles in the array have completed (this includes the case where no +handle in the list is active). In this case, each status entry that +corresponds to an active handle request is set to the status of the +corresponding communication; if the request was allocated by a +nonblocking communication call then it is deallocated, and the handle is +set to MPI_REQUEST_NULL. Each status entry that corresponds to a null or +inactive handle is set to empty. + +Otherwise, *flag* = false is returned, no request is modified and the +values of the status entries are undefined. This is a local operation. + +If your application does not need to examine the *array_of_statuses* +field, you can save resources by using the predefined constant +MPI_STATUSES_IGNORE can be used as a special value for the +*array_of_statuses* argument. + +Errors that occurred during the execution of :ref:`MPI_Testall` are handled in +the same manner as errors in :ref:`MPI_Waitall`. + + +NOTE +---- + +*flag* is true only if all requests have completed. Otherwise, *flag* is +false, and neither *array_of_requests* nor *array_of_statuses* is +modified. 
+ + +ERRORS +------ + +For each invocation of :ref:`MPI_Testall`, if one or more requests generate an +MPI error, only the *first* MPI request that caused an error will be +passed to its corresponding error handler. No other error handlers will +be invoked (even if multiple requests generated errors). However, *all* +requests that generate an error will have a relevant error code set in +the corresponding status.MPI_ERROR field (unless MPI_STATUSES_IGNORE was +used). + +The default error handler aborts the MPI job, except for I/O function +errors. The error handler may be changed with :ref:`MPI_Comm_set_errhandler`, +:ref:`MPI_File_set_errhandler`, or :ref:`MPI_Win_set_errhandler` (depending on the +type of MPI handle that generated the MPI request); the predefined error +handler MPI_ERRORS_RETURN may be used to cause error values to be +returned. Note that MPI does not guarantee that an MPI program can +continue past an error. + +If the invoked error handler allows :ref:`MPI_Testall` to return to the caller, +the value MPI_ERR_IN_STATUS will be returned in the C and Fortran +bindings. + + +.. seealso:: + :ref:`MPI_Comm_set_errhandler` :ref:`MPI_File_set_errhandler` :ref:`MPI_Test` :ref:`MPI_Testany` + :ref:`MPI_Testsome` :ref:`MPI_Wait` :ref:`MPI_Waitall` :ref:`MPI_Waitany` :ref:`MPI_Waitsome` + :ref:`MPI_Win_set_errhandler` diff --git a/docs/man-openmpi/man3/MPI_Testany.3.rst b/docs/man-openmpi/man3/MPI_Testany.3.rst new file mode 100644 index 00000000000..4ccc1ad34a6 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Testany.3.rst @@ -0,0 +1,125 @@ +.. _mpi_testany: + + +MPI_Testany +=========== + +.. include_body + +:ref:`MPI_Testany` - Tests for completion of any one previously initiated +communication in a list. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Testany(int count, MPI_Request array_of_requests[], + int *index, int *flag, MPI_Status *status) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_TESTANY(COUNT, ARRAY_OF_REQUESTS, INDEX, FLAG, STATUS, IERROR) + LOGICAL FLAG + INTEGER COUNT, ARRAY_OF_REQUESTS(*), INDEX + INTEGER STATUS(MPI_STATUS_SIZE), IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Testany(count, array_of_requests, index, flag, status, ierror) + INTEGER, INTENT(IN) :: count + TYPE(MPI_Request), INTENT(INOUT) :: array_of_requests(count) + INTEGER, INTENT(OUT) :: index + LOGICAL, INTENT(OUT) :: flag + TYPE(MPI_Status) :: status + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``count``: List length (integer). +* ``array_of_requests``: Array of requests (array of handles). + +OUTPUT PARAMETERS +----------------- +* ``index``: Index of operation that completed, or MPI_UNDEFINED if none completed (integer). +* ``flag``: True if one of the operations is complete (logical). +* ``status``: Status object (status). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Testany` tests for completion of either one or none of the operations +associated with active handles. In the former case, it returns *flag* = +true, returns in *index* the index of this request in the array, and +returns in *status* the status of that operation; if the request was +allocated by a nonblocking communication call then the request is +deallocated and the handle is set to MPI_REQUEST_NULL. (The array is +indexed from 0 in C, and from 1 in Fortran.) In the latter case (no +operation completed), it returns *flag* = false, returns a value of +MPI_UNDEFINED in *index*, and *status* is undefined. + +The array may contain null or inactive handles. If the array contains no +active handles then the call returns immediately with *flag* = true, +*index* = MPI_UNDEFINED, and an empty *status*. 
If the array of requests contains active handles then the execution of +MPI_Testany(count, array_of_requests, index, flag, status) has the same effect +as the execution of MPI_Test(&\ *array_of_requests[i]*, *flag*, +*status*), for *i*\ =0,1,...,count-1, in some arbitrary order
seealso:: + :ref:`MPI_Comm_set_errhandler` :ref:`MPI_File_set_errhandler` :ref:`MPI_Test` :ref:`MPI_Testall` + :ref:`MPI_Testsome` :ref:`MPI_Wait` :ref:`MPI_Waitall` :ref:`MPI_Waitany` :ref:`MPI_Waitsome` + :ref:`MPI_Win_set_errhandler` diff --git a/docs/man-openmpi/man3/MPI_Testsome.3.rst b/docs/man-openmpi/man3/MPI_Testsome.3.rst new file mode 100644 index 00000000000..80c7109a02a --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Testsome.3.rst @@ -0,0 +1,149 @@ +.. _mpi_testsome: + + +MPI_Testsome +============ + +.. include_body + +:ref:`MPI_Testsome` - Tests for completion of one or more previously +initiated communications in a list. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Testsome(int incount, MPI_Request array_of_requests[], + int *outcount, int array_of_indices[], + MPI_Status array_of_statuses[]) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_TESTSOME(INCOUNT, ARRAY_OF_REQUESTS, OUTCOUNT, + ARRAY_OF_INDICES, ARRAY_OF_STATUSES, IERROR) + INTEGER INCOUNT, ARRAY_OF_REQUESTS(*) + INTEGER OUTCOUNT, ARRAY_OF_INDICES(*) + INTEGER ARRAY_OF_STATUSES(MPI_STATUS_SIZE,*), IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Testsome(incount, array_of_requests, outcount, array_of_indices, + array_of_statuses, ierror) + INTEGER, INTENT(IN) :: incount + TYPE(MPI_Request), INTENT(INOUT) :: array_of_requests(incount) + INTEGER, INTENT(OUT) :: outcount, array_of_indices(*) + TYPE(MPI_Status) :: array_of_statuses(*) + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``incount``: Length of array_of_requests (integer). +* ``array_of_requests``: Array of requests (array of handles). + +OUTPUT PARAMETERS +----------------- +* ``outcount``: Number of completed requests (integer). +* ``array_of_indices``: Array of indices of operations that completed (array of integers). 
+* ``array_of_statuses``: Array of status objects for operations that completed (array of status). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +Behaves like :ref:`MPI_Waitsome`, except that it returns immediately. + +Returns in outcount the number of requests from the list +array_of_requests that have completed. Returns in the first outcount +locations of the array array_of_indices the indices of these operations +(index within the array array_of_requests; the array is indexed from 0 +in C and from 1 in Fortran). Returns in the first outcount locations of +the array array_of_status the status for these completed operations. If +a request that completed was allocated by a nonblocking communication +call, then it is deallocated, and the associated handle is set to +MPI_REQUEST_NULL. + +If no operation has completed it returns outcount = 0. If there is no +active handle in the list, it returns outcount = MPI_UNDEFINED. + +:ref:`MPI_Testsome` is a local operation, which returns immediately, whereas +:ref:`MPI_Waitsome` blocks until a communication completes, if it was passed a +list that contains at least one active handle. Both calls fulfill a +fairness requirement: If a request for a receive repeatedly appears in a +list of requests passed to :ref:`MPI_Waitsome` or :ref:`MPI_Testsome`, and a matching +send has been posted, then the receive will eventually succeed unless +the send is satisfied by another receive; send requests also fulfill +this fairness requirement. + +Errors that occur during the execution of :ref:`MPI_Testsome` are handled as +for :ref:`MPI_Waitsome`. + +If your application does not need to examine the *array_of_statuses* +field, you can save resources by using the predefined constant +MPI_STATUSES_IGNORE can be used as a special value for the +*array_of_statuses* argument. + + +NOTES +----- + +The use of :ref:`MPI_Testsome` is likely to be more efficient than the use of +:ref:`MPI_Testany`. 
The former returns information on all completed +communications; with the latter, a new call is required for each +communication that completes. + +A server with multiple clients can use :ref:`MPI_Waitsome` so as not to starve +any client. Clients send messages to the server with service requests. +The server calls :ref:`MPI_Waitsome` with one receive request for each client, +then handles all receives that have completed. If a call to :ref:`MPI_Waitany` +is used instead, then one client could starve while requests from +another client always sneak in first. + + +ERRORS +------ + +For each invocation of :ref:`MPI_Testsome`, if one or more requests generate an +MPI error, only the *first* MPI request that caused an error will be +passed to its corresponding error handler. No other error handlers will +be invoked (even if multiple requests generated errors). However, *all* +requests that generate an error will have a relevant error code set in +the corresponding status.MPI_ERROR field (unless MPI_STATUSES_IGNORE was +used). + +The default error handler aborts the MPI job, except for I/O function +errors. The error handler may be changed with :ref:`MPI_Comm_set_errhandler`, +:ref:`MPI_File_set_errhandler`, or :ref:`MPI_Win_set_errhandler` (depending on the +type of MPI handle that generated the MPI request); the predefined error +handler MPI_ERRORS_RETURN may be used to cause error values to be +returned. Note that MPI does not guarantee that an MPI program can +continue past an error. + +If the invoked error handler allows :ref:`MPI_Testsome` to return to the +caller, the value MPI_ERR_IN_STATUS will be returned in the C and +Fortran bindings. + + +.. 
seealso:: + :ref:`MPI_Comm_set_errhandler` :ref:`MPI_File_set_errhandler` :ref:`MPI_Test` :ref:`MPI_Testall` + :ref:`MPI_Testany` :ref:`MPI_Wait` :ref:`MPI_Waitall` :ref:`MPI_Waitany` :ref:`MPI_Waitsome` + :ref:`MPI_Win_set_errhandler` diff --git a/docs/man-openmpi/man3/MPI_Topo_test.3.rst b/docs/man-openmpi/man3/MPI_Topo_test.3.rst new file mode 100644 index 00000000000..d809cdc1bea --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Topo_test.3.rst @@ -0,0 +1,92 @@ +.. _mpi_topo_test: + + +MPI_Topo_test +============= + +.. include_body + +:ref:`MPI_Topo_test` - Determines the type of topology (if any) associated +with a communicator. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Topo_test(MPI_Comm comm, int *top_type) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_TOPO_TEST(COMM, TOP_TYPE, IERROR) + INTEGER COMM, TOP_TYPE, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Topo_test(comm, status, ierror) + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, INTENT(OUT) :: status + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + DOUBLE PRECISION MPI_Wtick() + DOUBLE PRECISION MPI_Wtime() + + +INPUT PARAMETER +--------------- +* ``comm``: Communicator (handle). + +OUTPUT PARAMETERS +----------------- +* ``top_type``: Topology type of communicator comm (choice). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +The function :ref:`MPI_Topo_test` returns the type of topology that is assigned +to a communicator. + +The output value *top_type* is one of the following: + +:: + + MPI_GRAPH graph topology + MPI_CART Cartesian topology + MPI_DIST_GRAPH distributed graph topology + MPI_UNDEFINED no topology + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. 
+ +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Graph_create` :ref:`MPI_Cart_create` diff --git a/docs/man-openmpi/man3/MPI_Type_c2f.3.rst b/docs/man-openmpi/man3/MPI_Type_c2f.3.rst new file mode 100644 index 00000000000..7147ca38411 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Type_c2f.3.rst @@ -0,0 +1,9 @@ +.. _mpi_type_c2f: + +MPI_Type_c2f +============ + .. include_body + +.. include:: ../man3/MPI_Comm_f2c.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Type_commit.3.rst b/docs/man-openmpi/man3/MPI_Type_commit.3.rst new file mode 100644 index 00000000000..7dac78a09b8 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Type_commit.3.rst @@ -0,0 +1,91 @@ +.. _mpi_type_commit: + + +MPI_Type_commit +=============== + +.. include_body + +:ref:`MPI_Type_commit` - Commits a data type. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Type_commit(MPI_Datatype *datatype) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_TYPE_COMMIT(DATATYPE, IERROR) + INTEGER DATATYPE, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Type_commit(datatype, ierror) + TYPE(MPI_Datatype), INTENT(INOUT) :: datatype + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETER +--------------- +* ``datatype``: Data type (handle). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +The commit operation commits the data type. 
A data type is the formal +description of a communication buffer, not the content of that buffer. +After a data type has been committed, it can be repeatedly reused to +communicate the changing content of a buffer or, indeed, the content of +different buffers, with different starting addresses. + +**Example:** The following Fortran code fragment gives examples of using +:ref:`MPI_Type_commit`. + +:: + + INTEGER type1, type2 + CALL MPI_TYPE_CONTIGUOUS(5, MPI_REAL, type1, ierr) + ! new type object created + CALL MPI_TYPE_COMMIT(type1, ierr) + ! now type1 can be used for communication + +If the data type specified in *datatype* is already committed, it is +equivalent to a no-op. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_Type_contiguous.3.rst b/docs/man-openmpi/man3/MPI_Type_contiguous.3.rst new file mode 100644 index 00000000000..11f5b43ae88 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Type_contiguous.3.rst @@ -0,0 +1,114 @@ +.. _mpi_type_contiguous: + + +MPI_Type_contiguous +=================== + +.. include_body + +:ref:`MPI_Type_contiguous` - Creates a contiguous datatype. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Type_contiguous(int count, MPI_Datatype oldtype, + MPI_Datatype *newtype) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! 
or the older form: INCLUDE 'mpif.h' + MPI_TYPE_CONTIGUOUS(COUNT, OLDTYPE, NEWTYPE, IERROR) + INTEGER COUNT, OLDTYPE, NEWTYPE, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Type_contiguous(count, oldtype, newtype, ierror) + INTEGER, INTENT(IN) :: count + TYPE(MPI_Datatype), INTENT(IN) :: oldtype + TYPE(MPI_Datatype), INTENT(OUT) :: newtype + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``count``: Replication count (nonnegative integer). +* ``oldtype``: Old datatype (handle). + +OUTPUT PARAMETERS +----------------- +* ``newtype``: New datatype (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +The simplest datatype constructor is :ref:`MPI_Type_contiguous`, which allows +replication of a datatype into contiguous locations. + +*newtype* is the datatype obtained by concatenating *count* copies of +*oldtype*. Concatenation is defined using the extent of *oldtype* as the +size of the concatenated copies. + +**Example:** Let oldtype have type map {(double, 0), (char, 8)}, with +extent 16, and let count = 3. The type map of the datatype returned by +newtype is + +:: + + {(double, 0), (char, 8), (double, 16), (char, 24), + (double, 32), (char, 40)]; + +i.e., alternating double and char elements, with displacements 0, 8, 16, +24, 32, 40. + +In general, assume that the type map of oldtype is + +:: + + {(type(0), disp(0)),...,(type(n-1), disp(n-1))}, + +with extent ex. Then newtype has a type map with count times n entries +defined by: + +:: + + {(type(0), disp(0)), ...,(type(n-1), disp(n-1)), + (type(0), disp(0) + ex), ...,(type(n-1), + disp(n-1) + ex), ...,(type(0), disp(0) + ex * (count - 1)), + ...,(type(n-1), disp(n-1) + ex * (count - 1))}. + +For more information about derived datatypes, see Section 3.12 of the +MPI-1 Standard. 
+ + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_Type_create_darray.3.rst b/docs/man-openmpi/man3/MPI_Type_create_darray.3.rst new file mode 100644 index 00000000000..72db1f1d3a8 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Type_create_darray.3.rst @@ -0,0 +1,172 @@ +.. _mpi_type_create_darray: + + +MPI_Type_create_darray +====================== + +.. include_body + +:ref:`MPI_Type_create_darray` - Creates a distributed array datatype; + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Type_create_darray(int size, int rank, int ndims, + const int array_of_gsizes[], const int array_of_distribs[], + const int array_of_dargs[], const int array_of_psizes[], + int order, MPI_Datatype oldtype, MPI_Datatype *newtype) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_TYPE_CREATE_DARRAY(SIZE, RANK, NDIMS, ARRAY_OF_GSIZES, + ARRAY_OF_DISTRIBS, ARRAY_OF_DARGS, ARRAY_OF_PSIZES, ORDER, + OLDTYPE, NEWTYPE, IERROR) + + INTEGER SIZE, RANK, NDIMS, ARRAY_OF_GSIZES(*), ARRAY_OF_DISTRIBS(*), + ARRAY_OF_DARGS(*), ARRAY_OF_PSIZES(*), ORDER, OLDTYPE, + NEWTYPE, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE mpi_f08 + MPI_Type_create_darray(size, rank, ndims, array_of_gsizes, + array_of_distribs, array_of_dargs, array_of_psizes, order, + oldtype, newtype, ierror) + INTEGER, INTENT(IN) :: size, rank, ndims, array_of_gsizes(ndims), + array_of_distribs(ndims), array_of_dargs(ndims), + array_of_psizes(ndims), order + TYPE(MPI_Datatype), INTENT(IN) :: oldtype + TYPE(MPI_Datatype), INTENT(OUT) :: newtype + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``size``: Size of process group (positive integer). +* ``rank``: Rank in process group (nonnegative integer). +* ``ndims``: Number of array dimensions as well as process grid dimensions (positive integer). +* ``array_of_gsizes``: Number of elements of type *oldtype* in each dimension of global array (array of positive integers). +* ``array_of_distribs``: Distribution of array in each dimension (array of state). +* ``array_of_dargs``: Distribution argument in each dimension (array of positive integers). +* ``array_of_psizes``: Size of process grid in each dimension (array of positive integers). +* ``order``: Array storage order flag (state). +* ``oldtype``: Old data type (handle). + +OUTPUT PARAMETERS +----------------- +* ``newtype``: New data type (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Type_create_darray` can be used to generate the data types +corresponding to the distribution of an ndims-dimensional array of +*oldtype* elements onto an *ndims*-dimensional grid of logical +processes. Unused dimensions of *array_of_psizes* should be set to 1. +For a call to :ref:`MPI_Type_create_darray` to be correct, the equation + +:: + + ndims-1 + pi array_of_psizes[i] = size + i=0 + +must be satisfied. The ordering of processes in the process grid is +assumed to be row-major, as in the case of virtual Cartesian process +topologies in MPI-1. 
+ +Each dimension of the array can be distributed in one of three ways: + +:: + + - MPI_DISTRIBUTE_BLOCK - Block distribution + - MPI_DISTRIBUTE_CYCLIC - Cyclic distribution + - MPI_DISTRIBUTE_NONE - Dimension not distributed. + +The constant MPI_DISTRIBUTE_DFLT_DARG specifies a default distribution +argument. The distribution argument for a dimension that is not +distributed is ignored. For any dimension *i* in which the distribution +is MPI_DISTRIBUTE_BLOCK, it erroneous to specify *array_of_dargs[i]* +*\** *array_of_psizes[i]* < *array_of_gsizes[i]*. + +For example, the HPF layout ARRAY(CYCLIC(15)) corresponds to +MPI_DISTRIBUTE_CYCLIC with a distribution argument of 15, and the HPF +layout ARRAY(BLOCK) corresponds to MPI_DISTRIBUTE_BLOCK with a +distribution argument of MPI_DISTRIBUTE_DFLT_DARG. + +The *order* argument is used as in :ref:`MPI_TYPE_CREATE_SUBARRAY` to specify +the storage order. Therefore, arrays described by this type constructor +may be stored in Fortran (column-major) or C (row-major) order. Valid +values for order are MPI_ORDER_FORTRAN and MPI_ORDER_C. + +This routine creates a new MPI data type with a typemap defined in terms +of a function called "cyclic()" (see below). + +Without loss of generality, it suffices to define the typemap for the +MPI_DISTRIBUTE_CYCLIC case where MPI_DISTRIBUTE_DFLT_DARG is not used. + +MPI_DISTRIBUTE_BLOCK and MPI_DISTRIBUTE_NONE can be reduced to the +MPI_DISTRIBUTE_CYCLIC case for dimension *i* as follows. + +MPI_DISTRIBUTE_BLOCK with *array_of_dargs[i]* equal to +MPI_DISTRIBUTE_DFLT_DARG is equivalent to MPI_DISTRIBUTE_CYCLIC with +*array_of_dargs[i]* set to + +:: + + (array_of_gsizes[i] + array_of_psizes[i] - 1)/array_of_psizes[i] + +If *array_of_dargs[i]* is not MPI_DISTRIBUTE_DFLT_DARG, then +MPI_DISTRIBUTE_BLOCK and DISTRIBUTE_CYCLIC are equivalent. + +MPI_DISTRIBUTE_NONE is equivalent to MPI_DISTRIBUTE_CYCLIC with +*array_of_dargs[i]* set to *array_of_gsizes[i]*. 
+ +Finally, MPI_DISTRIBUTE_CYCLIC with *array_of_dargs[i]* equal to +MPI_DISTRIBUTE_DFLT_DARG is equivalent to MPI_DISTRIBUTE_CYCLIC with +*array_of_dargs[i]* set to 1. + + +NOTES +----- + +For both Fortran and C arrays, the ordering of processes in the process +grid is assumed to be row-major. This is consistent with the ordering +used in virtual Cartesian process topologies in MPI-1. To create such +virtual process topologies, or to find the coordinates of a process in +the process grid, etc., users may use the corresponding functions +provided in MPI-1. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_Type_create_f90_complex.3.rst b/docs/man-openmpi/man3/MPI_Type_create_f90_complex.3.rst new file mode 100644 index 00000000000..302d3be4c5e --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Type_create_f90_complex.3.rst @@ -0,0 +1,142 @@ +.. _mpi_type_create_f90_complex: + + +MPI_Type_create_f90_complex +=========================== + +.. include_body + +:: + + MPI_Type_create_f90_complex - Returns a bounded MPI complex datatype + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Type_create_f90_complex(int p, int r, + MPI_Datatype *newtype) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! 
or the older form: INCLUDE 'mpif.h' + MPI_TYPE_CREATE_F90_COMPLEX (P, R, NEWTYPE, IERROR) + INTEGER P, R, NEWTYPE, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Type_create_f90_complex(p, r, newtype, ierror) + INTEGER, INTENT(IN) :: p, r + TYPE(MPI_Datatype), INTENT(OUT) :: newtype + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``p``: Precision, in decimal digits (integer). +* ``r``: Decimal exponent range (integer). + +OUTPUT PARAMETERS +----------------- +* ``newtype``: New data type (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +This function provides a way to declare KIND-parameterized COMPLEX MPI +datatypes. The arguments are interpreted in a similar fashion to the F90 +function SELECTED_REAL_KIND. The parameters *p* and *r* must be scalar +integers. The argument *p* represents the required level of numerical +precision, in decimal digits. The *r* parameter indicates the range of +exponents desired: the returned datatype will have at least one exponent +between +\ *r* and -*r* (inclusive). + +Either *p* or *r*, but not both, may be omitted from calls to +SELECTED_REAL_KIND. Similarly, either argument to +:ref:`MPI_Type_create_f90_complex` may be set to MPI_UNDEFINED. + + +NOTES +----- + +It is erroneous to supply values for *p* and *r* not supported by the +compiler. + +The Fortran function SELECTED_REAL_KIND maps a large number of (*p,r*) +pairs to a much smaller number of KIND parameters supported by the +compiler. KIND parameters are not specified by the language and are not +portable. From the point of view of the language, variables of the same +base type and KIND parameter are equivalent, even if their KIND +parameters were generated by different (*p,r*) arguments to +SELECTED_REAL_KIND. 
However, to help facilitate interoperability in a +heterogeneous environment, equivalency is more strictly defined for +datatypes returned by :ref:`MPI_Type_create_f90_complex`. Two MPI datatypes, +each generated by this function, will match if and only if they have +identical values for both *p* and *r*. + +The interaction between the datatypes returned by this function and the +external32 data representation - used by :ref:`MPI_Pack_external`, +:ref:`MPI_Unpack_external`, and many MPI_File functions - is subtle. The +external32 representation of returned datatypes is as follows. + +:: + + if (p > 33) and/or (r > 4931): + external32 size = n/a (undefined) + else if (p > 15) and/or (r > 307): + external32 size = 32 + else if (p > 6) and/or (r > 37): + external32 size = 16 + else: + external32 size = 8 + +If the external32 representation of a datatype is undefined, so are the +results of using that datatype in operations that require the external32 +format. Care should be taken not to use incompatible datatypes +indirectly, e.g., as part of another datatype or through a duplicated +datatype, in these functions. + +If a variable is declared specifying a nondefault KIND value that was +not obtained with SELECTED_REAL_KIND (i.e., *p* and/or *r* are unknown), +the only way to obtain a matching MPI datatype is to use the functions +:ref:`MPI_Sizeof` and :ref:`MPI_Type_match_size`. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. 
+ +See the MPI man page for a full list of MPI error codes. + + +.. seealso:: + :ref:`MPI_Pack_external` :ref:`MPI_Sizeof` :ref:`MPI_Type_match_size` :ref:`MPI_Unpack_external` + SELECTED_REAL_KIND diff --git a/docs/man-openmpi/man3/MPI_Type_create_f90_integer.3.rst b/docs/man-openmpi/man3/MPI_Type_create_f90_integer.3.rst new file mode 100644 index 00000000000..eb6dd369193 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Type_create_f90_integer.3.rst @@ -0,0 +1,135 @@ +.. _mpi_type_create_f90_integer: + + +MPI_Type_create_f90_integer +=========================== + +.. include_body + +:: + + MPI_Type_create_f90_integer - Returns a bounded MPI integer datatype + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Type_create_f90_integer(int r, MPI_Datatype *newtype) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_TYPE_CREATE_F90_INTEGER (R, NEWTYPE, IERROR) + INTEGER R, NEWTYPE, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Type_create_f90_integer(r, newtype, ierror) + INTEGER, INTENT(IN) :: r + TYPE(MPI_Datatype), INTENT(OUT) :: newtype + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETER +--------------- +* ``r``: Precision, in decimal digits (integer). + +OUTPUT PARAMETERS +----------------- +* ``newtype``: New data type (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +This function provides a way to declare KIND-parameterized INTEGER MPI +datatypes. The argument is interpreted in a similar fashion to the F90 +function SELECTED_INT_KIND: *r* must be a scalar integer, and represents +the desired level of numerical precision, in decimal digits. + + +NOTES +----- + +It is erroneous to supply a value for *r* not supported by the compiler. 
+ +The Fortran function SELECTED_INT_KIND maps a large number of *r* values +to a much smaller number of KIND parameters supported by the compiler. +KIND parameters are not specified by the language and are not portable. +From the point of view of the language, variables of the same base type +and KIND parameter are equivalent, even if their KIND parameters were +generated by different *r* arguments to SELECTED_INT_KIND. However, to +help facilitate interoperability in a heterogeneous environment, +equivalency is more strictly defined for datatypes returned by +:ref:`MPI_Type_create_f90_integer`. Two MPI datatypes, each generated by this +function, will match if and only if they have identical values for *r*. + +The interaction between the datatypes returned by this function and the +external32 data representation - used by :ref:`MPI_Pack_external`, +:ref:`MPI_Unpack_external` and many MPI_File functions - is subtle. The +external32 representation of returned datatypes is as follows. + +:: + + if (r > 38): + external32 size = n/a (undefined) + else if (r > 18): + external32 size = 16 + else if (r > 9): + external32 size = 8 + else if (r > 4): + external32 size = 4 + else if (r > 2): + external32 size = 2 + else: + external32 size = 1 + +If the external32 representation of a datatype is undefined, so are the +results of using that datatype in operations that require the external32 +format. Care should be taken not to use incompatible datatypes +indirectly, e.g., as part of another datatype or through a duplicated +datatype, in these functions. + +If a variable is declared specifying a nondefault KIND value that was +not obtained with SELECTED_INT_KIND (i.e., *r* is unknown), the only way +to obtain a matching MPI datatype is to use the functions :ref:`MPI_Sizeof` and +:ref:`MPI_Type_match_size`. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. 
+ +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + +See the MPI man page for a full list of MPI error codes. + + +.. seealso:: + :ref:`MPI_Pack_external` :ref:`MPI_Sizeof` :ref:`MPI_Type_match_size` :ref:`MPI_Unpack_external` + SELECTED_INT_KIND diff --git a/docs/man-openmpi/man3/MPI_Type_create_f90_real.3.rst b/docs/man-openmpi/man3/MPI_Type_create_f90_real.3.rst new file mode 100644 index 00000000000..3ac51726b2d --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Type_create_f90_real.3.rst @@ -0,0 +1,141 @@ +.. _mpi_type_create_f90_real: + + +MPI_Type_create_f90_real +======================== + +.. include_body + +:: + + MPI_Type_create_f90_real - Returns a bounded MPI real datatype + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Type_create_f90_real(int p, int r, MPI_Datatype *newtype) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_TYPE_CREATE_F90_REAL (P, R, NEWTYPE, IERROR) + INTEGER P, R, NEWTYPE, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Type_create_f90_real(p, r, newtype, ierror) + INTEGER, INTENT(IN) :: p, r + TYPE(MPI_Datatype), INTENT(OUT) :: newtype + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``p``: Precision, in decimal digits (integer). +* ``r``: Decimal exponent range (integer). + +OUTPUT PARAMETERS +----------------- +* ``newtype``: New data type (handle). +* ``IERROR``: Fortran only: Error status (integer). 
+ +DESCRIPTION +----------- + +This function provides a way to declare KIND-parameterized REAL MPI +datatypes. The arguments are interpreted in a similar fashion to the F90 +function SELECTED_REAL_KIND. The parameters *p* and *r* must be scalar +integers. The argument *p* represents the required level of numerical +precision, in decimal digits. The *r* parameter indicates the range of +exponents desired: the returned datatype will have at least one exponent +between +\ *r* and -*r* (inclusive). + +Either *p* or *r*, but not both, may be omitted from calls to +SELECTED_REAL_KIND. Similarly, either argument to +:ref:`MPI_Type_create_f90_real` may be set to MPI_UNDEFINED. + + +NOTES +----- + +It is erroneous to supply values for *p* and *r* not supported by the +compiler. + +The Fortran function SELECTED_REAL_KIND maps a large number of (*p,r*) +pairs to a much smaller number of KIND parameters supported by the +compiler. KIND parameters are not specified by the language and are not +portable. From the point of view of the language, variables of the same +base type and KIND parameter are equivalent, even if their KIND +parameters were generated by different (*p,r*) arguments to +SELECTED_REAL_KIND. However, to help facilitate interoperability in a +heterogeneous environment, equivalency is more strictly defined for +datatypes returned by :ref:`MPI_Type_create_f90_real`. Two MPI datatypes, each +generated by this function, will match if and only if they have +identical values for both *p* and *r*. + +The interaction between the datatypes returned by this function and the +external32 data representation - used by :ref:`MPI_Pack_external`, +:ref:`MPI_Unpack_external` and many MPI_File functions - is subtle. The +external32 representation of returned datatypes is as follows. 
+ +:: + + if (p > 33) and/or (r > 4931): + external32 size = n/a (undefined) + else if (p > 15) and/or (r > 307): + external32 size = 16 + else if (p > 6) and/or (r > 37): + external32 size = 8 + else: + external32 size = 4 + +If the external32 representation of a datatype is undefined, so are the +results of using that datatype in operations that require the external32 +format. Care should be taken not to use incompatible datatypes +indirectly, e.g., as part of another datatype or through a duplicated +datatype, in these functions. + +If a variable is declared specifying a nondefault KIND value that was +not obtained with SELECTED_REAL_KIND (i.e., *p* and/or *r* are unknown), +the only way to obtain a matching MPI datatype is to use the functions +:ref:`MPI_Sizeof` and :ref:`MPI_Type_match_size`. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + +See the MPI man page for a full list of MPI error codes. + + +.. seealso:: + :ref:`MPI_Pack_external` :ref:`MPI_Sizeof` :ref:`MPI_Type_match_size` :ref:`MPI_Unpack_external` + SELECTED_REAL_KIND diff --git a/docs/man-openmpi/man3/MPI_Type_create_hindexed.3.rst b/docs/man-openmpi/man3/MPI_Type_create_hindexed.3.rst new file mode 100644 index 00000000000..7f16bc04051 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Type_create_hindexed.3.rst @@ -0,0 +1,9 @@ +.. _mpi_type_create_hindexed: + +MPI_Type_create_hindexed +======================== + .. include_body + +.. 
include:: ../man3/MPI_Type_indexed.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Type_create_hindexed_block.3.rst b/docs/man-openmpi/man3/MPI_Type_create_hindexed_block.3.rst new file mode 100644 index 00000000000..ea8f0a945b5 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Type_create_hindexed_block.3.rst @@ -0,0 +1,9 @@ +.. _mpi_type_create_hindexed_block: + +MPI_Type_create_hindexed_block +============================== + .. include_body + +.. include:: ../man3/MPI_Type_create_indexed_block.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Type_create_hvector.3.rst b/docs/man-openmpi/man3/MPI_Type_create_hvector.3.rst new file mode 100644 index 00000000000..57aa324eb1a --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Type_create_hvector.3.rst @@ -0,0 +1,109 @@ +.. _mpi_type_create_hvector: + + +MPI_Type_create_hvector +======================= + +.. include_body + +:ref:`MPI_Type_create_hvector` - Creates a vector (strided) data type with +offset in bytes. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Type_create_hvector(int count, int blocklength, + MPI_Aint stride, MPI_Datatype oldtype, MPI_Datatype *newtype) + + +Fortran Syntax (see FORTRAN 77 NOTES) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_TYPE_CREATE_HVECTOR(COUNT, BLOCKLENGTH, STRIDE, OLDTYPE, + NEWTYPE, IERROR) + + INTEGER COUNT, BLOCKLENGTH, OLDTYPE, NEWTYPE, IERROR + INTEGER(KIND=MPI_ADDRESS_KIND) STRIDE + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE mpi_f08 + MPI_Type_create_hvector(count, blocklength, stride, oldtype, newtype, + ierror) + INTEGER, INTENT(IN) :: count, blocklength + INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: stride + TYPE(MPI_Datatype), INTENT(IN) :: oldtype + TYPE(MPI_Datatype), INTENT(OUT) :: newtype + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``count``: Number of blocks (nonnegative integer). +* ``blocklength``: Number of elements in each block (nonnegative integer). +* ``stride``: Number of bytes between start of each block (integer). +* ``oldtype``: Old data type (handle). + +OUTPUT PARAMETERS +----------------- +* ``newtype``: New data type (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Type_create_hvector` creates a vector (strided) data type with offset +in bytes. + +NOTE - This routine replaces :ref:`MPI_Type_hvector`, which is deprecated. See +the man page :ref:`MPI_Type_hvector` for information about that routine. + + +FORTRAN 77 NOTES +---------------- + +The MPI standard prescribes portable Fortran syntax for the *STRIDE* +argument only for Fortran 90. FORTRAN 77 users may use the non-portable +syntax + +:: + + INTEGER*MPI_ADDRESS_KIND STRIDE + +where MPI_ADDRESS_KIND is a constant defined in mpif.h and gives the +length of the declared integer in bytes. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. 
seealso:: + :ref:`MPI_Type_hvector` :ref:`MPI_Type_vector` diff --git a/docs/man-openmpi/man3/MPI_Type_create_indexed_block.3.rst b/docs/man-openmpi/man3/MPI_Type_create_indexed_block.3.rst new file mode 100644 index 00000000000..883716a1191 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Type_create_indexed_block.3.rst @@ -0,0 +1,109 @@ +.. _mpi_type_create_indexed_block: + + +MPI_Type_create_indexed_block +============================= + +.. include_body + +:ref:`MPI_Type_create_indexed_block`, :ref:`MPI_Type_create_hindexed_block` - +Creates an indexed data type with the same block length for all blocks. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Type_create_indexed_block(int count, int blocklength, const int array_of_displacements[], MPI_Datatype oldtype, MPI_Datatype *newtype) + + int MPI_Type_create_hindexed_block(int count, int blocklength, const MPI_Aint array_of_displacements[], MPI_Datatype oldtype, MPI_Datatype *newtype) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_TYPE_CREATE_INDEXED_BLOCK(COUNT, BLOCKLENGTH, + ARRAY_OF_DISPLACEMENTS, OLDTYPE, NEWTYPE, IERROR) + INTEGER COUNT, BLOCKLENGTH, ARRAY_OF_DISPLACEMENTS(*), + OLDTYPE, NEWTYPE, IERROR + + MPI_TYPE_CREATE_HINDEXED_BLOCK(COUNT, BLOCKLENGTH, + ARRAY_OF_DISPLACEMENTS, OLDTYPE, NEWTYPE, IERROR) + INTEGER COUNT, BLOCKLENGTH, OLDTYPE, NEWTYPE + INTEGER(KIND=MPI_ADDRESS_KIND) ARRAY_OF_DISPLACEMENTS(*) + INTEGER IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE mpi_f08 + MPI_Type_create_indexed_block(count, blocklength, array_of_displacements, + oldtype, newtype, ierror) + INTEGER, INTENT(IN) :: count, blocklength, + array_of_displacements(count) + TYPE(MPI_Datatype), INTENT(IN) :: oldtype + TYPE(MPI_Datatype), INTENT(OUT) :: newtype + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Type_create_hindexed_block(count, blocklength, array_of_displacements, + oldtype, newtype, ierror) + INTEGER, INTENT(IN) :: count, blocklength + INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: + array_of_displacements(count) + TYPE(MPI_Datatype), INTENT(IN) :: oldtype + TYPE(MPI_Datatype), INTENT(OUT) :: newtype + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``count``: Length of array of displacements (integer). +* ``blocklength``: Size of block (integer). +* ``array_of_displacements``: Array of displacements (array of integers). In units of the extent of *oldtype* for MPI_Type_create_indexed_block and bytes for MPI_Type_create_hindexed_block. +* ``oldtype``: Old data type (handle). + +OUTPUT PARAMETERS +----------------- +* ``newtype``: New data type (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Type_create_indexed_block` and :ref:`MPI_Type_create_hindexed_block` create +an indexed data type with the same block length for all blocks. The only +difference between the two functions is :ref:`MPI_Type_create_indexed_block` +takes an array of displacements in units of the extent of *oldtype* +while :ref:`MPI_Type_create_hindexed_block` takes displacements in bytes. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. 
The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Type_indexed` diff --git a/docs/man-openmpi/man3/MPI_Type_create_keyval.3.rst b/docs/man-openmpi/man3/MPI_Type_create_keyval.3.rst new file mode 100644 index 00000000000..311209a065f --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Type_create_keyval.3.rst @@ -0,0 +1,151 @@ +.. _mpi_type_create_keyval: + + +MPI_Type_create_keyval +====================== + +.. include_body + +:ref:`MPI_Type_create_keyval` - Generates a new attribute key for caching +on data types. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Type_create_keyval(MPI_Type_copy_attr_function *type_copy_attr_fn, + MPI_Type_delete_attr_function *type_delete_attr_fn, + int *type_keyval, void *extra_state) + + +Fortran Syntax (see FORTRAN 77 NOTES) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_TYPE_CREATE_KEYVAL(TYPE_COPY_ATTR_FN, TYPE_DELETE_ATTR_FN, + TYPE_KEYVAL, EXTRA_STATE, IERROR) + EXTERNAL TYPE_COPY_ATTR_FN, TYPE_DELETE_ATTR_FN + INTEGER TYPE_KEYVAL, IERROR + INTEGER(KIND=MPI_ADDRESS_KIND) EXTRA_STATE + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Type_create_keyval(type_copy_attr_fn, type_delete_attr_fn, type_keyval, + extra_state, ierror) + PROCEDURE(MPI_Type_copy_attr_function) :: type_copy_attr_fn + PROCEDURE(MPI_Type_delete_attr_function) :: type_delete_attr_fn + INTEGER, INTENT(OUT) :: type_keyval + INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: extra_state + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``type_copy_attr_fn``: Copy callback function for *type_keyval* (function). 
+* ``type_delete_attr_fn``: Delete callback function for *type_keyval* (function). +* ``extra_state``: Extra state for callback functions. + +OUTPUT PARAMETERS +----------------- +* ``type_keyval``: Key value for future access (integer). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Type_create_keyval` generates a new attribute key for caching on data +types. This routine partially replaces :ref:`MPI_Keyval_create`. + +The argument *type_copy_attr_fn* may be specified as +MPI_TYPE_NULL_COPY_FN or MPI_TYPE_DUP_FN from C or Fortran. +MPI_TYPE_NULL_COPY_FN is a function that does nothing other than +returning *flag* = 0 and MPI_SUCCESS. MPI_TYPE_DUP_FN is a simple-minded +copy function that sets *flag* = 1, returns the value of +*attribute_val_in* in *attribute_val_out*, and returns MPI_SUCCESS. + +The argument *type_delete_attr_fn* may be specified as +MPI_TYPE_NULL_DELETE_FN from C or Fortran. MPI_TYPE_NULL_DELETE_FN is a +function that does nothing beyond returning MPI_SUCCESS. The C callback +functions are: + +:: + + typedef int MPI_Type_copy_attr_function(MPI_Datatype oldtype, + int type_keyval, void *extra_state, void *attribute_val_in, + void *attribute_val_out, int *flag); + +and + +:: + + typedef int MPI_Type_delete_attr_function(MPI_Datatype type, int type_keyval, + void *attribute_val, void *extra_state); + +The Fortran callback functions are: + +.. 
code-block:: fortran + + SUBROUTINE TYPE_COPY_ATTR_FN(OLDTYPE, TYPE_KEYVAL, EXTRA_STATE, + ATTRIBUTE_VAL_IN, ATTRIBUTE_VAL_OUT, FLAG, IERROR) + INTEGER OLDTYPE, TYPE_KEYVAL, IERROR + INTEGER(KIND=MPI_ADDRESS_KIND) EXTRA_STATE, + ATTRIBUTE_VAL_IN, ATTRIBUTE_VAL_OUT + LOGICAL FLAG + +and + +:: + + SUBROUTINE TYPE_DELETE_ATTR_FN(TYPE, TYPE_KEYVAL, ATTRIBUTE_VAL, EXTRA_STATE, + IERROR) + INTEGER TYPE, TYPE_KEYVAL, IERROR + INTEGER(KIND=MPI_ADDRESS_KIND) ATTRIBUTE_VAL, EXTRA_STATE + + +FORTRAN 77 NOTES +---------------- + +The MPI standard prescribes portable Fortran syntax for the +*EXTRA_STATE* argument only for Fortran 90. FORTRAN 77 users may use the +non-portable syntax + +:: + + INTEGER*MPI_ADDRESS_KIND EXTRA_STATE + +where MPI_ADDRESS_KIND is a constant defined in mpif.h and gives the +length of the declared integer in bytes. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Type_free_keyval` diff --git a/docs/man-openmpi/man3/MPI_Type_create_resized.3.rst b/docs/man-openmpi/man3/MPI_Type_create_resized.3.rst new file mode 100644 index 00000000000..e34a115d694 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Type_create_resized.3.rst @@ -0,0 +1,116 @@ +.. _mpi_type_create_resized: + + +MPI_Type_create_resized +======================= + +.. include_body + +:ref:`MPI_Type_create_resized` - Returns a new data type with new extent +and upper and lower bounds. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. 
code-block:: c + + #include + + int MPI_Type_create_resized(MPI_Datatype oldtype, MPI_Aint lb, + MPI_Aint extent, MPI_Datatype *newtype) + + +Fortran Syntax (see FORTRAN 77 NOTES) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_TYPE_CREATE_RESIZED(OLDTYPE, LB, EXTENT, NEWTYPE, IERROR) + INTEGER OLDTYPE, NEWTYPE, IERROR + INTEGER(KIND=MPI_ADDRESS_KIND) LB, EXTENT + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Type_create_resized(oldtype, lb, extent, newtype, ierror) + INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: lb, extent + TYPE(MPI_Datatype), INTENT(IN) :: oldtype + TYPE(MPI_Datatype), INTENT(OUT) :: newtype + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``oldtype``: Input data type (handle). +* ``lb``: New lower bound of data type (integer). +* ``extent``: New extent of data type (integer). + +OUTPUT PARAMETERS +----------------- +* ``newtype``: Output data type (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Type_create_resized` returns in *newtype* a handle to a new data type +that is identical to *oldtype*, except that the lower bound of this new +data type is set to be *lb*, and its upper bound is set to be *lb* + +*extent*. Any previous *lb* and *ub* markers are erased, and a new pair +of lower bound and upper bound markers are put in the positions +indicated by the *lb* and *extent* arguments. This affects the behavior +of the data type when used in communication operations, with *count* > +1, and when used in the construction of new derived data types. + + +FORTRAN 77 NOTES +---------------- + +The MPI standard prescribes portable Fortran syntax for the *LB* and +*EXTENT* arguments only for Fortran 90. 
FORTRAN 77 users may use the +non-portable syntax + +:: + + INTEGER*MPI_ADDRESS_KIND LB + or + INTEGER*MPI_ADDRESS_KIND EXTENT + +where MPI_ADDRESS_KIND is a constant defined in mpif.h and gives the +length of the declared integer in bytes. + + +NOTE +---- + +Use of :ref:`MPI_Type_create_resized` is strongly recommended over the old +MPI-1 functions :ref:`MPI_Type_extent` and :ref:`MPI_Type_lb`. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Type_get_extent` diff --git a/docs/man-openmpi/man3/MPI_Type_create_struct.3.rst b/docs/man-openmpi/man3/MPI_Type_create_struct.3.rst new file mode 100644 index 00000000000..208809839f7 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Type_create_struct.3.rst @@ -0,0 +1,110 @@ +.. _mpi_type_create_struct: + + +MPI_Type_create_struct +====================== + +.. include_body + +:ref:`MPI_Type_create_struct` - Creates a structured data type. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Type_create_struct(int count, int array_of_blocklengths[], + const MPI_Aint array_of_displacements[], const MPI_Datatype array_of_types[], + MPI_Datatype *newtype) + + +Fortran Syntax (see FORTRAN 77 NOTES) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! 
or the older form: INCLUDE 'mpif.h' + MPI_TYPE_CREATE_STRUCT(COUNT, ARRAY_OF_BLOCKLENGTHS, + ARRAY_OF_DISPLACEMENTS, ARRAY_OF_TYPES, NEWTYPE, IERROR) + INTEGER COUNT, ARRAY_OF_BLOCKLENGTHS(*), ARRAY_OF_TYPES(*) + INTEGER NEWTYPE, IERROR + INTEGER(KIND=MPI_ADDRESS_KIND) ARRAY_OF_DISPLACEMENTS(*) + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Type_create_struct(count, array_of_blocklengths, + array_of_displacements, array_of_types, newtype, ierror) + INTEGER, INTENT(IN) :: count, array_of_blocklengths(count) + INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: + array_of_displacements(count) + TYPE(MPI_Datatype), INTENT(IN) :: array_of_types(count) + TYPE(MPI_Datatype), INTENT(OUT) :: newtype + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``count``: Number of blocks (integer) -- also number of entries in arrays *array_of_types*, *array_of_displacements*, and *array_of_blocklengths*. +* ``array_of_blocklengths``: Number of elements in each block (array of integers). +* ``array_of_displacements``: Byte displacement of each block (array of integers). +* ``array_of_types``: Type of elements in each block (array of handles to data-type objects). + +OUTPUT PARAMETERS +----------------- +* ``newtype``: New data type (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Type_create_struct` creates a structured data type. This routine +replaces :ref:`MPI_Type_struct`, which is now deprecated. + +NOTE - This routine replaces :ref:`MPI_Type_struct`, which is deprecated. See +the man page :ref:`MPI_Type_struct` for information about that routine. + + +FORTRAN 77 NOTES +---------------- + +The MPI standard prescribes portable Fortran syntax for the +*ARRAY_OF_DISPLACEMENTS*\ (*) argument only for Fortran 90. 
FORTRAN 77 +users may use the non-portable syntax + +:: + + INTEGER*MPI_ADDRESS_KIND ARRAY_OF_DISPLACEMENTS(*) + +where MPI_ADDRESS_KIND is a constant defined in mpif.h and gives the +length of the declared integer in bytes. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Type_struct` :ref:`MPI_Type_create_hindexed` diff --git a/docs/man-openmpi/man3/MPI_Type_create_subarray.3.rst b/docs/man-openmpi/man3/MPI_Type_create_subarray.3.rst new file mode 100644 index 00000000000..cebd63296f2 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Type_create_subarray.3.rst @@ -0,0 +1,164 @@ +.. _mpi_type_create_subarray: + + +MPI_Type_create_subarray +======================== + +.. include_body + +:ref:`MPI_Type_create_subarray` - Creates a data type describing an +*n*-dimensional subarray of an *n*-dimensional array. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Type_create_subarray(int ndims, const int array_of_sizes[], + const int array_of_subsizes[], const int array_of_starts[], + int order, MPI_Datatype oldtype, MPI_Datatype *newtype) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! 
or the older form: INCLUDE 'mpif.h' + MPI_TYPE_CREATE_SUBARRAY(NDIMS, ARRAY_OF_SIZES, ARRAY_OF_SUBSIZES, + ARRAY_OF_STARTS, ORDER, OLDTYPE, NEWTYPE, IERROR) + + INTEGER NDIMS, ARRAY_OF_SIZES(*), ARRAY_OF_SUBSIZES(*), + ARRAY_OF_STARTS(*), ORDER, OLDTYPE, NEWTYPE, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Type_create_subarray(ndims, array_of_sizes, array_of_subsizes, + array_of_starts, order, oldtype, newtype, ierror) + INTEGER, INTENT(IN) :: ndims, array_of_sizes(ndims), + array_of_subsizes(ndims), array_of_starts(ndims), order + TYPE(MPI_Datatype), INTENT(IN) :: oldtype + TYPE(MPI_Datatype), INTENT(OUT) :: newtype + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``ndims``: Number of array dimensions (positive integer). +* ``array_of_sizes``: Number of elements of type *oldtype* in each dimension of the full array (array of positive integers). +* ``array_of_subsizes``: Number of elements of type *oldtype* in each dimension of the subarray (array of positive integers). +* ``array_of_starts``: Starting coordinates of the subarray in each dimension (array of nonnegative integers). +* ``order``: Array storage order flag (state). +* ``oldtype``: Array element data type (handle). + +OUTPUT PARAMETERS +----------------- +* ``newtype``: New data type (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +The subarray type constructor creates an MPI data type describing an +*n*-dimensional subarray of an *n*-dimensional array. The subarray may +be situated anywhere within the full array, and may be of any nonzero +size up to the size of the larger array as long as it is confined within +this array. This type constructor facilitates creating file types to +access arrays distributed in blocks among processes to a single file +that contains the global array. 
+ +This type constructor can handle arrays with an arbitrary number of +dimensions and works for both C- and Fortran-ordered matrices (that is, +row-major or column-major). Note that a C program may use Fortran order +and a Fortran program may use C order. + +The *ndims* parameter specifies the number of dimensions in the full +data array and gives the number of elements in *array_of_sizes*, +*array_of_subsizes*, and *array_of_starts*. + +The number of elements of type *oldtype* in each dimension of the +*n*-dimensional array and the requested subarray are specified by +*array_of_sizes* and *array_of_subsizes*, respectively. For any +dimension *i*, it is erroneous to specify *array_of_subsizes[i]* < 1 or +*array_of_subsizes[i]* > *array of sizes[i]*. + +The *array_of_starts* contains the starting coordinates of each +dimension of the subarray. Arrays are assumed to be indexed starting +from zero. For any dimension *i*, it is erroneous to specify + +.. code:: + + array_of_starts[i] < 0 + +or + +.. code:: + + array_of_starts[i] > (array_of_sizes[i] - array_of_subsizes[i]). + +The *order* argument specifies the storage order for the subarray as +well as the full array. It must be set to one of the following: + +* ``MPI_ORDER_C``: The ordering used by C arrays, (that is, row-major order) + +* ``MPI_ORDER_FORTRAN``: The ordering used by Fortran arrays, (that is, column-major order) + +A *ndims*-dimensional subarray (*newtype*) with no extra padding can be +defined by the function Subarray() as follows: + +.. code:: + + newtype = Subarray(ndims, {size_0, size_1,..., size_ndims-1}, + {subsize_0, subsize_1, ..., subsize_ndims-1}, + {start_0, start_1, ..., start_ndims-1}, oldtype) + +Let the typemap of *oldtype* have the form: + +.. code:: + + {(type_0, disp_0), (type_1, disp_1), ..., (type_n-1, disp_n-1)} + +where type\ *i* is a predefined MPI data type, and let *ex* be the +extent of *oldtype*. 
+ +The ``Subarray()`` function is defined recursively in three equations on +page 72 of the MPI-2 standard. + +For an example use of :ref:`MPI_Type_create_subarray` in the context +of I/O, see Section 9.9.2 of the MPI-2 standard. + + +NOTES +----- + +In a Fortran program with arrays indexed starting from 1, if the +starting coordinate of a particular dimension of the subarray is *n*, +then the entry in array of starts for that dimension is *n*-1. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler(3) `; the +predefined error handler MPI_ERRORS_RETURN may be used to cause error +values to be returned. Note that MPI does not guarantee that an MPI +program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_Type_delete_attr.3.rst b/docs/man-openmpi/man3/MPI_Type_delete_attr.3.rst new file mode 100644 index 00000000000..92478384ea0 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Type_delete_attr.3.rst @@ -0,0 +1,91 @@ +.. _mpi_type_delete_attr: + + +MPI_Type_delete_attr +==================== + +.. include_body + +:ref:`MPI_Type_delete_attr` - Deletes a datatype-caching attribute value +associated with a key. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Type_delete_attr(MPI_Datatype type, int type_keyval) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_TYPE_DELETE_ATTR(TYPE, TYPE_KEYVAL, IERROR) + INTEGER TYPE, TYPE_KEYVAL, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE mpi_f08 + MPI_Type_delete_attr(datatype, type_keyval, ierror) + TYPE(MPI_Datatype), INTENT(IN) :: datatype + INTEGER, INTENT(IN) :: type_keyval + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT/OUTPUT PARAMETER +---------------------- +* ``type``: Data type from which the attribute is deleted (handle). + +INPUT PARAMETER +--------------- +* ``type_keyval``: Key value (integer). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Type_delete_attr` deletes a datatype-caching attribute value +associated with a key. This routine partially replaces :ref:`MPI_Attr_delete`, +which is now deprecated. + + +NOTES +----- + +Note that it is not defined by the MPI standard what happens if the +delete_fn callback invokes other MPI functions. In Open MPI, it is not +valid for delete_fn callbacks (or any of their children) to add or +delete attributes on the same object on which the delete_fn callback is +being invoked. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_Type_dup.3.rst b/docs/man-openmpi/man3/MPI_Type_dup.3.rst new file mode 100644 index 00000000000..90943750d38 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Type_dup.3.rst @@ -0,0 +1,98 @@ +.. _mpi_type_dup: + + +MPI_Type_dup +============ + +.. include_body + +:ref:`MPI_Type_dup` - Duplicates a data type with associated key values. 
+ + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Type_dup(MPI_Datatype type, MPI_Datatype *newtype) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_TYPE_DUP(TYPE, NEWTYPE, IERROR) + INTEGER TYPE, NEWTYPE, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Type_dup(oldtype, newtype, ierror) + TYPE(MPI_Datatype), INTENT(IN) :: oldtype + TYPE(MPI_Datatype), INTENT(OUT) :: newtype + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETER +--------------- +* ``type``: Data type (handle). + +OUTPUT PARAMETERS +----------------- +* ``newtype``: Copy of *type* (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Type_dup` is a type constructor that duplicates the existing type +with associated key values. For each key value, the respective copy +callback function determines the attribute value associated with this +key in the new communicator. One particular action that a copy callback +may take is to delete the attribute from the new data type. Returns in +*newtype* a new data type with exactly the same properties as *type*, as +well as any copied cached information. The new data type has identical +upper bound and lower bound and yields the same net result when fully +decoded with the functions described in Section 8.6 of the MPI-2 +standard. *newtype* has the same committed state as the old *type*. + + +NOTES +----- + +Note that it is not defined by the MPI standard what happens if the +attribute copy callback invokes other MPI functions. In Open MPI, it is +not valid for attribute copy callbacks (or any of their children) to add +or delete attributes on the same object on which the attribute copy +callback is being invoked. 
+ + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Type_create_keyval` diff --git a/docs/man-openmpi/man3/MPI_Type_extent.3.rst b/docs/man-openmpi/man3/MPI_Type_extent.3.rst new file mode 100644 index 00000000000..9bcd35f6c9b --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Type_extent.3.rst @@ -0,0 +1,104 @@ +.. _mpi_type_extent: + + +MPI_Type_extent +=============== + +.. include_body + +:ref:`MPI_Type_extent` - Returns the extent of a data type, the difference +between the upper and lower bounds of the data type -- use of this +routine is deprecated. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Type_extent(MPI_Datatype datatype, MPI_Aint *extent) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + INCLUDE 'mpif.h' + MPI_TYPE_EXTENT(DATATYPE, EXTENT, IERROR) + INTEGER DATATYPE, EXTENT, IERROR + + +INPUT PARAMETER +--------------- +* ``datatype``: Datatype (handle). + +OUTPUT PARAMETERS +----------------- +* ``extent``: Datatype extent (integer). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +Note that use of this routine is *deprecated* as of MPI-2. Please use +:ref:`MPI_Type_get_extent` instead. + +:ref:`MPI_Type_extent` returns the extent of a data type, the difference +between the upper and lower bounds of the data type. 
+ +In general, if + +:: + + Typemap = {(type(0), disp(0)), ..., (type(n-1), disp(n-1))} + +then the lower bound of Typemap is defined to be + +:: + + ( min(j) disp(j) if no entry has + lb(Typemap)=( basic type lb + (min(j) {disp(j) such that type(j) = lb} otherwise + +Similarly, the upper bound of Typemap is defined to be + +:: + + (max(j) disp(j) + sizeof(type(j)) + e if no entry has + ub(Typemap)=( basic type ub + (max(j) {disp(j) such that type(j) = ub} otherwise + +Then + +:: + + extent(Typemap) = ub(Typemap) - lb(Typemap) + +If type(i) requires alignment to a byte address that is a multiple of +k(i), then e is the least nonnegative increment needed to round +extent(Typemap) to the next multiple of max(i) k(i). + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Type_get_extent` diff --git a/docs/man-openmpi/man3/MPI_Type_f2c.3.rst b/docs/man-openmpi/man3/MPI_Type_f2c.3.rst new file mode 100644 index 00000000000..796a918ee10 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Type_f2c.3.rst @@ -0,0 +1,9 @@ +.. _mpi_type_f2c: + +MPI_Type_f2c +============ + .. include_body + +.. include:: ../man3/MPI_Comm_f2c.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Type_free.3.rst b/docs/man-openmpi/man3/MPI_Type_free.3.rst new file mode 100644 index 00000000000..7b61951649b --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Type_free.3.rst @@ -0,0 +1,80 @@ +.. 
_mpi_type_free: + + +MPI_Type_free +============= + +.. include_body + +:ref:`MPI_Type_free` - Frees a data type. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Type_free(MPI_Datatype *datatype) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_TYPE_FREE(DATATYPE, IERROR) + INTEGER DATATYPE, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Type_free(datatype, ierror) + TYPE(MPI_Datatype), INTENT(INOUT) :: datatype + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT/OUTPUT PARAMETER +---------------------- +* ``datatype``: Datatype that is freed (handle). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +Marks the datatype object associated with datatype for de-allocation and +sets datatype to MPI_DATATYPE_NULL. Any communication that is currently +using this datatype will complete normally. Derived datatypes that were +defined from the freed datatype are not affected. + +Freeing a datatype does not affect any other datatype that was built +from the freed datatype. The system behaves as if input datatype +arguments to derived datatype constructors are passed by value. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. 
diff --git a/docs/man-openmpi/man3/MPI_Type_free_keyval.3.rst b/docs/man-openmpi/man3/MPI_Type_free_keyval.3.rst new file mode 100644 index 00000000000..42d3cabe1cf --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Type_free_keyval.3.rst @@ -0,0 +1,75 @@ +.. _mpi_type_free_keyval: + + +MPI_Type_free_keyval +==================== + +.. include_body + +:ref:`MPI_Type_free_keyval` - Frees a previously created type key value. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Type_free_keyval(int *type_keyval) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_TYPE_FREE_KEYVAL(TYPE_KEYVAL, IERROR) + INTEGER TYPE_KEYVAL, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Type_free_keyval(type_keyval, ierror) + INTEGER, INTENT(INOUT) :: type_keyval + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT/OUTPUT PARAMETER +---------------------- +* ``type_keyval``: Key value to free (integer). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. 
seealso:: + :ref:`MPI_Type_create_keyval` diff --git a/docs/man-openmpi/man3/MPI_Type_get_attr.3.rst b/docs/man-openmpi/man3/MPI_Type_get_attr.3.rst new file mode 100644 index 00000000000..82c0fc3d858 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Type_get_attr.3.rst @@ -0,0 +1,102 @@ +.. _mpi_type_get_attr: + + +MPI_Type_get_attr +================= + +.. include_body + +:ref:`MPI_Type_get_attr` - Returns the attribute associated with a data +type. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Type_get_attr(MPI_Datatype type, int type_keyval, void *attribute_val, int *flag) + + +Fortran Syntax (see FORTRAN 77 NOTES) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_TYPE_GET_ATTR(TYPE, TYPE_KEYVAL, ATTRIBUTE_VAL, FLAG, IERROR) + INTEGER TYPE, TYPE_KEYVAL, IERROR + INTEGER(KIND=MPI_ADDRESS_KIND) ATTRIBUTE_VAL + LOGICAL FLAG + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Type_get_attr(datatype, type_keyval, attribute_val, flag, ierror) + TYPE(MPI_Datatype), INTENT(IN) :: datatype + INTEGER, INTENT(IN) :: type_keyval + INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(OUT) :: attribute_val + LOGICAL, INTENT(OUT) :: flag + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``type``: Data type to which the attribute is attached (handle). +* ``type_keyval``: Key value (integer). + +OUTPUT PARAMETERS +----------------- +* ``attribute_val``: Attribute value, unless *flag* = false +* ``flag``: "false" if no attribute is associated with the key (logical). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +For the given data type, :ref:`MPI_Type_get_attr` returns an attribute value +that corresponds to the specified key value. 
+ + +FORTRAN 77 NOTES +---------------- + +The MPI standard prescribes portable Fortran syntax for the +*ATTRIBUTE_VAL* argument only for Fortran 90. Sun FORTRAN 77 users may +use the non-portable syntax + +:: + + INTEGER*MPI_ADDRESS_KIND ATTRIBUTE_VAL + +where MPI_ADDRESS_KIND is a constant defined in mpif.h and gives the +length of the declared integer in bytes. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Type_set_attr` diff --git a/docs/man-openmpi/man3/MPI_Type_get_contents.3.rst b/docs/man-openmpi/man3/MPI_Type_get_contents.3.rst new file mode 100644 index 00000000000..b3cc50bde6b --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Type_get_contents.3.rst @@ -0,0 +1,154 @@ +.. _mpi_type_get_contents: + + +MPI_Type_get_contents +===================== + +.. include_body + +:ref:`MPI_Type_get_contents` - Returns information about arguments used in +creation of a data type. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Type_get_contents(MPI_Datatype datatype, int max_integers, + int max_addresses, int max_datatypes, int array_of_integers[], MPI_Aint array_of_addresses[], MPI_Datatype array_of_datatypes[]) + + +Fortran Syntax (see FORTRAN 77 NOTES) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! 
or the older form: INCLUDE 'mpif.h' + MPI_TYPE_GET_CONTENTS(DATATYPE, MAX_INTEGERS, MAX_ADDRESSES, + MAX_DATATYPES, ARRAY_OF_INTEGERS, ARRAY_OF_ADDRESSES, + ARRAY_OF_DATATYPES, IERROR) + INTEGER DATATYPE, MAX_INTEGERS, MAX_ADDRESSES, MAX_DATATYPES + INTEGER ARRAY_OF_INTEGERS(*), ARRAY_OF_DATATYPES(*), IERROR + INTEGER(KIND=MPI_ADDRESS_KIND) ARRAY_OF_ADDRESSES(*) + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Type_get_contents(datatype, max_integers, max_addresses, max_datatypes, + array_of_integers, array_of_addresses, array_of_datatypes, + ierror) + TYPE(MPI_Datatype), INTENT(IN) :: datatype + INTEGER, INTENT(IN) :: max_integers, max_addresses, max_datatypes + INTEGER, INTENT(OUT) :: array_of_integers(max_integers) + INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(OUT) :: + array_of_addresses(max_addresses) + TYPE(MPI_Datatype), INTENT(OUT) :: array_of_datatypes(max_datatypes) + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``datatype``: Data type to access (handle). +* ``max_integers``: Number of elements in *array_of_integers (nonnegative integer).* +* ``max_addresses``: Number of elements in *array_of_addresses (nonnegative integer).* +* ``max_datatypes``: Number of elements in *array_of_datatypes (nonnegative integer).* + +OUTPUT PARAMETERS +----------------- +* ``array_of_integers``: Contains integer arguments used in constructing *datatype (array of integers).* +* ``array_of_addresses``: Contains address arguments used in constructing *datatype (array of integers).* +* ``array_of_datatypes``: Contains data-type arguments used in constructing *datatype (array of integers).* +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +For the given data type, :ref:`MPI_Type_get_envelope` returns information on +the number and type of input arguments used in the call that created the +data type. 
The number-of-arguments values returned can be used to +provide sufficiently large arrays in the decoding routine +:ref:`MPI_Type_get_contents`. This call and the meaning of the returned values +is described below. The combiner reflects the MPI data type constructor +call that was used in creating *datatype.* + +The parameter *datatype must be a predefined unnamed or a derived data +type. The call is erroneous if datatype is a predefined named data +type.* + +The values given for *max_integers, max_addresses, and max_datatypes +must be at least as large as the value returned in num_integers, +num_addresses, and num_datatypes, respectively, in the call +:ref:`MPI_Type_get_envelope` for the same datatype argument.* + +The data types returned in *array_of_datatypes are handles to data-type +objects that are equivalent to the data types used in the original +construction call. If these were derived data types, then the returned +data types are new data-type objects, and the user is responsible for +freeing these datatypes with :ref:`MPI_Type_free`. If these were predefined +data types, then the returned data type is equal to that (constant) +predefined data type and cannot be freed.* + +The committed state of returned derived data types is undefined, that +is, the data types may or may not be committed. Furthermore, the content +of attributes of returned data types is undefined. + +Note that :ref:`MPI_Type_get_contents` can be invoked with a data-type argument +that was constructed using :ref:`MPI_Type_create_f90_real`, +:ref:`MPI_Type_create_f90_integer`, or :ref:`MPI_Type_create_f90_complex` (an unnamed +predefined data type). In such a case, an empty *array_of_datatypes is +returned.* + +In the MPI-1 data-type constructor calls, the address arguments in +Fortran are of type INTEGER. In the new MPI-2 calls, the address +arguments are of type INTEGER(KIND=MPI_ADDRESS_KIND). 
The call +:ref:`MPI_Type_get_contents` returns all addresses in an argument of type +INTEGER(KIND=MPI_ADDRESS_KIND). This is true even if the old MPI-1 calls +were used. Thus, the location of values returned can be thought of as +being returned by the C bindings. It can also be determined by examining +the new MPI-2 calls for data-type constructors for the deprecated MPI-1 +calls that involve addresses. + + +FORTRAN 77 NOTES +---------------- + +The MPI standard prescribes portable Fortran syntax for the +*ARRAY_OF_ADDRESSES argument only for Fortran 90. FORTRAN 77* users may +use the non-portable syntax + +:: + + INTEGER*MPI_ADDRESS_KIND ARRAY_OF_ADDRESSES(*) + +where MPI_ADDRESS_KIND is a constant defined in mpif.h and gives the +length of the declared integer in bytes. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Type_get_envelope` diff --git a/docs/man-openmpi/man3/MPI_Type_get_envelope.3.rst b/docs/man-openmpi/man3/MPI_Type_get_envelope.3.rst new file mode 100644 index 00000000000..f2fc6274efd --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Type_get_envelope.3.rst @@ -0,0 +1,130 @@ +.. _mpi_type_get_envelope: + + +MPI_Type_get_envelope +===================== + +.. include_body + +:ref:`MPI_Type_get_envelope` - Returns information about input arguments +associated with a data type. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. 
code-block:: c + + #include + + int MPI_Type_get_envelope(MPI_Datatype datatype, int *num_integers, + int *num_addresses, int *num_datatypes, int *combiner) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_TYPE_GET_ENVELOPE(DATATYPE, NUM_INTEGERS, NUM_ADDRESSES, + NUM_DATATYPES, COMBINER, IERROR) + INTEGER DATATYPE, NUM_INTEGERS, NUM_ADDRESSES + INTEGER NUM_DATATYPES, COMBINER, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Type_get_envelope(datatype, num_integers, num_addresses, num_datatypes, + combiner, ierror) + TYPE(MPI_Datatype), INTENT(IN) :: datatype + INTEGER, INTENT(OUT) :: num_integers, num_addresses, num_datatypes, + combiner + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETER +--------------- +* ``datatype``: Data type to access (handle). + +OUTPUT PARAMETERS +----------------- +* ``num_integers``: Number of input integers used in the call constructing *combiner* (nonnegative integer). +* ``num_addresses``: Number of input addresses used in the call constructing *combiner* (nonnegative integer). +* ``num_datatypes``: Number of input data types used in the call constructing *combiner* (nonnegative integer). +* ``combiner``: Combiner (state). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +For the given data type, :ref:`MPI_Type_get_envelope` returns information on +the number and type of input arguments used in the call that created the +data type. The number-of-arguments values returned can be used to +provide sufficiently large arrays in the decoding routine +:ref:`MPI_Type_get_contents`. This call and the meaning of the returned values +is described below. The combiner reflects the MPI data type constructor +call that was used in creating *datatype*. 
+ + +NOTES +----- + +These are the values that can be returned in *combiner* and their +associated calls: + +:: + + Values Associated Calls + + MPI_COMBINER_NAMED a named predefined data type + MPI_COMBINER_DUP MPI_Type_dup + MPI_COMBINER_CONTIGUOUS MPI_Type_contiguous + MPI_COMBINER_VECTOR MPI_Type_vector + MPI_COMBINER_HVECTOR MPI_Type_hvector + and MPI_Type_create_hvector + MPI_COMBINER_INDEXED MPI_Type_indexed + MPI_COMBINER_HINDEXED MPI_Type_hindexed + and MPI_Type_create_hindexed + MPI_COMBINER_INDEXED_BLOCK MPI_Type_create_indexed_block + MPI_COMBINER_STRUCT MPI_Type_struct + and MPI_Type_create_struct + MPI_COMBINER_SUBARRAY MPI_Type_create_subarray + MPI_COMBINER_DARRAY MPI_Type_create_darray + MPI_COMBINER_F90_REAL MPI_Type_create_f90_real + MPI_COMBINER_F90_COMPLEX MPI_Type_create_f90_complex + MPI_COMBINER_F90_INTEGER MPI_Type_create_f90_integer + MPI_COMBINER_RESIZED MPI_Type_create_resized + +If *combiner* is MPI_COMBINER_NAMED, then *datatype* is a named +predefined data type. + +The actual arguments used in the creation call for a data type can be +obtained from the call :ref:`MPI_Type_get_contents`. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. 
seealso:: + :ref:`MPI_Type_get_contents` diff --git a/docs/man-openmpi/man3/MPI_Type_get_extent.3.rst b/docs/man-openmpi/man3/MPI_Type_get_extent.3.rst new file mode 100644 index 00000000000..4de4e9ca4dd --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Type_get_extent.3.rst @@ -0,0 +1,126 @@ +.. _mpi_type_get_extent: + + +MPI_Type_get_extent +=================== + +.. include_body + +:ref:`MPI_Type_get_extent`, :ref:`MPI_Type_get_extent_x` - Returns the lower +bound and extent of a data type. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Type_get_extent(MPI_Datatype datatype, MPI_Aint *lb, + MPI_Aint *extent) + int MPI_Type_get_extent_x(MPI_Datatype datatype, MPI_Count *lb, + MPI_Count *extent) + + +Fortran Syntax (see FORTRAN 77 NOTES) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_TYPE_GET_EXTENT(DATATYPE, LB, EXTENT, IERROR) + INTEGER DATATYPE, IERROR + INTEGER(KIND=MPI_ADDRESS_KIND) LB, EXTENT + MPI_TYPE_GET_EXTENT_X(DATATYPE, LB, EXTENT, IERROR) + INTEGER DATATYPE, IERROR + INTEGER(KIND=MPI_COUNT_KIND) LB, EXTENT + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Type_get_extent(datatype, lb, extent, ierror) + TYPE(MPI_Datatype), INTENT(IN) :: datatype + INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(OUT) :: lb, extent + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + MPI_Type_get_extent_x(datatype, lb, extent, ierror) + TYPE(MPI_Datatype), INTENT(IN) :: datatype + INTEGER(KIND = MPI_COUNT_KIND), INTENT(OUT) :: lb, extent + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETER +--------------- +* ``datatype``: Data type (handle). + +OUTPUT PARAMETERS +----------------- +* ``lb``: Lower bound of data type (integer). +* ``extent``: Data type extent (integer). +* ``IERROR``: Fortran only: Error status (integer). 
+ +DESCRIPTION +----------- + +:ref:`MPI_Type_get_extent` returns the lower bound and the extent of +*datatype*. For either function, if either the *lb* or *extent* +parameter cannot express the value to be returned (e.g., if the +parameter is too small to hold the output value), it is set to +MPI_UNDEFINED. + + +NOTE +---- + +Use of :ref:`MPI_Type_get_extent` is strongly recommended over the old MPI-1 +functions :ref:`MPI_Type_extent` and :ref:`MPI_Type_lb`. + + +FORTRAN 77 NOTES +---------------- + +The MPI standard prescribes portable Fortran syntax for the *LB* and +*EXTENT* arguments only for Fortran 90. FORTRAN 77 users may use the +non-portable syntax + +:ref:`MPI_Type_get_extent`: + +:: + + INTEGER*MPI_ADDRESS_KIND LB + or + INTEGER*MPI_ADDRESS_KIND EXTENT + +:ref:`MPI_Type_get_extent_x`: + +:: + + INTEGER*MPI_COUNT_KIND LB + or + INTEGER*MPI_COUNT_KIND EXTENT + +where MPI_ADDRESS_KIND and MPI_COUNT_KIND are constants defined in +mpif.h and give the length of the declared integer in bytes. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_Type_get_extent_x.3.rst b/docs/man-openmpi/man3/MPI_Type_get_extent_x.3.rst new file mode 100644 index 00000000000..f211f351d2c --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Type_get_extent_x.3.rst @@ -0,0 +1,9 @@ +.. _mpi_type_get_extent_x: + +MPI_Type_get_extent_x +===================== + .. include_body + +.. include:: ../man3/MPI_Type_get_extent.3.rst + :start-after: .. 
include_body + diff --git a/docs/man-openmpi/man3/MPI_Type_get_name.3.rst b/docs/man-openmpi/man3/MPI_Type_get_name.3.rst new file mode 100644 index 00000000000..a2b5e0df194 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Type_get_name.3.rst @@ -0,0 +1,84 @@ +.. _mpi_type_get_name: + + +MPI_Type_get_name +================= + +.. include_body + +:ref:`MPI_Type_get_name` - Gets the name of a data type. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include <mpi.h> + + int MPI_Type_get_name(MPI_Datatype type, char *type_name, + int *resultlen) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_TYPE_GET_NAME(TYPE, TYPE_NAME, RESULTLEN, IERROR) + INTEGER TYPE, RESULTLEN, IERROR + CHARACTER*(*) TYPE_NAME + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Type_get_name(datatype, type_name, resultlen, ierror) + TYPE(MPI_Datatype), INTENT(IN) :: datatype + CHARACTER(LEN=MPI_MAX_OBJECT_NAME), INTENT(OUT) :: type_name + INTEGER, INTENT(OUT) :: resultlen + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETER +--------------- +* ``type``: Data type whose name is to be returned (handle). + +OUTPUT PARAMETERS +----------------- +* ``type_name``: The name previously stored on the data type, or an empty string if no such name exists (string). +* ``resultlen``: Length of returned name (integer). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Type_get_name` returns the printable identifier associated with an +MPI data type. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. 
The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Type_set_name` diff --git a/docs/man-openmpi/man3/MPI_Type_get_true_extent.3.rst b/docs/man-openmpi/man3/MPI_Type_get_true_extent.3.rst new file mode 100644 index 00000000000..a5782a04742 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Type_get_true_extent.3.rst @@ -0,0 +1,130 @@ +.. _mpi_type_get_true_extent: + + +MPI_Type_get_true_extent +======================== + +.. include_body + +:ref:`MPI_Type_get_true_extent`, :ref:`MPI_Type_get_true_extent_x` - Returns +the true lower bound and extent of a data type's corresponding typemap, +ignoring MPI_UB and MPI_LB markers. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Type_get_true_extent(MPI_Datatype datatype, + MPI_Aint *true_lb, MPI_Aint *true_extent) + int MPI_Type_get_true_extent_x(MPI_Datatype datatype, + MPI_Count *true_lb, MPI_Count *true_extent) + + +Fortran Syntax (see FORTRAN 77 NOTES) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_TYPE_GET_TRUE_EXTENT(DATATYPE, TRUE_LB, TRUE_EXTENT, IERROR) + INTEGER DATATYPE, IERROR + INTEGER(KIND=MPI_ADDRESS_KIND) TRUE_LB, TRUE_EXTENT + MPI_TYPE_GET_TRUE_EXTENT_X(DATATYPE, TRUE_LB, TRUE_EXTENT, IERROR) + INTEGER DATATYPE, IERROR + INTEGER(KIND=MPI_COUNT_KIND) TRUE_LB, TRUE_EXTENT + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE mpi_f08 + MPI_Type_get_true_extent(datatype, true_lb, true_extent, ierror) + TYPE(MPI_Datatype), INTENT(IN) :: datatype + INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(OUT) :: true_lb, true_extent + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + MPI_Type_get_true_extent_x(datatype, true_lb, true_extent, ierror) + TYPE(MPI_Datatype), INTENT(IN) :: datatype + INTEGER(KIND = MPI_COUNT_KIND), INTENT(OUT) :: true_lb, true_extent + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETER +--------------- +* ``datatype``: Data type for which information is wanted (handle). + +OUTPUT PARAMETERS +----------------- +* ``true_lb``: True lower bound of data type (integer). +* ``true_extent``: True size of data type (integer). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +The *true_lb* parameter returns the offset of the lowest unit of store +that is addressed by the data type, that is, the lower bound of the +corresponding typemap, ignoring MPI_LB markers. The *true_extent* +parameter returns the true size of the data type, that is, the extent of +the corresponding typemap, ignoring MPI_LB and MPI_UB markers, and +performing no rounding for alignment. For both functions, if either the +*true_lb* or *true_extent* parameter cannot express the value to be +returned (e.g., if the parameter is too small to hold the output value), +it is set to MPI_UNDEFINED. + +The *true_extent* is the minimum number of bytes of memory necessary to +hold a data type, uncompressed. + +See section 4.1.8 of the MPI-3 standard for more detailed definitions of these +parameters in relation to the typemap. + + +FORTRAN 77 NOTES +---------------- + +The MPI standard prescribes portable Fortran syntax for the *TRUE_LB* +and *TRUE_EXTENT* arguments only for Fortran 90. 
FORTRAN 77 users may +use the non-portable syntax + +:ref:`MPI_Type_get_true_extent`: + +:: + + INTEGER*MPI_ADDRESS_KIND TRUE_LB + or + INTEGER*MPI_ADDRESS_KIND TRUE_EXTENT + +:ref:`MPI_Type_get_true_extent_x`: + +:: + + INTEGER*MPI_COUNT_KIND TRUE_LB + or + INTEGER*MPI_COUNT_KIND TRUE_EXTENT + +where MPI_ADDRESS_KIND and MPI_COUNT_KIND are constants defined in +mpif.h and give the length of the declared integer in bytes. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_Type_get_true_extent_x.3.rst b/docs/man-openmpi/man3/MPI_Type_get_true_extent_x.3.rst new file mode 100644 index 00000000000..6a0e15e073e --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Type_get_true_extent_x.3.rst @@ -0,0 +1,9 @@ +.. _mpi_type_get_true_extent_x: + +MPI_Type_get_true_extent_x +========================== + .. include_body + +.. include:: ../man3/MPI_Type_get_true_extent.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Type_hindexed.3.rst b/docs/man-openmpi/man3/MPI_Type_hindexed.3.rst new file mode 100644 index 00000000000..f9f2c5aaf09 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Type_hindexed.3.rst @@ -0,0 +1,101 @@ +.. _mpi_type_hindexed: + + +MPI_Type_hindexed +================= + +.. include_body + +:ref:`MPI_Type_hindexed` - Creates an indexed datatype with offsets in +bytes -- use of this routine is deprecated. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. 
code-block:: c + + #include + + int MPI_Type_hindexed(int count, int *array_of_blocklengths, + MPI_Aint *array_of_displacements, MPI_Datatype oldtype, + MPI_Datatype *newtype) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + INCLUDE 'mpif.h' + MPI_TYPE_HINDEXED(COUNT, ARRAY_OF_BLOCKLENGTHS, + ARRAY_OF_DISPLACEMENTS, OLDTYPE, NEWTYPE, IERROR) + INTEGER COUNT, ARRAY_OF_BLOCKLENGTHS(*) + INTEGER ARRAY_OF_DISPLACEMENTS(*), OLDTYPE, NEWTYPE + INTEGER IERROR + + +INPUT PARAMETERS +---------------- +* ``count``: Number of blocks -- also number of entries in array_of_displacements and array_of_blocklengths (integer). +* ``array_of_blocklengths``: Number of elements in each block (array of nonnegative integers). +* ``array_of_displacements``: Byte displacement of each block (C: array of *MPI_Aint*, Fortran: array of integer). +* ``oldtype``: Old datatype (handle). + +OUTPUT PARAMETERS +----------------- +* ``newtype``: New datatype (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +Note that use of this routine is *deprecated* as of MPI-2. Use +:ref:`MPI_Type_create_hindexed` instead. + +The function is identical to :ref:`MPI_Type_indexed`, except that block +displacements in array_of_displacements are specified in bytes, rather +than in multiples of the oldtype extent. + +Assume that oldtype has type map + +:: + + {(type(0), disp(0)), ..., (type(n-1), disp(n-1))}, + +with extent ex. Let B be the array_of_blocklength argument and D be the +array_of_displacements argument. 
The newly created datatype has + +:: + + n x S^count-1 + (i=0) B[i] entries: + + {(type(0), disp(0) + D[0]),...,(type(n-1), disp(n-1) + D[0]),..., + (type(0), disp(0) + (D[0] + B[0]-1)* ex),..., + type(n-1), disp(n-1) + (D[0]+ B[0]-1)* ex),..., + (type(0), disp(0) + D[count-1]),...,(type(n-1), disp(n-1) + D[count-1]),..., + (type(0), disp(0) + D[count-1] + (B[count-1] -1)* ex),..., + (type(n-1), disp(n-1) + D[count-1] + (B[count-1] -1)* ex)} + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Type_create_hindexed` :ref:`MPI_Type_indexed` diff --git a/docs/man-openmpi/man3/MPI_Type_hvector.3.rst b/docs/man-openmpi/man3/MPI_Type_hvector.3.rst new file mode 100644 index 00000000000..a4ca604223b --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Type_hvector.3.rst @@ -0,0 +1,102 @@ +.. _mpi_type_hvector: + + +MPI_Type_hvector +================ + +.. include_body + +:ref:`MPI_Type_hvector` - Creates a vector (strided) datatype with offset +in bytes -- use of this routine is deprecated. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Type_hvector(int count, int blocklength, MPI_Aint stride, + MPI_Datatype oldtype, MPI_Datatype *newtype) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + INCLUDE 'mpif.h' + MPI_TYPE_HVECTOR(COUNT, BLOCKLENGTH, STRIDE, OLDTYPE, NEWTYPE, + IERROR) + INTEGER COUNT, BLOCKLENGTH, STRIDE, OLDTYPE + INTEGER NEWTYPE, IERROR + + +INPUT PARAMETERS +---------------- +* ``count``: Number of blocks (nonnegative integer). +* ``blocklength``: Number of elements in each block (nonnegative integer). +* ``stride``: Number of bytes between start of each block (integer). +* ``oldtype``: Old datatype (handle). + +OUTPUT PARAMETERS +----------------- +* ``newtype``: New datatype (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +Note that use of this routine is *deprecated* as of MPI-2. Use +:ref:`MPI_Type_create_hvector` instead. + +The function :ref:`MPI_Type_hvector` is identical to :ref:`MPI_Type_vector`, except +that stride is given in bytes, rather than in elements. The use for both +types of vector constructors is illustrated in the examples in Section +3.12.7 of the MPI-1 Standard. + +Assume that oldtype has type map + +:: + + {(type(0), disp(0)), ..., (type(n-1), disp(n-1))} + +with extent ex. Let bl be the blocklength. The newly created datatype +has a type map with count \* bl \* n entries: + +:: + + {(type(0), disp(0)), ..., (type(n-1), disp(n-1)), + (type(0), disp(0) + ex), ..., (type(n-1), disp(n-1) + ex), + ..., (type(0), disp(0) + (bl -1) * ex),...,(type(n-1), + disp(n-1) + (bl -1) * ex), (type(0), disp(0) + stride), + ...,(type(n-1), disp(n-1) + stride), ..., (type(0), + disp(0) + stride + (bl - 1) * ex), ..., (type(n-1), + disp(n-1) + stride + (bl -1) * ex), ..., (type(0), + disp(0) + stride * (count -1)), ...,(type(n-1), + disp(n-1) + stride * (count -1)), ..., (type(0), + disp(0) + stride * (count -1) + (bl -1) * ex), ..., + (type(n-1), disp(n-1) + stride * (count -1) + (bl -1) * ex)} + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. 
+ +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Type_create_hvector` :ref:`MPI_Type_vector` diff --git a/docs/man-openmpi/man3/MPI_Type_indexed.3.rst b/docs/man-openmpi/man3/MPI_Type_indexed.3.rst new file mode 100644 index 00000000000..1604d186022 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Type_indexed.3.rst @@ -0,0 +1,169 @@ +.. _mpi_type_indexed: + + +MPI_Type_indexed +================ + +.. include_body + +:ref:`MPI_Type_indexed`, :ref:`MPI_Type_create_hindexed` - Creates an indexed +datatype. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Type_indexed(int count, const int array_of_blocklengths[], + const int array_of_displacements[], MPI_Datatype oldtype, + MPI_Datatype *newtype) + + int MPI_Type_create_hindexed(int count, + const int array_of_blocklengths[], + const MPI_Aint array_of_displacements[], MPI_Datatype oldtype, + MPI_Datatype *newtype) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_TYPE_INDEXED(COUNT, ARRAY_OF_BLOCKLENGTHS, + ARRAY_OF_DISPLACEMENTS, OLDTYPE, NEWTYPE, IERROR) + INTEGER COUNT, ARRAY_OF_BLOCKLENGTHS(*) + INTEGER ARRAY_OF_DISPLACEMENTS(*), OLDTYPE, NEWTYPE + INTEGER IERROR + + MPI_TYPE_CREATE_HINDEXED(COUNT, ARRAY_OF_BLOCKLENGTHS, + ARRAY_OF_DISPLACEMENTS, OLDTYPE, NEWTYPE, IERROR) + INTEGER COUNT, ARRAY_OF_BLOCKLENGTHS(*) + INTEGER OLDTYPE, NEWTYPE + INTEGER(KIND=MPI_ADDRESS_KIND) ARRAY_OF_DISPLACEMENTS(*) + INTEGER IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE mpi_f08 + MPI_Type_indexed(count, array_of_blocklengths, array_of_displacements, + oldtype, newtype, ierror) + INTEGER, INTENT(IN) :: count, array_of_blocklengths(count), + array_of_displacements(count) + TYPE(MPI_Datatype), INTENT(IN) :: oldtype + TYPE(MPI_Datatype), INTENT(OUT) :: newtype + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Type_create_hindexed(count, array_of_blocklengths, + array_of_displacements, oldtype, newtype, ierror) + INTEGER, INTENT(IN) :: count, array_of_blocklengths(count) + INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: + array_of_displacements(count) + TYPE(MPI_Datatype), INTENT(IN) :: oldtype + TYPE(MPI_Datatype), INTENT(OUT) :: newtype + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``count``: Number of blocks -- also number of entries in array_of_displacements and array_of_blocklengths (nonnegative integer). +* ``array_of_blocklengths``: Number of elements per block (array of nonnegative integers). +* ``array_of_displacements``: Displacement for each block, in multiples of oldtype extent for MPI_Type_indexed and bytes for MPI_Type_create_hindexed (array of integer for **MPI_TYPE_INDEXED**, array of *MPI_Aint* for **MPI_TYPE_CREATE_HINDEXED**). +* ``oldtype``: Old datatype (handle). + +OUTPUT PARAMETERS +----------------- +* ``newtype``: New datatype (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +The function :ref:`MPI_Type_indexed` allows replication of an old datatype into +a sequence of blocks (each block is a concatenation of the old +datatype), where each block can contain a different number of copies and +have a different displacement. All block displacements are multiples of +the old data type's extent. + +**Example:** Let oldtype have type map {(double, 0), (char, 8)}, with +extent 16. Let B = (3, 1) and let D = (4, 0). 
A call to +MPI_Type_indexed(2, B, D, oldtype, newtype) returns a datatype with type +map + +:: + + {(double, 64), (char, 72), (double, 80), (char, 88), + (double, 96), (char, 104), + (double, 0), (char, 8)} + +That is, three copies of the old type starting at displacement 4 x 16 = +64, and one copy starting at displacement 0. + +In general, assume that oldtype has type map + +:: + + {(type(0), disp(0)), ..., (type(n-1), disp(n-1))}, + +| with extent ex. Let B be the array_of_blocklength argument and D be + the array_of_displacements argument. The newly created datatype has + +:: + + n x S ^count-1 + i = 0 B[i] entries: + + {(type(0), disp(0) + D[0]* ex), ..., + (type(n-1), disp(n-1) + D[0]* ex), ..., + (type(0), disp(0) + (D[0] + B[0]-1)* ex), ..., + (type(n-1), disp(n-1) + (D[0]+ B[0]-1)* ex), ..., + (type(0), disp(0) + D[count-1]* ex), ..., + (type(n-1), disp(n-1) + D[count-1]* ex), ..., + (type(0), disp(0) + (D[count-1] + B[count-1] -1)* ex), ..., + (type(n-1), disp(n-1) + (D[count-1] + B[count-1] -1)* ex)} + +A call to MPI_Type_vector(count, blocklength, stride, oldtype, newtype) +is equivalent to a call to MPI_Type_indexed(count, B, D, oldtype, +newtype) where + +:: + + D[j] = j * stride, j = 0,..., count-1 + + and + + B[j] = blocklength, j = 0, .., count-1 + +The function :ref:`MPI_Type_create_hindexed` is identical to :ref:`MPI_Type_indexed`, +except that block displacements in *array_of_displacements* are +specified in bytes, rather than in multiples of the *oldtype* extent. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. 
Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Type_hindexed` diff --git a/docs/man-openmpi/man3/MPI_Type_lb.3.rst b/docs/man-openmpi/man3/MPI_Type_lb.3.rst new file mode 100644 index 00000000000..22fd099638c --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Type_lb.3.rst @@ -0,0 +1,111 @@ +.. _mpi_type_lb: + + +MPI_Type_lb +=========== + +.. include_body + +:ref:`MPI_Type_lb` - Returns the lower bound of a data type -- use of this +routine is deprecated. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Type_lb(MPI_Datatype datatype, MPI_Aint *displacement) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + INCLUDE 'mpif.h' + MPI_TYPE_LB(DATATYPE, DISPLACEMENT, IERROR) + INTEGER DATATYPE, DISPLACEMENT, IERROR + + +INPUT PARAMETER +--------------- +* ``datatype``: Datatype (handle). + +OUTPUT PARAMETERS +----------------- +* ``displacement``: Displacement of lower bound from origin, in bytes (integer). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +Note that use of this routine is *deprecated* as of MPI-2. Please use +:ref:`MPI_Type_get_extent` instead. + +:ref:`MPI_Type_lb` returns the lower bound of a data type. This may differ from +zero if the type was constructed using MPI_LB. + +The "pseudo-datatypes," MPI_LB and MPI_UB, can be used, respectively, to +mark the lower bound (or the upper bound) of a datatype. These +pseudo-datatypes occupy no space (extent (MPI_LB) = extent (MPI_UB) =0. +They do not affect the size or count of a datatype, and do not affect +the context of a message created with this datatype. However, they do +affect the definition of the extent of a datatype and, therefore, affect +the outcome of a replication of this datatype by a datatype constructor. 
+ +In general, if + +:: + + Typemap = {(type0, disp0), ..., (type(n-1), disp(n-1)} + +then the lower bound of Typemap is defined to be + +:: + + + (min(j) disp(j) if no entry has + lb(Typemap) = ( basic type lb + (min(j) {disp(j) such that type(j) = lb} otherwise + +Similarly, the upper bound of Typemap is defined to be + +:: + + + (max(j) disp(j) + sizeof((type(j)) + e if no entry has + ub(Typemap) = ( basic type ub + (max(j) {disp(j) such that type(j) = ub} otherwise + + Then + + extent(Typemap) = ub(Typemap) - lb(Typemap) + +If type(i) requires alignment to a byte address that is a multiple of +k(i), then e is the least nonnegative increment needed to round +extent(Typemap) to the next multiple of max(i) k(i). + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Type_get_extent` diff --git a/docs/man-openmpi/man3/MPI_Type_match_size.3.rst b/docs/man-openmpi/man3/MPI_Type_match_size.3.rst new file mode 100644 index 00000000000..3c399ba0a7f --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Type_match_size.3.rst @@ -0,0 +1,97 @@ +.. _mpi_type_match_size: + + +MPI_Type_match_size +=================== + +.. include_body + +:ref:`MPI_Type_match_size` - Returns an MPI datatype of a given type and +size + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Type_match_size(int typeclass, int size, + MPI_Datatype *type) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! 
or the older form: INCLUDE 'mpif.h' + MPI_TYPE_MATCH_SIZE(TYPECLASS, SIZE, TYPE, IERROR) + INTEGER TYPECLASS, SIZE, TYPE, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Type_match_size(typeclass, size, datatype, ierror) + INTEGER, INTENT(IN) :: typeclass, size + TYPE(MPI_Datatype), INTENT(OUT) :: datatype + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``typeclass``: Generic type specifier (integer). +* ``size``: Size, in bytes, of representation (integer). + +OUTPUT PARAMETERS +----------------- +* ``type``: Datatype with correct type and size (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +The function returns an MPI datatype matching a local variable of type +(*typeclass*, *size*). The returned type is a reference (handle) to a +predefined named datatype, not a duplicate. This type cannot be freed. + +The value of *typeclass* may be set to one of MPI_TYPECLASS_REAL, +MPI_TYPECLASS_INTEGER, or MPI_TYPECLASS_COMPLEX, corresponding to the +desired datatype. + +:ref:`MPI_Type_match_size` can be used to obtain a size-specific type that +matches a Fortran numeric intrinsic type: first call :ref:`MPI_Sizeof` to +compute the variable size, then call :ref:`MPI_Type_match_size` to find a +suitable datatype. In C use the sizeof builtin instead of :ref:`MPI_Sizeof`. + +It is erroneous to specify a size not supported by the compiler. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. 
Note that MPI does not +guarantee that an MPI program can continue past an error. + +See the MPI man page for a full list of MPI error codes. + + +.. seealso:: + :ref:`MPI_Sizeof` :ref:`MPI_Type_get_extent` diff --git a/docs/man-openmpi/man3/MPI_Type_set_attr.3.rst b/docs/man-openmpi/man3/MPI_Type_set_attr.3.rst new file mode 100644 index 00000000000..53093184021 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Type_set_attr.3.rst @@ -0,0 +1,102 @@ +.. _mpi_type_set_attr: + + +MPI_Type_set_attr +================= + +.. include_body + +:ref:`MPI_Type_set_attr` - Sets a key value/attribute pair to a data type. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Type_set_attr(MPI_Datatype type, int type_keyval, + void *attribute_val) + + +Fortran Syntax (see FORTRAN 77 NOTES) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_TYPE_SET_ATTR(TYPE, TYPE_KEYVAL, ATTRIBUTE_VAL, IERROR) + INTEGER TYPE, TYPE_KEYVAL, IERROR + INTEGER(KIND=MPI_ADDRESS_KIND) ATTRIBUTE_VAL + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Type_set_attr(datatype, type_keyval, attribute_val, ierror) + TYPE(MPI_Datatype), INTENT(IN) :: datatype + INTEGER, INTENT(IN) :: type_keyval + INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: attribute_val + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT/OUTPUT PARAMETER +---------------------- +* ``type``: Data type to which attribute will be attached (handle). + +INPUT PARAMETERS +---------------- +* ``type_keyval``: Key value (integer). +* ``attribute_val``: Attribute value. + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +For the given data type, :ref:`MPI_Type_set_attr` sets the key value to the +value of the specified attribute. 
+ + +FORTRAN 77 NOTES +---------------- + +The MPI standard prescribes portable Fortran syntax for the +*ATTRIBUTE_VAL* argument only for Fortran 90. FORTRAN 77 users may use +the non-portable syntax + +:: + + INTEGER*MPI_ADDRESS_KIND ATTRIBUTE_VAL + +where MPI_ADDRESS_KIND is a constant defined in mpif.h and gives the +length of the declared integer in bytes. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Type_get_attr` diff --git a/docs/man-openmpi/man3/MPI_Type_set_name.3.rst b/docs/man-openmpi/man3/MPI_Type_set_name.3.rst new file mode 100644 index 00000000000..3fb01a6f149 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Type_set_name.3.rst @@ -0,0 +1,84 @@ +.. _mpi_type_set_name: + + +MPI_Type_set_name +================= + +.. include_body + +:ref:`MPI_Type_set_name` - Sets the name of a data type. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Type_set_name(MPI_Datatype type, const char *type_name) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_TYPE_SET_NAME(TYPE, TYPE_NAME, IERROR) + INTEGER TYPE, IERROR + CHARACTER*(*) TYPE_NAME + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE mpi_f08 + MPI_Type_set_name(datatype, type_name, ierror) + TYPE(MPI_Datatype), INTENT(IN) :: datatype + CHARACTER(LEN=*), INTENT(IN) :: type_name + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT/OUTPUT PARAMETER +---------------------- +* ``type``: Data type for which the identifier is to be set (handle). + +INPUT PARAMETER +--------------- +* ``type_name``: The character string remembered as the name (string). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Type_set_name` associates a printable identifier with an MPI data +type. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Type_get_name` diff --git a/docs/man-openmpi/man3/MPI_Type_size.3.rst b/docs/man-openmpi/man3/MPI_Type_size.3.rst new file mode 100644 index 00000000000..b54fb8452c5 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Type_size.3.rst @@ -0,0 +1,106 @@ +.. _mpi_type_size: + + +MPI_Type_size +============= + +.. include_body + +:ref:`MPI_Type_size`, :ref:`MPI_Type_size_x` - Returns the number of bytes +occupied by entries in a data type. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Type_size(MPI_Datatype datatype, int *size) + int MPI_Type_size_x(MPI_Datatype datatype, MPI_Count *size) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! 
or the older form: INCLUDE 'mpif.h' + MPI_TYPE_SIZE(DATATYPE, SIZE, IERROR) + INTEGER DATATYPE, SIZE, IERROR + MPI_TYPE_SIZE_X(DATATYPE, SIZE, IERROR) + INTEGER DATATYPE + INTEGER(KIND=MPI_COUNT_KIND) SIZE + INTEGER IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Type_size(datatype, size, ierror) + TYPE(MPI_Datatype), INTENT(IN) :: datatype + INTEGER, INTENT(OUT) :: size + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + MPI_Type_size_x(datatype, size, ierror) + TYPE(MPI_Datatype), INTENT(IN) :: datatype + INTEGER(KIND=MPI_COUNT_KIND), INTENT(OUT) :: size + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETER +--------------- +* ``datatype``: Datatype (handle). + +OUTPUT PARAMETERS +----------------- +* ``size``: Datatype size (integer). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Type_size` returns the total size, in bytes, of the entries in the +type signature associated with datatype; i.e., the total size of the +data in a message that would be created with this datatype. Entries that +occur multiple times in the datatype are counted with their +multiplicity. For either function, if the *size* parameter cannot +express the value to be returned (e.g., if the parameter is too small to +hold the output value), it is set to MPI_UNDEFINED. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. 
+ + +FORTRAN 77 NOTES +---------------- + +The MPI standard prescribes portable Fortran syntax for the *SIZE* +argument of :ref:`MPI_Type_size_x` only for Fortran 90. FORTRAN 77 users may +use the non-portable syntax + +:: + + INTEGER*MPI_COUNT_KIND SIZE + +where MPI_COUNT_KIND is a constant defined in mpif.h and gives the +length of the declared integer in bytes. diff --git a/docs/man-openmpi/man3/MPI_Type_size_x.3.rst b/docs/man-openmpi/man3/MPI_Type_size_x.3.rst new file mode 100644 index 00000000000..d942f7871ab --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Type_size_x.3.rst @@ -0,0 +1,9 @@ +.. _mpi_type_size_x: + +MPI_Type_size_x +=============== + .. include_body + +.. include:: ../man3/MPI_Type_size.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Type_struct.3.rst b/docs/man-openmpi/man3/MPI_Type_struct.3.rst new file mode 100644 index 00000000000..9420ff45fcf --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Type_struct.3.rst @@ -0,0 +1,143 @@ +.. _mpi_type_struct: + + +MPI_Type_struct +=============== + +.. include_body + +:ref:`MPI_Type_struct` - Creates a *struct* data type -- use of this +routine is deprecated. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Type_struct(int count, int *array_of_blocklengths, + MPI_Aint *array_of_displacements, MPI_Datatype *array_of_types, + MPI_Datatype *newtype) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + INCLUDE 'mpif.h' + MPI_TYPE_STRUCT(COUNT, ARRAY_OF_BLOCKLENGTHS, + ARRAY_OF_DISPLACEMENTS, ARRAY_OF_TYPES, + NEWTYPE, IERROR) + INTEGER COUNT, ARRAY_OF_BLOCKLENGTHS(*) + INTEGER ARRAY_OF_DISPLACEMENTS(*) + INTEGER ARRAY_OF_TYPES(*), NEWTYPE, IERROR + + +INPUT PARAMETERS +---------------- +* ``count``: Number of blocks (integer) also number of entries in arrays array_of_types, array_of_displacements, and array_of_blocklengths. +* ``array_of_blocklengths``: Number of elements in each block (array). 
+* ``array_of_displacements``: Byte displacement of each block (array). +* ``array_of_types``: Type of elements in each block (array of handles to datatype objects). + +OUTPUT PARAMETERS +----------------- +* ``newtype``: New datatype (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +Note that use of this routine is *deprecated* as of MPI-2. Use +:ref:`MPI_Type_create_struct` instead. + +:ref:`MPI_Type_struct` is the most general type constructor. It further +generalizes :ref:`MPI_Type_hindexed` in that it allows each block to consist of +replications of different datatypes. + +**Example:** Let type1 have type map + +:: + + + {(double, 0), (char, 8)} + +with extent 16. Let B = (2, 1, 3), D = (0, 16, 26), and T = (MPI_FLOAT, +type1, MPI_CHAR). Then a call to MPI_Type_struct(3, B, D, T, newtype) +returns a datatype with type map + +:: + + + {(float, 0), (float,4), (double, 16), (char, 24), + (char, 26), (char, 27), (char, 28)} + +That is, two copies of MPI_FLOAT starting at 0, followed by one copy of +type1 starting at 16, followed by three copies of MPI_CHAR, starting at +26. (We assume that a float occupies 4 bytes.) + +For more information, see section 3.12.1 of the MPI-1.1 Standard. + + +NOTES +----- + +If an upperbound is set explicitly by using the MPI datatype MPI_UB, the +corresponding index must be positive. + +The MPI-1 Standard originally made vague statements about padding and +alignment; this was intended to allow the simple definition of +structures that could be sent with a count greater than one. For +example, + +:: + + struct {int a; char b;} foo; + +may have + +:: + + sizeof(foo) = sizeof(int) + sizeof(char); + +defining the extent of a datatype as including an epsilon, which would +have allowed an implementation to make the extent an MPI datatype for +this structure equal to 2*sizeof(int). 
However, since different systems +might define different paddings, a clarification to the standard made +epsilon zero. Thus, if you define a structure datatype and wish to send +or receive multiple items, you should explicitly include an MPI_UB entry +as the last member of the structure. For example, the following code can +be used for the structure foo: + +:: + + + blen[0] = 1; indices[0] = 0; oldtypes[0] = MPI_INT; + blen[1] = 1; indices[1] = &foo.b - &foo; oldtypes[1] = MPI_CHAR; + blen[2] = 1; indices[2] = sizeof(foo); oldtypes[2] = MPI_UB; + MPI_Type_struct( 3, blen, indices, oldtypes, &newtype ); + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Type_create_struct` :ref:`MPI_Type_create_hindexed` diff --git a/docs/man-openmpi/man3/MPI_Type_ub.3.rst b/docs/man-openmpi/man3/MPI_Type_ub.3.rst new file mode 100644 index 00000000000..5c20cba388d --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Type_ub.3.rst @@ -0,0 +1,116 @@ +.. _mpi_type_ub: + + +MPI_Type_ub +=========== + +.. include_body + +:ref:`MPI_Type_ub` - Returns the upper bound of a datatype -- use of this +routine is deprecated. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Type_ub(MPI_Datatype datatype, MPI_Aint *displacement) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + INCLUDE 'mpif.h' + MPI_TYPE_UB(DATATYPE, DISPLACEMENT, IERROR) + INTEGER DATATYPE, DISPLACEMENT, IERROR + + +INPUT PARAMETER +--------------- +* ``datatype``: Datatype (handle). + +OUTPUT PARAMETERS +----------------- +* ``displacement``: Displacement of upper bound from origin, in bytes (integer). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +Note that use of this routine is *deprecated* as of MPI-2. Please use +:ref:`MPI_Type_get_extent` instead. + +:ref:`MPI_Type_ub` returns the upper bound of a data type. This will differ +from zero if the type was constructed using MPI_UB. The upper bound will +take into account any alignment considerations. + +The "pseudo-datatypes," MPI_LB and MPI_UB, can be used, respectively, to +mark the upper bound (or the lower bound) of a datatype. These +pseudo-datatypes occupy no space (extent (MPI_LB) = extent (MPI_UB) =0. +They do not affect the size or count of a datatype, and do not affect +the context of a message created with this datatype. However, they do +affect the definition of the extent of a datatype and, therefore, affect +the outcome of a replication of this datatype by a datatype constructor. + +In general, if + +:: + + + Typemap = {(type(0), disp(0)), ..., (type(n-1), disp(n-1))} + +then the lower bound of Typemap is defined to be + +:: + + + (min(j) disp(j) if no entry has + lb(Typemap) = ( basic type lb + (min(j) {disp(j) such that type(j) = lb} otherwise + +Similarly, the upper bound of Typemap is defined to be + +:: + + + (max(j) disp(j) + sizeof(type(j) = lb} if no entry has + ub(Typemap) = ( basic type ub + (max(j) {disp(j) such that type(j) = ub} otherwise + +Then + +:: + + + extent(Typemap) = ub(Typemap) - lb(Typemap) + +If type(i) requires alignment to a byte address that is a multiple of +k(i), then e is the least nonnegative increment needed to round +extent(Typemap) to the next multiple of max(i) k(i). 
+ + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Type_get_extent` diff --git a/docs/man-openmpi/man3/MPI_Type_vector.3.rst b/docs/man-openmpi/man3/MPI_Type_vector.3.rst new file mode 100644 index 00000000000..b7eb80e160b --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Type_vector.3.rst @@ -0,0 +1,143 @@ +.. _mpi_type_vector: + + +MPI_Type_vector +=============== + +.. include_body + +:ref:`MPI_Type_vector` - Creates a vector (strided) datatype. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Type_vector(int count, int blocklength, int stride, + MPI_Datatype oldtype, MPI_Datatype *newtype) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_TYPE_VECTOR(COUNT, BLOCKLENGTH, STRIDE, OLDTYPE, NEWTYPE, + IERROR) + INTEGER COUNT, BLOCKLENGTH, STRIDE, OLDTYPE + INTEGER NEWTYPE, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Type_vector(count, blocklength, stride, oldtype, newtype, ierror) + INTEGER, INTENT(IN) :: count, blocklength, stride + TYPE(MPI_Datatype), INTENT(IN) :: oldtype + TYPE(MPI_Datatype), INTENT(OUT) :: newtype + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``count``: Number of blocks (nonnegative integer). +* ``blocklength``: Number of elements in each block (nonnegative integer). 
+* ``stride``: Number of elements between start of each block (integer). +* ``oldtype``: Old datatype (handle). + +OUTPUT PARAMETERS +----------------- +* ``newtype``: New datatype (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +The function :ref:`MPI_Type_vector` is a general constructor that allows +replication of a datatype into locations that consist of equally spaced +blocks. Each block is obtained by concatenating the same number of +copies of the old datatype. The spacing between blocks is a multiple of +the extent of the old datatype. + +**Example 1:** Assume, again, that oldtype has type map {(double, 0), +(char, 8)}, with extent 16. A call to MPI_Type_vector(2, 3, 4, oldtype, +newtype) will create the datatype with type map + +:: + + {(double, 0), (char, 8), (double, 16), (char, 24), + (double, 32), (char, 40), + (double, 64), (char, 72), + (double, 80), (char, 88), (double, 96), (char, 104)} + +That is, two blocks with three copies each of the old type, with a +stride of 4 elements (4 x 16 bytes) between the blocks. + +**Example 2:** A call to MPI_Type_vector(3, 1, -2, oldtype, newtype) +will create the datatype + +:: + + + {(double, 0), (char, 8), (double, -32), (char, -24), + (double, -64), (char, -56)} + +In general, assume that oldtype has type map + +:: + + + {(type(0), disp(0)), ..., (type(n-1), disp(n-1))}, + +with extent ex. Let bl be the blocklength. 
The newly created datatype +has a type map with count x bl x n entries: + +:: + + + {(type(0), disp(0)), ..., (type(n-1), disp(n-1)), + (type(0), disp(0) + ex), ..., (type(n-1), disp(n-1) + ex), ..., + (type(0), disp(0) + (bl -1) * ex),..., + (type(n-1), disp(n-1) + (bl -1)* ex), + (type(0), disp(0) + stride * ex),..., (type(n-1), + disp(n-1) + stride * ex), ..., + (type(0), disp(0) + (stride + bl - 1) * ex), ..., + (type(n-1), disp(n-1) + (stride + bl -1) * ex), ..., + (type(0), disp(0) + stride * (count -1) * ex), ..., + (type(n-1), disp(n-1) + stride * (count -1) * ex), ..., + (type(0), disp(0) + (stride * (count -1) + bl -1) * ex), ..., + (type(n-1), disp(n-1) + (stride * (count -1) + bl -1) * ex)} + +A call to MPI_Type_contiguous(count, oldtype, newtype) is equivalent to +a call to MPI_Type_vector(count, 1, 1, oldtype, newtype), or to a call +to MPI_Type_vector(1, count, n, oldtype, newtype), n arbitrary. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Type_create_hvector` :ref:`MPI_Type_hvector` diff --git a/docs/man-openmpi/man3/MPI_Unpack.3.rst b/docs/man-openmpi/man3/MPI_Unpack.3.rst new file mode 100644 index 00000000000..3ba1d9df4eb --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Unpack.3.rst @@ -0,0 +1,164 @@ +.. _mpi_unpack: + + +MPI_Unpack +========== + +.. include_body + +:ref:`MPI_Unpack` - Unpacks a datatype into contiguous memory. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. 
code-block:: c + + #include + + int MPI_Unpack(const void *inbuf, int insize, int *position, + void *outbuf, int outcount, MPI_Datatype datatype, + MPI_Comm comm) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_UNPACK(INBUF, INSIZE, POSITION, OUTBUF, OUTCOUNT, + DATATYPE, COMM, IERROR) + INBUF(*), OUTBUF(*) + INTEGER INSIZE, POSITION, OUTCOUNT, DATATYPE, + COMM, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Unpack(inbuf, insize, position, outbuf, outcount, datatype, comm, + ierror) + TYPE(*), DIMENSION(..), INTENT(IN) :: inbuf + TYPE(*), DIMENSION(..) :: outbuf + INTEGER, INTENT(IN) :: insize, outcount + INTEGER, INTENT(INOUT) :: position + TYPE(MPI_Datatype), INTENT(IN) :: datatype + TYPE(MPI_Comm), INTENT(IN) :: comm + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``inbuf``: Input buffer start (choice). +* ``insize``: Size of input buffer, in bytes (integer). +* ``outcount``: Number of items to be unpacked (integer). +* ``datatype``: Datatype of each output data item (handle). +* ``comm``: Communicator for packed message (handle). + +INPUT/OUTPUT PARAMETER +---------------------- +* ``position``: Current position in bytes (integer). + +OUTPUT PARAMETERS +----------------- +* ``outbuf``: Output buffer start (choice). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +Unpacks a message into the receive buffer specified by outbuf, outcount, +datatype from the buffer space specified by inbuf and insize. The output +buffer can be any communication buffer allowed in :ref:`MPI_Recv`. The input +buffer is a contiguous storage area containing insize bytes, starting at +address inbuf. The input value of position is the first location in the +input buffer occupied by the packed message. 
*position* is incremented +by the size of the packed message, so that the output value of position +is the first location in the input buffer after the locations occupied +by the message that was unpacked. *comm* is the communicator used to +receive the packed message. + + +NOTES +----- + +Note the difference between :ref:`MPI_Recv` and :ref:`MPI_Unpack`: In :ref:`MPI_Recv`, the +*count* argument specifies the maximum number of items that can be +received. The actual number of items received is determined by the +length of the incoming message. In :ref:`MPI_Unpack`, the count argument +specifies the actual number of items that are to be unpacked; the "size" +of the corresponding message is the increment in position. The reason +for this change is that the "incoming message size" is not predetermined +since the user decides how much to unpack; nor is it easy to determine +the "message size" from the number of items to be unpacked. + +To understand the behavior of pack and unpack, it is convenient to think +of the data part of a message as being the sequence obtained by +concatenating the successive values sent in that message. The pack +operation stores this sequence in the buffer space, as if sending the +message to that buffer. The unpack operation retrieves this sequence +from buffer space, as if receiving a message from that buffer. (It is +helpful to think of internal Fortran files or sscanf in C for a similar +function.) + +Several messages can be successively packed into one packing unit. This +is effected by several successive related calls to :ref:`MPI_Pack`, where the +first call provides position = 0, and each successive call inputs the +value of position that was output by the previous call, and the same +values for outbuf, outcount, and comm. This packing unit now contains +the equivalent information that would have been stored in a message by +one send call with a send buffer that is the "concatenation" of the +individual send buffers. 
A packing unit can be sent using type MPI_PACKED. Any point-to-point or
+collective communication function can be used to move the sequence of
+bytes that forms the packing unit from one process to another. This
+packing unit can now be received using any receive operation, with any
+datatype: The type-matching rules are relaxed for messages sent with
+type MPI_PACKED.
+
+A message sent with any type (including MPI_PACKED) can be received
+using the type MPI_PACKED. Such a message can then be unpacked by calls
+to :ref:`MPI_Unpack`.
seealso:: + :ref:`MPI_Pack` :ref:`MPI_Pack_size` diff --git a/docs/man-openmpi/man3/MPI_Unpack_external.3.rst b/docs/man-openmpi/man3/MPI_Unpack_external.3.rst new file mode 100644 index 00000000000..6af42892483 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Unpack_external.3.rst @@ -0,0 +1,171 @@ +.. _mpi_unpack_external: + + +MPI_Unpack_external +=================== + +.. include_body + +:ref:`MPI_Unpack_external` - Reads data from a portable format + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Unpack_external(const char datarep[], const void *inbuf, + MPI_Aint insize, MPI_Aint *position, + void *outbuf, int outcount, + MPI_Datatype datatype) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_UNPACK_EXTERNAL(DATAREP, INBUF, INSIZE, POSITION, + OUTBUF, OUTCOUNT, DATATYPE, IERROR) + + INTEGER OUTCOUNT, DATATYPE, IERROR + INTEGER(KIND=MPI_ADDRESS_KIND) INSIZE, POSITION + CHARACTER*(*) DATAREP + INBUF(*), OUTBUF(*) + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Unpack_external(datarep, inbuf, insize, position, outbuf, outcount, + datatype, ierror) + CHARACTER(LEN=*), INTENT(IN) :: datarep + TYPE(*), DIMENSION(..), INTENT(IN) :: inbuf + TYPE(*), DIMENSION(..) :: outbuf + INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: insize + INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(INOUT) :: position + INTEGER, INTENT(IN) :: outcount + TYPE(MPI_Datatype), INTENT(IN) :: datatype + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``datarep``: Data Representation (string). +* ``inbuf``: Input buffer start (choice). +* ``insize``: Size of input buffer, in bytes (integer). +* ``outcount``: Number of items to be unpacked (integer). +* ``datatype``: Datatype of each output data item (handle). 
+ +INPUT/OUTPUT PARAMETER +---------------------- +* ``position``: Current position in buffer, in bytes (integer). + +OUTPUT PARAMETERS +----------------- +* ``outbuf``: Output buffer start (choice). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Unpack_external` unpacks data from the external32 format, a universal +data representation defined by the MPI Forum. This format is useful for +exchanging data between MPI implementations, or when writing data to a +file. + +The input buffer is a contiguous storage area pointed to by *inbuf* +containing *insize* bytes. The output buffer can be any communication +buffer allowed in :ref:`MPI_Recv`, and is specified by *outbuf*, *outcount*, +and *datatype*. + +The input value of *position* is the first position in *inbuf* to be +read for unpacking (measured in bytes, not elements, relative to the +start of the buffer). When the function returns, *position* is +incremented by the size of the packed message, so that it points to the +first location in *inbuf* following the message that was unpacked. This +way it may be used as input to a subsequent call to :ref:`MPI_Unpack_external`. + + +NOTES +----- + +Note the difference between :ref:`MPI_Recv` and :ref:`MPI_Unpack_external`: In +:ref:`MPI_Recv`, the *count* argument specifies the maximum number of items +that can be received. In :ref:`MPI_Unpack_external`, the *outcount* argument +specifies the actual number of items that are to be unpacked. With a +regular receive operation, the incoming message size determines the +number of components that will be received. With :ref:`MPI_Unpack_external`, it +is up to the user to specify how many components to unpack, since the +user may wish to unpack the received message multiple times into various +buffers. 
+ +To understand the behavior of pack and unpack, it is convenient to think +of the data part of a message as being the sequence obtained by +concatenating the successive values sent in that message. The pack +operation stores this sequence in the buffer space, as if sending the +message to that buffer. The unpack operation retrieves this sequence +from buffer space, as if receiving a message from that buffer. (It is +helpful to think of internal Fortran files or sscanf in C for a similar +function.) + +Several messages can be successively packed into one packing unit. This +is effected by several successive related calls to :ref:`MPI_Pack_external`, +where the first call provides *position*\ =0, and each successive call +inputs the value of *position* that was output by the previous call, +along with the same values for *outbuf* and *outcount*. This packing +unit now contains the equivalent information that would have been stored +in a message by one send call with a send buffer that is the +"concatenation" of the individual send buffers. + +A packing unit can be sent using type MPI_BYTE. Any point-to-point or +collective communication function can be used to move the sequence of +bytes that forms the packing unit from one process to another. This +packing unit can now be received using any receive operation, with any +datatype: The type-matching rules are relaxed for messages sent with +type MPI_BYTE. + +A packing unit can be unpacked into several successive messages. This is +effected by several successive related calls to :ref:`MPI_Unpack_external`, +where the first call provides *position*\ =0, and each successive call +inputs the value of position that was output by the previous call, and +the same values for *inbuf* and *insize*. + +The concatenation of two packing units is not necessarily a packing +unit; nor is a substring of a packing unit necessarily a packing unit. 
+Thus, one cannot concatenate two packing units and then unpack the +result as one packing unit; nor can one unpack a substring of a packing +unit as a separate packing unit. Each packing unit that was created by a +related sequence of pack calls must be unpacked as a unit by a sequence +of related unpack calls. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + +See the MPI man page for a full list of MPI error codes. + + +.. seealso:: + :ref:`MPI_Pack_external` :ref:`MPI_Pack_external_size` :ref:`MPI_Recv` sscanf(3C) diff --git a/docs/man-openmpi/man3/MPI_Unpublish_name.3.rst b/docs/man-openmpi/man3/MPI_Unpublish_name.3.rst new file mode 100644 index 00000000000..5b1b52d5233 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Unpublish_name.3.rst @@ -0,0 +1,134 @@ +.. _mpi_unpublish_name: + + +MPI_Unpublish_name +================== + +.. include_body + +:: + + MPI_Unpublish_name - Unpublishes a service name + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Unpublish_name(const char *service_name, MPI_Info info, + const char *port_name) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_UNPUBLISH_NAME(SERVICE_NAME, INFO, PORT_NAME, IERROR) + CHARACTER*(*) SERVICE_NAME, PORT_NAME + INTEGER INFO, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE mpi_f08 + MPI_Unpublish_name(service_name, info, port_name, ierror) + CHARACTER(LEN=*), INTENT(IN) :: service_name, port_name + TYPE(MPI_Info), INTENT(IN) :: info + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``service_name``: A service name (string). +* ``info``: Options to the name service functions (handle). +* ``port_name``: A port name (string). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +This routine removes the pair (*service_name, port_name*) so that +applications may no longer retrieve *port_name* by calling +:ref:`MPI_Lookup_name`. It is an error to unpublish a *service_name* that was +not published via :ref:`MPI_Publish_name`. Both the *service_name* and +*port_name* arguments to :ref:`MPI_Unpublish_name` must be identical to the +arguments to the previous call to :ref:`MPI_Publish_name`. + + +INFO ARGUMENTS +-------------- + +The following keys for *info* are recognized: + +:: + + Key Type Description + --- ---- ----------- + + ompi_global_scope bool If set to true, unpublish the name from + the global scope. Unpublish from the local + scope otherwise. See the NAME SCOPE + section for more details. + +*bool* info keys are actually strings but are evaluated as follows: if +the string value is a number, it is converted to an integer and cast to +a boolean (meaning that zero integers are false and non-zero values are +true). If the string value is (case-insensitive) "yes" or "true", the +boolean is true. If the string value is (case-insensitive) "no" or +"false", the boolean is false. All other string values are unrecognized, +and therefore false. + +If no info key is provided, the function will first check to see if a +global server has been specified and is available. If so, then the +unpublish function will default to global scope first, followed by +local. 
Otherwise, the data will default to unpublish with local scope. + + +NAME SCOPE +---------- + +Open MPI supports two name scopes: *global* and *local*. Local scope +values are placed in a data store located on the mpirun of the calling +process' job, while global scope values reside on a central server. +Calls to :ref:`MPI_Unpublish_name` must correctly specify the scope to be used +in finding the value to be removed. The function will return an error if +the specified service name is not found on the indicated location. + +For a more detailed description of scoping rules, please see the +:ref:`MPI_Publish_name` man page. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + +See the MPI man page for a full list of MPI error codes. + + +.. seealso:: + :ref:`MPI_Publish_name` :ref:`MPI_Lookup_name` :ref:`MPI_Open_port` diff --git a/docs/man-openmpi/man3/MPI_Wait.3.rst b/docs/man-openmpi/man3/MPI_Wait.3.rst new file mode 100644 index 00000000000..98b75c5950e --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Wait.3.rst @@ -0,0 +1,137 @@ +.. _mpi_wait: + + +MPI_Wait +======== + +.. include_body + +:ref:`MPI_Wait` - Waits for an MPI send or receive to complete. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Wait(MPI_Request *request, MPI_Status *status) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! 
or the older form: INCLUDE 'mpif.h' + MPI_WAIT(REQUEST, STATUS, IERROR) + INTEGER REQUEST, STATUS(MPI_STATUS_SIZE), IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Wait(request, status, ierror) + TYPE(MPI_Request), INTENT(INOUT) :: request + TYPE(MPI_Status) :: status + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETER +--------------- +* ``request``: Request (handle). + +OUTPUT PARAMETERS +----------------- +* ``status``: Status object (status). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +A call to :ref:`MPI_Wait` returns when the operation identified by request is +complete. If the communication object associated with this request was +created by a nonblocking send or receive call, then the object is +deallocated by the call to :ref:`MPI_Wait` and the request handle is set to +MPI_REQUEST_NULL. + +The call returns, in status, information on the completed operation. The +content of the status object for a receive operation can be accessed as +described in Section 3.2.5 of the MPI-1 Standard, "Return Status." The +status object for a send operation may be queried by a call to +:ref:`MPI_Test_cancelled` (see Section 3.8 of the MPI-1 Standard, "Probe and +Cancel"). + +If your application does not need to examine the *status* field, you can +save resources by using the predefined constant MPI_STATUS_IGNORE as a +special value for the *status* argument. + +One is allowed to call :ref:`MPI_Wait` with a null or inactive request +argument. In this case the operation returns immediately with empty +status. + + +NOTES +----- + +Successful return of :ref:`MPI_Wait` after an :ref:`MPI_Ibsend` implies that the user +send buffer can be reused i.e., data has been sent out or copied into a +buffer attached with :ref:`MPI_Buffer_attach`. 
Note that, at this point, we can +no longer cancel the send (for more information, see Section 3.8 of the +MPI-1 Standard, "Probe and Cancel"). If a matching receive is never +posted, then the buffer cannot be freed. This runs somewhat counter to +the stated goal of :ref:`MPI_Cancel` (always being able to free program space +that was committed to the communication subsystem). + +Example: Simple usage of nonblocking operations and :ref:`MPI_Wait`. + +:: + + CALL MPI_COMM_RANK(comm, rank, ierr) + IF(rank.EQ.0) THEN + CALL MPI_ISEND(a(1), 10, MPI_REAL, 1, tag, comm, request, ierr) + **** do some computation **** + CALL MPI_WAIT(request, status, ierr) + ELSE + CALL MPI_IRECV(a(1), 15, MPI_REAL, 0, tag, comm, request, ierr) + **** do some computation **** + CALL MPI_WAIT(request, status, ierr) + END IF + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`, :ref:`MPI_File_set_errhandler`, or +:ref:`MPI_Win_set_errhandler` (depending on the type of MPI handle that +generated the request); the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + +Note that per MPI-1 section 3.2.5, MPI errors on requests passed to +:ref:`MPI_WAIT` do not set the status.MPI_ERROR field in the returned status. +The error code is passed to the back-end error handler and may be passed +back to the caller through the return value of :ref:`MPI_WAIT` if the back-end +error handler returns it. The pre-defined MPI error handler +MPI_ERRORS_RETURN exhibits this behavior, for example. + + +.. 
seealso:: + :ref:`MPI_Comm_set_errhandler` :ref:`MPI_File_set_errhandler` :ref:`MPI_Test` :ref:`MPI_Testall` + :ref:`MPI_Testany` :ref:`MPI_Testsome` :ref:`MPI_Waitall` :ref:`MPI_Waitany` :ref:`MPI_Waitsome` + :ref:`MPI_Win_set_errhandler` diff --git a/docs/man-openmpi/man3/MPI_Waitall.3.rst b/docs/man-openmpi/man3/MPI_Waitall.3.rst new file mode 100644 index 00000000000..e04a5eea6b2 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Waitall.3.rst @@ -0,0 +1,126 @@ +.. _mpi_waitall: + + +MPI_Waitall +=========== + +.. include_body + +:ref:`MPI_Waitall` - Waits for all given communications to complete. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Waitall(int count, MPI_Request array_of_requests[], + MPI_Status *array_of_statuses) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_WAITALL(COUNT, ARRAY_OF_REQUESTS, ARRAY_OF_STATUSES, IERROR) + INTEGER COUNT, ARRAY_OF_REQUESTS(*) + INTEGER ARRAY_OF_STATUSES(MPI_STATUS_SIZE,*), IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Waitall(count, array_of_requests, array_of_statuses, ierror) + INTEGER, INTENT(IN) :: count + TYPE(MPI_Request), INTENT(INOUT) :: array_of_requests(count) + TYPE(MPI_Status) :: array_of_statuses(*) + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``count``: Lists length (integer). +* ``array_of_requests``: Array of requests (array of handles). + +OUTPUT PARAMETERS +----------------- +* ``array_of_statuses``: Array of status objects (array of status). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +Blocks until all communication operations associated with active handles +in the list complete, and returns the status of all these operations +(this includes the case where no handle in the list is active). Both +arrays have the same number of valid entries. 
The ith entry in
+array_of_statuses is set to the return status of the ith operation.
+Requests that were created by nonblocking communication operations are
+deallocated, and the corresponding handles in the array are set to
+MPI_REQUEST_NULL. The list may contain null or inactive handles. The
+call sets to empty the status of each such entry.
+
+The error-free execution of MPI_Waitall(count, array_of_requests,
+array_of_statuses) has the same effect as the execution of
+MPI_Wait(&array_of_requests[i], &array_of_statuses[i]), for
+i=0,...,count-1, in some arbitrary order. :ref:`MPI_Waitall` with an array of
+length 1 is equivalent to :ref:`MPI_Wait`.
+
+When one or more of the communications completed by a call to
+:ref:`MPI_Waitall` fail, it is desirable to return specific information on each
+communication. The function :ref:`MPI_Waitall` will return in such case the
+error code MPI_ERR_IN_STATUS and will set the error field of each status
+to a specific error code. This code will be MPI_SUCCESS if the specific
+communication completed; it will be another specific error code if it
+failed; or it can be MPI_ERR_PENDING if it has neither failed nor
+completed. The function :ref:`MPI_Waitall` will return MPI_SUCCESS if no
+request had an error, or will return another error code if it failed for
+other reasons (such as invalid arguments). In such cases, it will not
+update the error fields of the statuses.
+
+If your application does not need to examine the *array_of_statuses*
+field, you can save resources by using the predefined constant
+MPI_STATUSES_IGNORE as a special value for the
+*array_of_statuses* argument.
+
+
+ERRORS
+------
+
+For each invocation of :ref:`MPI_Waitall`, if one or more requests generate an
+MPI error, only the *first* MPI request that caused an error will be
+passed to its corresponding error handler. No other error handlers will
+be invoked (even if multiple requests generated errors). 
However, *all* +requests that generate an error will have a relevant error code set in +the corresponding status.MPI_ERROR field (unless MPI_STATUSES_IGNORE was +used). + +The default error handler aborts the MPI job, except for I/O function +errors. The error handler may be changed with :ref:`MPI_Comm_set_errhandler`, +:ref:`MPI_File_set_errhandler`, or :ref:`MPI_Win_set_errhandler` (depending on the +type of MPI handle that generated the MPI request); the predefined error +handler MPI_ERRORS_RETURN may be used to cause error values to be +returned. Note that MPI does not guarantee that an MPI program can +continue past an error. + +If the invoked error handler allows :ref:`MPI_Waitall` to return to the caller, +the value MPI_ERR_IN_STATUS will be returned in the C and Fortran +bindings. + + +.. seealso:: + :ref:`MPI_Comm_set_errhandler` :ref:`MPI_File_set_errhandler` :ref:`MPI_Test` :ref:`MPI_Testall` + :ref:`MPI_Testany` :ref:`MPI_Testsome` :ref:`MPI_Wait` :ref:`MPI_Waitany` :ref:`MPI_Waitsome` + :ref:`MPI_Win_set_errhandler` diff --git a/docs/man-openmpi/man3/MPI_Waitany.3.rst b/docs/man-openmpi/man3/MPI_Waitany.3.rst new file mode 100644 index 00000000000..723d866de23 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Waitany.3.rst @@ -0,0 +1,136 @@ +.. _mpi_waitany: + + +MPI_Waitany +=========== + +.. include_body + +:ref:`MPI_Waitany` - Waits for any specified send or receive to complete. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Waitany(int count, MPI_Request array_of_requests[], + int *index, MPI_Status *status) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_WAITANY(COUNT, ARRAY_OF_REQUESTS, INDEX, STATUS, IERROR) + INTEGER COUNT, ARRAY_OF_REQUESTS(*), INDEX + INTEGER STATUS(MPI_STATUS_SIZE), IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: fortran

+   USE mpi_f08
+   MPI_Waitany(count, array_of_requests, index, status, ierror)
+   INTEGER, INTENT(IN) :: count
+   TYPE(MPI_Request), INTENT(INOUT) :: array_of_requests(count)
+   INTEGER, INTENT(OUT) :: index
+   TYPE(MPI_Status) :: status
+   INTEGER, OPTIONAL, INTENT(OUT) :: ierror
+
+
+INPUT PARAMETERS
+----------------
+* ``count``: List length (integer).
+* ``array_of_requests``: Array of requests (array of handles).
+
+OUTPUT PARAMETERS
+-----------------
+* ``index``: Index of handle for operation that completed (integer). In the range 0 to count-1. In Fortran, the range is 1 to count.
+* ``status``: Status object (status).
+* ``IERROR``: Fortran only: Error status (integer).
+
+DESCRIPTION
+-----------
+
+A call to :ref:`MPI_Waitany` can be used to wait for the completion of one out
+of several requests.
+
+The array_of_requests list may contain null or inactive handles. If the
+list contains no active handles (list has length zero or all entries are
+null or inactive), then the call returns immediately with index =
+MPI_UNDEFINED, and an empty status.
+
+The execution of MPI_Waitany(count, array_of_requests, index, status)
+has the same effect as the execution of MPI_Wait(&array_of_requests[i],
+status), where i is the value returned by index (unless the value of
+index is MPI_UNDEFINED). :ref:`MPI_Waitany` with an array containing one active
+entry is equivalent to :ref:`MPI_Wait`.
+
+If your application does not need to examine the *status* field, you can
+save resources by using the predefined constant MPI_STATUS_IGNORE as a
+special value for the *status* argument.
+
+**Example:** Client-server code (starvation can occur).
+
+::
+
+   CALL MPI_COMM_SIZE(comm, size, ierr)
+   CALL MPI_COMM_RANK(comm, rank, ierr)
+   IF(rank .GT. 0) THEN ! client code
+       DO WHILE(.TRUE.)
+           CALL MPI_ISEND(a, n, MPI_REAL, 0, tag, comm, request, ierr)
+           CALL MPI_WAIT(request, status, ierr)
+       END DO
+   ELSE ! 
rank=0 -- server code
+       DO i=1, size-1
+           CALL MPI_IRECV(a(1,i), n, MPI_REAL, i, tag,
+                comm, request_list(i), ierr)
+       END DO
+       DO WHILE(.TRUE.)
+           CALL MPI_WAITANY(size-1, request_list, index, status, ierr)
+           CALL DO_SERVICE(a(1,index)) ! handle one message
+           CALL MPI_IRECV(a(1, index), n, MPI_REAL, index, tag,
+                comm, request_list(index), ierr)
+       END DO
+   END IF
+
+
+ERRORS
+------
+
+Almost all MPI routines return an error value; C routines as the value
+of the function and Fortran routines in the last argument.
+
+Before the error value is returned, the current MPI error handler is
+called. By default, this error handler aborts the MPI job, except for
+I/O function errors. The error handler may be changed with
+:ref:`MPI_Comm_set_errhandler`, :ref:`MPI_File_set_errhandler`, or
+:ref:`MPI_Win_set_errhandler` (depending on the type of MPI handle that
+generated the request); the predefined error handler MPI_ERRORS_RETURN
+may be used to cause error values to be returned. Note that MPI does not
+guarantee that an MPI program can continue past an error.
+
+Note that per MPI-1 section 3.2.5, MPI errors on requests passed to
+:ref:`MPI_WAITANY` do not set the status.MPI_ERROR field in the returned
+status. The error code is passed to the back-end error handler and may
+be passed back to the caller through the return value of :ref:`MPI_WAITANY` if
+the back-end error handler returns it. The pre-defined MPI error handler
+MPI_ERRORS_RETURN exhibits this behavior, for example.
+
+
+.. seealso::
+   :ref:`MPI_Comm_set_errhandler` :ref:`MPI_File_set_errhandler` :ref:`MPI_Test` :ref:`MPI_Testall`
+   :ref:`MPI_Testany` :ref:`MPI_Testsome` :ref:`MPI_Wait` :ref:`MPI_Waitall` :ref:`MPI_Waitsome`
+   :ref:`MPI_Win_set_errhandler`
diff --git a/docs/man-openmpi/man3/MPI_Waitsome.3.rst b/docs/man-openmpi/man3/MPI_Waitsome.3.rst
new file mode 100644
index 00000000000..0c76e2baffd
--- /dev/null
+++ b/docs/man-openmpi/man3/MPI_Waitsome.3.rst
@@ -0,0 +1,167 @@
+.. 
_mpi_waitsome:
+
+
+MPI_Waitsome
+============
+
+.. include_body
+
+:ref:`MPI_Waitsome` - Waits for some given communications to complete.
+
+
+SYNTAX
+------
+
+
+C Syntax
+^^^^^^^^
+
+.. code-block:: c
+
+   #include <mpi.h>
+
+   int MPI_Waitsome(int incount, MPI_Request array_of_requests[],
+       int *outcount, int array_of_indices[],
+       MPI_Status array_of_statuses[])
+
+
+Fortran Syntax
+^^^^^^^^^^^^^^
+
+.. code-block:: fortran
+
+   USE MPI
+   ! or the older form: INCLUDE 'mpif.h'
+   MPI_WAITSOME(INCOUNT, ARRAY_OF_REQUESTS, OUTCOUNT,
+       ARRAY_OF_INDICES, ARRAY_OF_STATUSES, IERROR)
+   INTEGER INCOUNT, ARRAY_OF_REQUESTS(*), OUTCOUNT
+   INTEGER ARRAY_OF_INDICES(*)
+   INTEGER ARRAY_OF_STATUSES(MPI_STATUS_SIZE,*)
+   INTEGER IERROR
+
+
+Fortran 2008 Syntax
+^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: fortran
+
+   USE mpi_f08
+   MPI_Waitsome(incount, array_of_requests, outcount, array_of_indices,
+       array_of_statuses, ierror)
+   INTEGER, INTENT(IN) :: incount
+   TYPE(MPI_Request), INTENT(INOUT) :: array_of_requests(incount)
+   INTEGER, INTENT(OUT) :: outcount, array_of_indices(*)
+   TYPE(MPI_Status) :: array_of_statuses(*)
+   INTEGER, OPTIONAL, INTENT(OUT) :: ierror
+
+
+INPUT PARAMETERS
+----------------
+* ``incount``: Length of array_of_requests (integer).
+* ``array_of_requests``: Array of requests (array of handles).
+
+OUTPUT PARAMETERS
+-----------------
+* ``outcount``: Number of completed requests (integer).
+* ``array_of_indices``: Array of indices of operations that completed (array of integers).
+* ``array_of_statuses``: Array of status objects for operations that completed (array of status).
+* ``IERROR``: Fortran only: Error status (integer).
+
+DESCRIPTION
+-----------
+
+Waits until at least one of the operations associated with active
+handles in the list have completed. Returns in outcount the number of
+requests from the list array_of_requests that have completed. 
Returns in
+the first outcount locations of the array array_of_indices the indices
+of these operations (index within the array array_of_requests; the array
+is indexed from 0 in C and from 1 in Fortran). Returns in the first
+outcount locations of the array array_of_status the status for these
+completed operations. If a request that completed was allocated by a
+nonblocking communication call, then it is deallocated, and the
+associated handle is set to MPI_REQUEST_NULL.
+
+If the list contains no active handles, then the call returns
+immediately with outcount = MPI_UNDEFINED.
+
+When one or more of the communications completed by :ref:`MPI_Waitsome` fails,
+then it is desirable to return specific information on each
+communication. The arguments outcount, array_of_indices, and
+array_of_statuses will be adjusted to indicate completion of all
+communications that have succeeded or failed. The call will return the
+error code MPI_ERR_IN_STATUS and the error field of each status returned
+will be set to indicate success or to indicate the specific error that
+occurred. The call will return MPI_SUCCESS if no request resulted in an
+error, and will return another error code if it failed for other reasons
+(such as invalid arguments). In such cases, it will not update the error
+fields of the statuses.
+
+If your application does not need to examine the *array_of_statuses*
+field, you can save resources by using the predefined constant
+MPI_STATUSES_IGNORE as a special value for the
+*array_of_statuses* argument.
+
+**Example:** Same code as the example in the :ref:`MPI_Waitany` man page, but
+using :ref:`MPI_Waitsome`.
+
+::
+
+   CALL MPI_COMM_SIZE(comm, size, ierr)
+   CALL MPI_COMM_RANK(comm, rank, ierr)
+   IF(rank .GT. 0) THEN ! client code
+       DO WHILE(.TRUE.)
+           CALL MPI_ISEND(a, n, MPI_REAL, 0, tag, comm, request, ierr)
+           CALL MPI_WAIT(request, status, ierr)
+       END DO
+   ELSE ! 
rank=0 -- server code + DO i=1, size-1 + CALL MPI_IRECV(a(1,i), n, MPI_REAL, i, tag, + comm, requests(i), ierr) + END DO + DO WHILE(.TRUE.) + CALL MPI_WAITSOME(size, request_list, numdone, + indices, statuses, ierr) + DO i=1, numdone + CALL DO_SERVICE(a(1, indices(i))) + CALL MPI_IRECV(a(1, indices(i)), n, MPI_REAL, 0, tag, + comm, requests(indices(i)), ierr) + END DO + END DO + END IF + + +NOTES +----- + +The array of indices are in the range 0 to incount-1 for C and in the +range 1 to incount for Fortran. + + +ERRORS +------ + +For each invocation of :ref:`MPI_Waitsome`, if one or more requests generate an +MPI error, only the *first* MPI request that caused an error will be +passed to its corresponding error handler. No other error handlers will +be invoked (even if multiple requests generated errors). However, *all* +requests that generate an error will have a relevant error code set in +the corresponding status.MPI_ERROR field (unless MPI_STATUSES_IGNORE was +used). + +The default error handler aborts the MPI job, except for I/O function +errors. The error handler may be changed with :ref:`MPI_Comm_set_errhandler`, +:ref:`MPI_File_set_errhandler`, or :ref:`MPI_Win_set_errhandler` (depending on the +type of MPI handle that generated the MPI request); the predefined error +handler MPI_ERRORS_RETURN may be used to cause error values to be +returned. Note that MPI does not guarantee that an MPI program can +continue past an error. + +If the invoked error handler allows :ref:`MPI_Waitsome` to return to the +caller, the value MPI_ERR_IN_STATUS will be returned in the C and +Fortran bindings. + + +.. 
seealso:: + :ref:`MPI_Comm_set_errhandler` :ref:`MPI_File_set_errhandler` :ref:`MPI_Test` :ref:`MPI_Testall` + :ref:`MPI_Testany` :ref:`MPI_Testsome` :ref:`MPI_Wait` :ref:`MPI_Waitall` :ref:`MPI_Waitany` + :ref:`MPI_Win_set_errhandler` diff --git a/docs/man-openmpi/man3/MPI_Win_allocate.3.rst b/docs/man-openmpi/man3/MPI_Win_allocate.3.rst new file mode 100644 index 00000000000..a825142ea8a --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Win_allocate.3.rst @@ -0,0 +1,134 @@ +.. _mpi_win_allocate: + + +MPI_Win_allocate +================ + +.. include_body + +:ref:`MPI_Win_allocate` - One-sided MPI call that allocates memory and +returns a window object for RMA operations. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Win_allocate (MPI_Aint size, int disp_unit, MPI_Info info, + MPI_Comm comm, void *baseptr, MPI_Win *win) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_WIN_ALLOCATE(SIZE, DISP_UNIT, INFO, COMM, BASEPTR, WIN, IERROR) + INTEGER(KIND=MPI_ADDRESS_KIND) SIZE, BASEPTR + INTEGER DISP_UNIT, INFO, COMM, WIN, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Win_allocate(size, disp_unit, info, comm, baseptr, win, ierror) + USE, INTRINSIC :: ISO_C_BINDING, ONLY : C_PTR + INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: size + INTEGER, INTENT(IN) :: disp_unit + TYPE(MPI_Info), INTENT(IN) :: info + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(C_PTR), INTENT(OUT) :: baseptr + TYPE(MPI_Win), INTENT(OUT) :: win + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``size``: Size of window in bytes (nonnegative integer). +* ``disp_unit``: Local unit size for displacements, in bytes (positive integer). +* ``info``: Info argument (handle). +* ``comm``: Communicator (handle). + +OUTPUT PARAMETERS +----------------- +* ``baseptr``: Initial address of window. 
+* ``win``: Window object returned by the call (handle).
+* ``IERROR``: Fortran only: Error status (integer).
+
+DESCRIPTION
+-----------
+
+:ref:`MPI_Win_allocate` is a collective call executed by all processes
+in the group of *comm*. On each process, it allocates memory of at
+least *size* bytes, returns a pointer to it, and returns a window
+object that can be used by all processes in *comm* to perform RMA
+operations. The returned memory consists of *size* bytes local to each
+process, starting at address *baseptr* and is associated with the
+window as if the user called :ref:`MPI_Win_create` on existing
+memory. The *size* argument may be different at each process and
+*size* = 0 is valid; however, a library might allocate and expose more
+memory in order to create a fast, globally symmetric allocation. The
+discussion of and rationales for :ref:`MPI_Alloc_mem` and
+:ref:`MPI_Free_mem` in MPI-3.1 section 8.2 also apply to
+:ref:`MPI_Win_allocate`; in particular, see the rationale in MPI-3.1
+section 8.2 for an explanation of the type used for *baseptr*.
+
+The displacement unit argument is provided to facilitate address
+arithmetic in RMA operations: the target displacement argument of an RMA
+operation is scaled by the factor *disp_unit* specified by the target
+process, at window creation.
+
+For supported info keys see :ref:`MPI_Win_create`.
+
+
+NOTES
+-----
+
+Common choices for *disp_unit* are 1 (no scaling), and (in C syntax)
+*sizeof(type)*, for a window that consists of an array of elements of
+type *type*. The latter choice will allow one to use array indices in RMA
+calls, and have those scaled correctly to byte displacements, even in a
+heterogeneous environment.
+
+Calling :ref:`MPI_Win_free` will deallocate the memory allocated
+by :ref:`MPI_Win_allocate`. It is thus erroneous to manually free
+*baseptr*.
+
+
+C NOTES
+-------
+
+While *baseptr is a void \* type, this is to allow easy use of any
+pointer object for this parameter. 
This argument is really a void \*\* +type.* + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Alloc_mem` :ref:`MPI_Free_mem` :ref:`MPI_Win_create` :ref:`MPI_Win_allocate_shared` + :ref:`MPI_Win_free` diff --git a/docs/man-openmpi/man3/MPI_Win_allocate_shared.3.rst b/docs/man-openmpi/man3/MPI_Win_allocate_shared.3.rst new file mode 100644 index 00000000000..63d29a01bd3 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Win_allocate_shared.3.rst @@ -0,0 +1,154 @@ +.. _mpi_win_allocate_shared: + + +MPI_Win_allocate_shared +======================= + +.. include_body + +:ref:`MPI_Win_allocate_shared` - One-sided MPI call that allocates shared +memory and returns a window object for RMA operations. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Win_allocate_shared (MPI_Aint size, int disp_unit, MPI_Info info, + MPI_Comm comm, void *baseptr, MPI_Win *win) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_WIN_ALLOCATE_SHARED(SIZE, DISP_UNIT, INFO, COMM, BASEPTR, WIN, IERROR) + INTEGER(KIND=MPI_ADDRESS_KIND) SIZE, BASEPTR + INTEGER DISP_UNIT, INFO, COMM, WIN, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE mpi_f08 + MPI_Win_allocate_shared(size, disp_unit, info, comm, baseptr, win, ierror) + USE, INTRINSIC :: ISO_C_BINDING, ONLY : C_PTR + INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: size + INTEGER, INTENT(IN) :: disp_unit + TYPE(MPI_Info), INTENT(IN) :: info + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(C_PTR), INTENT(OUT) :: baseptr + TYPE(MPI_Win), INTENT(OUT) :: win + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``size``: Size of window in bytes (nonnegative integer). +* ``disp_unit``: Local unit size for displacements, in bytes (positive integer). +* ``info``: Info argument (handle). +* ``comm``: Communicator (handle). + +OUTPUT PARAMETERS +----------------- +* ``baseptr``: Initial address of window. +* ``win``: Window object returned by the call (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Win_allocate_shared` is a collective call executed by all +processes in the group of *comm*. On each process, it allocates memory +of at least *size* bytes that is shared among all processes in *comm*, +and returns a pointer to the locally allocated segment in *baseptr* +that can be used for load/store accesses on the calling process. The +locally allocated memory can be the target of load/store accesses by +remote processes; the base pointers for other processes can be queried +using the function :ref:`MPI_Win_shared_query`. The call also returns +a window object that can be used by all processes in *comm* to perform +RMA operations. The *size* argument may be different at each process +and *size* = 0 is valid. It is the user's responsibility to ensure +that the communicator *comm* represents a group of processes that can +create a shared memory segment that can be accessed by all processes +in the group. 
The discussions of rationales for :ref:`MPI_Alloc_mem`
+and :ref:`MPI_Free_mem` in MPI-3.1 section 8.2 also apply to
+:ref:`MPI_Win_allocate_shared`; in particular, see the rationale in
+MPI-3.1 section 8.2 for an explanation of the type used for
+*baseptr*. The allocated memory is contiguous across process ranks
+unless the info key *alloc_shared_noncontig* is specified. Contiguous
+across process ranks means that the first address in the memory
+segment of process i is consecutive with the last address in the
+memory segment of process i -
+1. This may enable the user to calculate remote address offsets with
+local information only.
+
+The following info keys are supported:
+
+alloc_shared_noncontig
+   If not set to *true*, the allocation strategy is to allocate
+   contiguous memory across process ranks. This may limit the
+   performance on some architectures because it does not allow the
+   implementation to modify the data layout (e.g., padding to reduce
+   access latency).
+
+blocking_fence
+   If set to *true*, the osc/sm component will use :ref:`MPI_Barrier` for
+   :ref:`MPI_Win_fence`. If set to *false* a condition variable and counter
+   will be used instead. The default value is *false*. This info key is
+   Open MPI specific.
+
+For additional supported info keys see :ref:`MPI_Win_create`.
+
+
+NOTES
+-----
+
+Common choices for *disp_unit* are 1 (no scaling), and (in C syntax)
+*sizeof(type)*, for a window that consists of an array of elements of
+type *type*. The latter choice will allow one to use array indices in RMA
+calls, and have those scaled correctly to byte displacements, even in a
+heterogeneous environment.
+
+Calling :ref:`MPI_Win_free` will deallocate the memory allocated by
+:ref:`MPI_Win_allocate_shared`. It is thus erroneous to manually free
+*baseptr*.
+
+
+C NOTES
+-------
+
+While *baseptr* is a *void \** type, this is to allow easy use of any
+pointer object for this parameter. This argument is really a *void \*\**
+type. 
+ + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Alloc_mem` :ref:`MPI_Free_mem` :ref:`MPI_Win_allocate` :ref:`MPI_Win_create` + :ref:`MPI_Win_shared_query` :ref:`MPI_Win_free` diff --git a/docs/man-openmpi/man3/MPI_Win_attach.3.rst b/docs/man-openmpi/man3/MPI_Win_attach.3.rst new file mode 100644 index 00000000000..b69995bb05f --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Win_attach.3.rst @@ -0,0 +1,113 @@ +.. _mpi_win_attach: + + +MPI_Win_attach +============== + +.. include_body + +:ref:`MPI_Win_attach`, :ref:`MPI_Win_detach` - One-sided MPI call that attaches / +detaches a memory region to / from a window object for RMA operations. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + MPI_Win_attach(MPI_Win win, void *base, MPI_Aint size) + + MPI_Win_detach(MPI_Win win, void *base) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_WIN_ATTACH(WIN, BASE, SIZE, IERROR) + BASE(*) + INTEGER(KIND=MPI_ADDRESS_KIND) SIZE + INTEGER WIN, IERROR + + MPI_WIN_DETACH(WIN, BASE, IERROR) + BASE(*) + INTEGER WIN, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE mpi_f08 + MPI_Win_attach(win, base, size, ierror) + TYPE(MPI_Win), INTENT(IN) :: win + TYPE(*), DIMENSION(..), INTENT(IN) :: base + INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: size + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Win_detach(win, base, ierror) + TYPE(MPI_Win), INTENT(IN) :: win + TYPE(*), DIMENSION(..), INTENT(IN) :: base + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``win``: A window that was created with *MPI_Win_create_dynamic* +* ``base``: Initial address of window (choice). +* ``size``: Size of window in bytes (nonnegative integer). + +OUTPUT PARAMETERS +----------------- +* ``win``: Window object returned by the call (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Win_attach` is a one-sided MPI communication call used to attach a +memory region of *size* bytes starting at address *base* to a window for +RMA access. The window *win* must have been created using +:ref:`MPI_Win_create_dynamic`. Multiple non-overlapping memory regions may be +attached to the same dynamic window. Attaching overlapping memory +regions to the same dynamic window is erroneous. + +If the *base* value used by :ref:`MPI_Win_attach` was allocated by +:ref:`MPI_Alloc_mem`, the size of the window can be no larger than the value +set by the :ref:`MPI_ALLOC_MEM` function. + +:ref:`MPI_Win_detach` can be used to detach a previously attached memory region +from *win*. The memory address *base* and *win* must match arguments +passed to a previous call to :ref:`MPI_Win_attach`. + + +NOTES +----- + +Use memory allocated by :ref:`MPI_Alloc_mem` to guarantee properly aligned +window boundaries (such as word, double-word, cache line, page frame, +and so on). + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. 
+ +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_Win_c2f.3.rst b/docs/man-openmpi/man3/MPI_Win_c2f.3.rst new file mode 100644 index 00000000000..55d42511c7a --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Win_c2f.3.rst @@ -0,0 +1,9 @@ +.. _mpi_win_c2f: + +MPI_Win_c2f +=========== + .. include_body + +.. include:: ../man3/MPI_Comm_f2c.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Win_call_errhandler.3.rst b/docs/man-openmpi/man3/MPI_Win_call_errhandler.3.rst new file mode 100644 index 00000000000..53efe0a77b8 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Win_call_errhandler.3.rst @@ -0,0 +1,87 @@ +.. _mpi_win_call_errhandler: + + +MPI_Win_call_errhandler +======================= + +.. include_body + +:ref:`MPI_Win_call_errhandler` - Passes the supplied error code to the +error handler assigned to a window + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Win_call_errhandler(MPI_Win win, int errorcode) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_WIN_CALL_ERRHANDLER(WIN, ERRORCODE, IERROR) + INTEGER WIN, ERRORCODE, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Win_call_errhandler(win, errorcode, ierror) + TYPE(MPI_Win), INTENT(IN) :: win + INTEGER, INTENT(IN) :: errorcode + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``win``: Window with error handler (handle). +* ``errorcode``: MPI error code (integer). 
+ +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +This function invokes the error handler assigned to the window *win* +with the supplied error code *errorcode*. If the error handler was +successfully called, the process is not aborted, and the error handler +returns, this function returns MPI_SUCCESS. + + +NOTES +----- + +Users should note that the default error handler is +MPI_ERRORS_ARE_FATAL. Thus, calling this function will abort the window +processes if the default error handler has not been changed for this +window. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +See the MPI man page for a full list of MPI error codes. + + +.. seealso:: + :ref:`MPI_Win_create_errhandler` :ref:`MPI_Win_set_errhandler` diff --git a/docs/man-openmpi/man3/MPI_Win_complete.3.rst b/docs/man-openmpi/man3/MPI_Win_complete.3.rst new file mode 100644 index 00000000000..7a19c80e5ce --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Win_complete.3.rst @@ -0,0 +1,81 @@ +.. _mpi_win_complete: + + +MPI_Win_complete +================ + +.. include_body + +:ref:`MPI_Win_complete` - Completes an RMA access epoch on *win* started by +a call to :ref:`MPI_Win_start` + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + MPI_Win_complete(MPI_Win win) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_WIN_COMPLETE(WIN, IERROR) + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Win_complete(win, ierror) + TYPE(MPI_Win), INTENT(IN) :: win + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``win``: Window object (handle). + +OUTPUT PARAMETERS +----------------- +* ``IERROR``: Fortran only: Error status (integer). 
+ +DESCRIPTION +----------- + +:ref:`MPI_Win_complete` is a one-sided MPI communication synchronization call, +completing an RMA access epoch on *win* started by a call to +:ref:`MPI_Win_start`. :ref:`MPI_Win_complete` enforces the completion of preceding RMA +calls at the origin and not at the target. A put or accumulate call may +not have completed at the target when it has completed at the origin. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Win_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Win_start` diff --git a/docs/man-openmpi/man3/MPI_Win_create.3.rst b/docs/man-openmpi/man3/MPI_Win_create.3.rst new file mode 100644 index 00000000000..bb5d699d76b --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Win_create.3.rst @@ -0,0 +1,171 @@ +.. _mpi_win_create: + + +MPI_Win_create +============== + +.. include_body + +:ref:`MPI_Win_create` - One-sided MPI call that returns a window object for +RMA operations. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Win_create(void *base, MPI_Aint size, int disp_unit, + MPI_Info info, MPI_Comm comm, MPI_Win *win) + + +Fortran Syntax (see FORTRAN 77 NOTES) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_WIN_CREATE(BASE, SIZE, DISP_UNIT, INFO, COMM, WIN, IERROR) + BASE(*) + INTEGER(KIND=MPI_ADDRESS_KIND) SIZE + INTEGER DISP_UNIT, INFO, COMM, WIN, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +..
code-block:: fortran + + USE mpi_f08 + MPI_Win_create(base, size, disp_unit, info, comm, win, ierror) + TYPE(*), DIMENSION(..), ASYNCHRONOUS :: base + INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: size + INTEGER, INTENT(IN) :: disp_unit + TYPE(MPI_Info), INTENT(IN) :: info + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Win), INTENT(OUT) :: win + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``base``: Initial address of window (choice). +* ``size``: Size of window in bytes (nonnegative integer). +* ``disp_unit``: Local unit size for displacements, in bytes (positive integer). +* ``info``: Info argument (handle). +* ``comm``: Communicator (handle). + +OUTPUT PARAMETERS +----------------- +* ``win``: Window object returned by the call (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Win_create` is a one-sided MPI communication collective call executed +by all processes in the group of *comm*. It returns a window object that +can be used by these processes to perform RMA operations. Each process +specifies a window of existing memory that it exposes to RMA accesses by +the processes in the group of *comm*. The window consists of *size* +bytes, starting at address *base*. A process may elect to expose no +memory by specifying *size* = 0. + +If the *base* value used by :ref:`MPI_Win_create` was allocated by +:ref:`MPI_Alloc_mem`, the size of the window can be no larger than the value +set by the :ref:`MPI_ALLOC_MEM` function. + +The displacement unit argument is provided to facilitate address +arithmetic in RMA operations: the target displacement argument of an RMA +operation is scaled by the factor *disp_unit* specified by the target +process, at window creation. + +The following info keys are supported: + +no_locks + If set to *true*, then the implementation may assume that the local + window is never locked (by a call to :ref:`MPI_Win_lock` or + MPI_Win_lock_all). 
Setting this value if only active synchronization is used + may allow the implementation to enable certain optimizations. + +accumulate_ordering + By default, accumulate operations from one initiator to one target on + the same window memory location are strictly ordered. If the info key + accumulate_ordering is set to *none*, no ordering of accumulate + operations is guaranteed. The key can also be a comma-separated list of + required orderings consisting of *rar*, *war*, *raw*, and *waw* for + read-after-read, write-after-read, read-after-write, and + write-after-write, respectively. Looser ordering constraints are + likely to result in improved performance. + +accumulate_ops + If set to *same_op*, the implementation will assume that all + concurrent accumulate calls to the same target address will use the + same operation. If set to *same_op_no_op*, then the implementation + will assume that all concurrent accumulate calls to the same target + address will use the same operation or MPI_NO_OP. The default is + *same_op_no_op*. + +same_size + If set to *true*, then the implementation may assume that the + argument *size* is identical on all processes, and that all processes + have provided this info key with the same value. + +same_disp_unit + If set to *true*, then the implementation may assume that the + argument *disp_unit* is identical on all processes, and that all + processes have provided this info key with the same value. + + +NOTES +----- + +Common choices for *disp_unit* are 1 (no scaling), and (in C syntax) +*sizeof(type)*, for a window that consists of an array of elements of +type *type*. The latter choice will allow one to use array indices in RMA +calls, and have those scaled correctly to byte displacements, even in a +heterogeneous environment. + +Use memory allocated by :ref:`MPI_Alloc_mem` to guarantee properly aligned +window boundaries (such as word, double-word, cache line, page frame, +and so on).
+ + +FORTRAN 77 NOTES +---------------- + +The MPI standard prescribes portable Fortran syntax for the *SIZE* +argument only for Fortran 90. FORTRAN 77 users may use the non-portable +syntax + +:: + + INTEGER*MPI_ADDRESS_KIND SIZE + +where MPI_ADDRESS_KIND is a constant defined in mpif.h and gives the +length of the declared integer in bytes. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Alloc_mem` :ref:`MPI_Free_mem` :ref:`MPI_Win_allocate` :ref:`MPI_Win_allocate_shared` diff --git a/docs/man-openmpi/man3/MPI_Win_create_dynamic.3.rst b/docs/man-openmpi/man3/MPI_Win_create_dynamic.3.rst new file mode 100644 index 00000000000..dc9df473917 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Win_create_dynamic.3.rst @@ -0,0 +1,126 @@ +.. _mpi_win_create_dynamic: + + +MPI_Win_create_dynamic +====================== + +.. include_body + +:ref:`MPI_Win_create_dynamic` - One-sided MPI call that returns a window +object for RMA operations. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Win_create_dynamic(MPI_Info info, MPI_Comm comm, MPI_Win *win) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_WIN_CREATE_DYNAMIC(INFO, COMM, WIN, IERROR) + INTEGER INFO, COMM, WIN, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +..
code-block:: fortran + + USE mpi_f08 + MPI_Win_create_dynamic(info, comm, win, ierror) + TYPE(MPI_Info), INTENT(IN) :: info + TYPE(MPI_Comm), INTENT(IN) :: comm + TYPE(MPI_Win), INTENT(OUT) :: win + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``info``: Info argument (handle). +* ``comm``: Communicator (handle). + +OUTPUT PARAMETERS +----------------- +* ``win``: Window object returned by the call (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Win_create_dynamic` is a one-sided MPI communication collective call +executed by all processes in the group of *comm*. It returns a window +object without memory attached that can be used by these processes to +perform RMA operations. + +A window created with :ref:`MPI_Win_create_dynamic` requires the +*target_disp* argument for all RMA communication functions to be the +actual address at the target. + +The following info keys are supported: + +no_locks + If set to *true*, then the implementation may assume that the local + window is never locked (by a call to :ref:`MPI_Win_lock` or + MPI_Win_lock_all). Setting this value if only active synchronization is used + may allow the implementation to enable certain optimizations. + +accumulate_ordering + By default, accumulate operations from one initiator to one target on + the same window memory location are strictly ordered. If the info key + accumulate_ordering is set to *none*, no ordering of accumulate + operations is guaranteed. The key can also be a comma-separated list of + required orderings consisting of *rar*, *war*, *raw*, and *waw* for + read-after-read, write-after-read, read-after-write, and + write-after-write, respectively. Looser ordering constraints are + likely to result in improved performance. + +accumulate_ops + If set to *same_op*, the implementation will assume that all + concurrent accumulate calls to the same target address will use the + same operation.
If set to *same_op_no_op*, then the implementation + will assume that all concurrent accumulate calls to the same target + address will use the same operation or MPI_NO_OP. The default is + *same_op_no_op*. + + +NOTES +----- + +Since dynamically attaching memory to a window is a local operation, one +has to communicate the actual address at the target using +:ref:`MPI_Get_address` and some communication. + +Dynamic memory does not have any *disp_unit* associated and requires +correct offset calculations with proper type handling. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Win_attach` :ref:`MPI_Win_detach` :ref:`MPI_Get_address` diff --git a/docs/man-openmpi/man3/MPI_Win_create_errhandler.3.rst b/docs/man-openmpi/man3/MPI_Win_create_errhandler.3.rst new file mode 100644 index 00000000000..98d4c9d2a3e --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Win_create_errhandler.3.rst @@ -0,0 +1,101 @@ +.. _mpi_win_create_errhandler: + + +MPI_Win_create_errhandler +========================= + +.. include_body + +:ref:`MPI_Win_create_errhandler` - Creates an error handler for a window. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Win_create_errhandler(MPI_Win_errhandler_function *function, + MPI_Errhandler *errhandler) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! 
or the older form: INCLUDE 'mpif.h' + MPI_WIN_CREATE_ERRHANDLER(FUNCTION, ERRHANDLER, IERROR) + EXTERNAL FUNCTION + INTEGER ERRHANDLER, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Win_create_errhandler(win_errhandler_fn, errhandler, ierror) + PROCEDURE(MPI_Win_errhandler_function) :: win_errhandler_fn + TYPE(MPI_Errhandler), INTENT(OUT) :: errhandler + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +DEPRECATED TYPE NAME NOTE +------------------------- + +MPI-2.2 deprecated the MPI_Win_errhandler_fn and MPI::Win::Errhandler_fn +types in favor of MPI_Win_errhandler_function and +MPI::Win::Errhandler_function, respectively. Open MPI supports both +names (indeed, the \_fn names are typedefs to the \_function names). + + +INPUT PARAMETER +--------------- +* ``function``: User-defined error-handling procedure (function). + +OUTPUT PARAMETERS +----------------- +* ``errhandler``: MPI error handler (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +The user routine *function* should be, in C, a function of type +MPI_Win_errhandler_function, which is defined as + +:: + + typedef void MPI_Win_errhandler_function(MPI_Win *, int *, ...); + +The first argument is the window in use, the second is the error code to +be returned. + +In Fortran, the user routine should be of the form: + +.. code-block:: fortran + + SUBROUTINE WIN_ERRHANDLER_FUNCTION(WIN, ERROR_CODE, ...) + INTEGER WIN, ERROR_CODE + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Win_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned.
Note that MPI does not +guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_Win_create_keyval.3.rst b/docs/man-openmpi/man3/MPI_Win_create_keyval.3.rst new file mode 100644 index 00000000000..0a210c757d1 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Win_create_keyval.3.rst @@ -0,0 +1,144 @@ +.. _mpi_win_create_keyval: + + +MPI_Win_create_keyval +===================== + +.. include_body + +:ref:`MPI_Win_create_keyval` - Creates a keyval for a window. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Win_create_keyval(MPI_Win_copy_attr_function *win_copy_attr_fn, + MPI_Win_delete_attr_function *win_delete_attr_fn, + int *win_keyval, void *extra_state) + + +Fortran Syntax (see FORTRAN 77 NOTES) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_WIN_CREATE_KEYVAL(WIN_COPY_ATTR_FN, WIN_DELETE_ATTR_FN, + WIN_KEYVAL, EXTRA_STATE, IERROR) + EXTERNAL WIN_COPY_ATTR_FN, WIN_DELETE_ATTR_FN + INTEGER WIN_KEYVAL, IERROR + INTEGER(KIND=MPI_ADDRESS_KIND) EXTRA_STATE + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Win_create_keyval(win_copy_attr_fn, win_delete_attr_fn, win_keyval, + extra_state, ierror) + PROCEDURE(MPI_Win_copy_attr_function) :: win_copy_attr_fn + PROCEDURE(MPI_Win_delete_attr_function) :: win_delete_attr_fn + INTEGER, INTENT(OUT) :: win_keyval + INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: extra_state + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``win_copy_attr_fn``: Copy callback function for *win_keyval* (function). +* ``win_delete_attr_fn``: Delete callback function for *win_keyval* (function). +* ``extra_state``: Extra state for callback functions. + +OUTPUT PARAMETERS +----------------- +* ``win_keyval``: Key value for future access (integer). +* ``IERROR``: Fortran only: Error status (integer). 
+ +DESCRIPTION +----------- + +The argument *win_copy_attr_fn* may be specified as MPI_WIN_NULL_COPY_FN +or MPI_WIN_DUP_FN from either C or Fortran. MPI_WIN_NULL_COPY_FN is a +function that serves only to return *flag* = 0 and MPI_SUCCESS. +MPI_WIN_DUP_FN is a simple-minded copy function that sets *flag* = 1, +returns the value of *attribute_val_in* in *attribute_val_out*, and +returns MPI_SUCCESS. + +The argument *win_delete_attr_fn* may be specified as +MPI_WIN_NULL_DELETE_FN from either C or Fortran. MPI_WIN_NULL_DELETE_FN +is a function that serves only to return MPI_SUCCESS. + +The C callback functions are: + +.. code-block:: c + + typedef int MPI_Win_copy_attr_function(MPI_Win oldwin, int win_keyval, + void *extra_state, void *attribute_val_in, + void *attribute_val_out, int *flag); + +and + +:: + + typedef int MPI_Win_delete_attr_function(MPI_Win win, int win_keyval, + void *attribute_val, void *extra_state); + +The Fortran callback functions are: + +.. code-block:: fortran + + SUBROUTINE WIN_COPY_ATTR_FN(OLDWIN, WIN_KEYVAL, EXTRA_STATE, + ATTRIBUTE_VAL_IN, ATTRIBUTE_VAL_OUT, FLAG, IERROR) + INTEGER OLDWIN, WIN_KEYVAL, IERROR + INTEGER(KIND=MPI_ADDRESS_KIND) EXTRA_STATE, ATTRIBUTE_VAL_IN, + ATTRIBUTE_VAL_OUT + LOGICAL FLAG + +and + +:: + + SUBROUTINE WIN_DELETE_ATTR_FN(WIN, WIN_KEYVAL, ATTRIBUTE_VAL, + EXTRA_STATE, IERROR) + INTEGER WIN, WIN_KEYVAL, IERROR + INTEGER(KIND=MPI_ADDRESS_KIND) ATTRIBUTE_VAL, EXTRA_STATE + + +FORTRAN 77 NOTES +---------------- + +The MPI standard prescribes portable Fortran syntax for the +*EXTRA_STATE* argument only for Fortran 90. FORTRAN 77 users may use the +non-portable syntax + +:: + + INTEGER*MPI_ADDRESS_KIND EXTRA_STATE + +where MPI_ADDRESS_KIND is a constant defined in mpif.h and gives the +length of the declared integer in bytes. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. 
+ +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_Win_delete_attr.3.rst b/docs/man-openmpi/man3/MPI_Win_delete_attr.3.rst new file mode 100644 index 00000000000..4032a818c55 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Win_delete_attr.3.rst @@ -0,0 +1,82 @@ +.. _mpi_win_delete_attr: + + +MPI_Win_delete_attr +=================== + +.. include_body + +:ref:`MPI_Win_delete_attr` - Deletes an attribute from a window. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Win_delete_attr(MPI_Win win, int win_keyval) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_WIN_DELETE_ATTR(WIN, WIN_KEYVAL, IERROR) + INTEGER WIN, WIN_KEYVAL, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Win_delete_attr(win, win_keyval, ierror) + TYPE(MPI_Win), INTENT(IN) :: win + INTEGER, INTENT(IN) :: win_keyval + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT/OUTPUT PARAMETER +---------------------- +* ``win``: Window from which the attribute is deleted (handle). + +INPUT PARAMETER +--------------- +* ``win_keyval``: Key value (integer). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +NOTES +----- + +Note that it is not defined by the MPI standard what happens if the +delete_fn callback invokes other MPI functions. 
In Open MPI, it is not +valid for delete_fn callbacks (or any of their children) to add or +delete attributes on the same object on which the delete_fn callback is +being invoked. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_Win_detach.3.rst b/docs/man-openmpi/man3/MPI_Win_detach.3.rst new file mode 100644 index 00000000000..2add9b3a7bb --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Win_detach.3.rst @@ -0,0 +1,9 @@ +.. _mpi_win_detach: + +MPI_Win_detach +============== + .. include_body + +.. include:: ../man3/MPI_Win_attach.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Win_f2c.3.rst b/docs/man-openmpi/man3/MPI_Win_f2c.3.rst new file mode 100644 index 00000000000..7b17a3ce163 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Win_f2c.3.rst @@ -0,0 +1,9 @@ +.. _mpi_win_f2c: + +MPI_Win_f2c +=========== + .. include_body + +.. include:: ../man3/MPI_Comm_f2c.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Win_fence.3.rst b/docs/man-openmpi/man3/MPI_Win_fence.3.rst new file mode 100644 index 00000000000..3630dff201c --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Win_fence.3.rst @@ -0,0 +1,128 @@ +.. _mpi_win_fence: + + +MPI_Win_fence +============= + +.. include_body + +:ref:`MPI_Win_fence` - Synchronizes RMA calls on a window. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. 
code-block:: c + + #include + + int MPI_Win_fence(int assert, MPI_Win win) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_WIN_FENCE(ASSERT, WIN, IERROR) + INTEGER ASSERT, WIN, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Win_fence(assert, win, ierror) + INTEGER, INTENT(IN) :: assert + TYPE(MPI_Win), INTENT(IN) :: win + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``assert``: Program assertion (integer). +* ``win``: Window object (handle). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Win_fence` synchronizes RMA calls on *win*. The call is collective on +the group of *win*. All RMA operations on *win* originating at a given +process and started before the fence call will complete at that process +before the fence call returns. They will be completed at their target +before the fence call returns at the target. RMA operations on *win* +started by a process after the fence call returns will access their +target window only after :ref:`MPI_Win_fence` has been called by the target +process. + +The call completes an RMA access epoch if it was preceded by another +fence call and the local process issued RMA communication calls on *win* +between these two calls. The call completes an RMA exposure epoch if it +was preceded by another fence call and the local window was the target +of RMA accesses between these two calls. The call starts an RMA access +epoch if it is followed by another fence call and by RMA communication +calls issued between these two fence calls. The call starts an exposure +epoch if it is followed by another fence call and the local window is +the target of RMA accesses between these two fence calls. Thus, the +fence call is equivalent to calls to a subset of *post, start, complete, +wait*. 
+ +The *assert* argument is used to provide assertions on the context of +the call that may be used for various optimizations. A value of +*assert* = 0 is always valid. The following assertion values are +supported: + +MPI_MODE_NOPRECEDE + No local RMA calls have been issued before this fence. This assertion + must be provided by all or no members of the group of the window. It + may enable a faster fence call by avoiding unnecessary synchronization. + +MPI_MODE_NOSTORE + Informs that the local window was not updated by local stores or get + calls in the preceding epoch. + +MPI_MODE_NOPUT + Informs that the local window will not be updated by any put or + accumulate calls in the ensuing epoch (until the next fence call). + +MPI_MODE_NOSUCCEED + No local RMA calls will be issued after this fence. This assertion + must be provided by all or no members of the group of the window. It + may enable a faster fence call by avoiding unnecessary synchronization. + + +NOTE +---- + +Calls to :ref:`MPI_Win_fence` should both precede and follow calls to put, get +or accumulate that are synchronized with fence calls. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +..
seealso:: + :ref:`MPI_Win_create` :ref:`MPI_Win_start` :ref:`MPI_Win_post` :ref:`MPI_Win_complete` :ref:`MPI_Win_wait` diff --git a/docs/man-openmpi/man3/MPI_Win_flush.3.rst b/docs/man-openmpi/man3/MPI_Win_flush.3.rst new file mode 100644 index 00000000000..e6ac436b237 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Win_flush.3.rst @@ -0,0 +1,96 @@ +.. _mpi_win_flush: + + +MPI_Win_flush +============= + +.. include_body + +:ref:`MPI_Win_flush`, :ref:`MPI_Win_flush_all` - Complete all outstanding RMA +operations at both the origin and the target + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Win_flush (int rank, MPI_Win win) + + int MPI_Win_flush_all (MPI_Win win) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_WIN_FLUSH(RANK, WIN, IERROR) + INTEGER RANK, WIN, IERROR + + MPI_WIN_FLUSH_ALL(WIN, IERROR) + INTEGER WIN, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Win_flush(rank, win, ierror) + INTEGER, INTENT(IN) :: rank + TYPE(MPI_Win), INTENT(IN) :: win + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Win_flush_all(win, ierror) + TYPE(MPI_Win), INTENT(IN) :: win + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``rank``: Rank of window (nonnegative integer). +* ``win``: Window object (handle). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Win_flush` completes all outstanding RMA operations initiated by +the calling process to the target rank on the specified window. The +operations are completed both at the origin and at the target. +:ref:`MPI_Win_flush_all` completes all outstanding RMA operations to all +targets. + +Can only be called from within a passive target epoch. 
+ + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler +MPI_ERRORS_RETURN may be used to cause error values to be returned. Note +that MPI does not guarantee that an MPI program can continue past an +error. + + +.. seealso:: + :ref:`MPI_Win_flush_local` :ref:`MPI_Win_lock` :ref:`MPI_Win_lock_all` diff --git a/docs/man-openmpi/man3/MPI_Win_flush_all.3.rst b/docs/man-openmpi/man3/MPI_Win_flush_all.3.rst new file mode 100644 index 00000000000..9e673e621c3 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Win_flush_all.3.rst @@ -0,0 +1,9 @@ +.. _mpi_win_flush_all: + +MPI_Win_flush_all +================= + .. include_body + +.. include:: ../man3/MPI_Win_flush.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Win_flush_local.3.rst b/docs/man-openmpi/man3/MPI_Win_flush_local.3.rst new file mode 100644 index 00000000000..ec711b502a8 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Win_flush_local.3.rst @@ -0,0 +1,97 @@ +.. _mpi_win_flush_local: + + +MPI_Win_flush_local +=================== + +.. include_body + +:ref:`MPI_Win_flush_local`, :ref:`MPI_Win_flush_local_all` - Complete all +outstanding RMA operations at both the origin + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Win_flush_local (int rank, MPI_Win win) + + int MPI_Win_flush_local_all (MPI_Win win) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! 
or the older form: INCLUDE 'mpif.h' + MPI_WIN_FLUSH_LOCAL(RANK, WIN, IERROR) + INTEGER RANK, WIN, IERROR + + MPI_WIN_FLUSH_LOCAL_ALL(WIN, IERROR) + INTEGER WIN, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Win_flush_local(rank, win, ierror) + INTEGER, INTENT(IN) :: rank + TYPE(MPI_Win), INTENT(IN) :: win + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + MPI_Win_flush_local_all(win, ierror) + TYPE(MPI_Win), INTENT(IN) :: win + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``rank``: Rank of window (nonnegative integer). +* ``win``: Window object (handle). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Win_flush_local` locally completes at the origin all outstanding +RMA operations initiated by the calling process to the target process +specified by rank on the specified window. For example, after this +routine completes, the user may reuse any buffers provided to put, get, +or accumulate operations. :ref:`MPI_Win_flush_local_all` locally completes +at the origin all outstanding RMA operations to all targets. + +Can only be called from within a passive target epoch. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler +MPI_ERRORS_RETURN may be used to cause error values to be returned. Note +that MPI does not guarantee that an MPI program can continue past an +error. + + +.. 
seealso:: + :ref:`MPI_Win_flush` :ref:`MPI_Win_lock` :ref:`MPI_Win_lock_all` diff --git a/docs/man-openmpi/man3/MPI_Win_flush_local_all.3.rst b/docs/man-openmpi/man3/MPI_Win_flush_local_all.3.rst new file mode 100644 index 00000000000..2729eeb21e8 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Win_flush_local_all.3.rst @@ -0,0 +1,9 @@ +.. _mpi_win_flush_local_all: + +MPI_Win_flush_local_all +======================= + .. include_body + +.. include:: ../man3/MPI_Win_flush_local.3.rst + :start-after: .. include_body + diff --git a/docs/man-openmpi/man3/MPI_Win_free.3.rst b/docs/man-openmpi/man3/MPI_Win_free.3.rst new file mode 100644 index 00000000000..24e945b4e72 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Win_free.3.rst @@ -0,0 +1,91 @@ +.. _mpi_win_free: + + +MPI_Win_free +============ + +.. include_body + +:ref:`MPI_Win_free` - Frees the window object and returns a null handle. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Win_free(MPI_Win *win) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_WIN_FREE(WIN, IERROR) + INTEGER WIN, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Win_free(win, ierror) + TYPE(MPI_Win), INTENT(INOUT) :: win + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT/OUTPUT PARAMETER +---------------------- +* ``win``: Window object (handle). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Win_free` frees the window object *win* and returns a null handle +(equal to MPI_WIN_NULL). This collective call is executed by all +processes in the group associated with *win*. 
It can be invoked by a +process only after it has completed its involvement in RMA +communications on window *win*, that is, the process has called +:ref:`MPI_Win_fence`, or called :ref:`MPI_Win_unlock` to match a previous call to +:ref:`MPI_Win_lock`. When the call returns, the window memory can be freed. + + +NOTES +----- + +If the window was created through :ref:`MPI_Win_allocate` or +:ref:`MPI_Win_allocate_shared` then the memory buffer allocated in that +call will be freed when calling :ref:`MPI_Win_free`. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Win_create` :ref:`MPI_Win_allocate` :ref:`MPI_Win_allocate_shared` diff --git a/docs/man-openmpi/man3/MPI_Win_free_keyval.3.rst b/docs/man-openmpi/man3/MPI_Win_free_keyval.3.rst new file mode 100644 index 00000000000..df0bf95e475 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Win_free_keyval.3.rst @@ -0,0 +1,67 @@ +.. _mpi_win_free_keyval: + + +MPI_Win_free_keyval +=================== + +.. include_body + +:ref:`MPI_Win_free_keyval` - Frees a window keyval. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Win_free_keyval(int *win_keyval) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_WIN_FREE_KEYVAL(WIN_KEYVAL, IERROR) + INTEGER WIN_KEYVAL, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
+DESCRIPTION
+-----------
+
+:ref:`MPI_Win_free_keyval` frees the window key value *win_keyval*.
+
+
+ERRORS
code-block:: fortran + + USE mpi_f08 + MPI_Win_get_attr(win, win_keyval, attribute_val, flag, ierror) + TYPE(MPI_Win), INTENT(IN) :: win + INTEGER, INTENT(IN) :: win_keyval + INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(OUT) :: attribute_val + LOGICAL, INTENT(OUT) :: flag + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``win``: Window to which the attribute is attached (handle). +* ``win_keyval``: Key value (integer). + +OUTPUT PARAMETERS +----------------- +* ``attribute_val``: Attribute value, unless *ag* = false +* ``flag``: False if no attribute is associated with the key (logical). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +Obtains the value of a window attribute. + + +FORTRAN 77 NOTES +---------------- + +The MPI standard prescribes portable Fortran syntax for the +*ATTRIBUTE_VAL* argument only for Fortran 90. FORTRAN 77 users may use +the non-portable syntax + +:: + + INTEGER*MPI_ADDRESS_KIND ATTRIBUTE_VAL + +where MPI_ADDRESS_KIND is a constant defined in mpif.h and gives the +length of the declared integer in bytes. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_Win_get_errhandler.3.rst b/docs/man-openmpi/man3/MPI_Win_get_errhandler.3.rst new file mode 100644 index 00000000000..50676923c48 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Win_get_errhandler.3.rst @@ -0,0 +1,77 @@ +.. 
_mpi_win_get_errhandler: + + +MPI_Win_get_errhandler +====================== + +.. include_body + +:ref:`MPI_Win_get_errhandler` - Retrieves the error handler currently +associated with a window. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Win_get_errhandler(MPI_Win win, MPI_Errhandler *errhandler) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_WIN_GET_ERRHANDLER(WIN, ERRHANDLER, IERROR) + INTEGER WIN, ERRHANDLER, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Win_get_errhandler(win, errhandler, ierror) + TYPE(MPI_Win), INTENT(IN) :: win + TYPE(MPI_Errhandler), INTENT(OUT) :: errhandler + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETER +--------------- +* ``win``: Window (handle). + +OUTPUT PARAMETERS +----------------- +* ``errhandler``: Error handler currently associated with window (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Win_get_errhandler` retrieves the error handler currently associated +with a window. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_Win_get_group.3.rst b/docs/man-openmpi/man3/MPI_Win_get_group.3.rst new file mode 100644 index 00000000000..0509ac708c1 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Win_get_group.3.rst @@ -0,0 +1,78 @@ +.. 
_mpi_win_get_group: + + +MPI_Win_get_group +================= + +.. include_body + +:ref:`MPI_Win_get_group` - Returns a duplicate of the group of the +communicator used to create the window. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + MPI_Win_get_group(MPI_Win win, MPI_Group *group) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_WIN_GET_GROUP(WIN, GROUP, IERROR) + INTEGER WIN, GROUP, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Win_get_group(win, group, ierror) + TYPE(MPI_Win), INTENT(IN) :: win + TYPE(MPI_Group), INTENT(OUT) :: group + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETER +--------------- +* ``win``: Window object (handle). + +OUTPUT PARAMETERS +----------------- +* ``group``: Group of processes that share access to the window (handle). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Win_get_group` returns a duplicate of the group of the communicator +used to create the window associated with *win*. The group is returned +in *group*. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. 
diff --git a/docs/man-openmpi/man3/MPI_Win_get_info.3.rst b/docs/man-openmpi/man3/MPI_Win_get_info.3.rst new file mode 100644 index 00000000000..14ab3481bf2 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Win_get_info.3.rst @@ -0,0 +1,84 @@ +.. _mpi_win_get_info: + + +MPI_Win_get_info +================ + +.. include_body + +:ref:`MPI_Win_get_info` - Retrieves active window info hints + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Win_get_info(MPI_Win win, MPI_Info *info_used) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_WIN_GET_INFO(WIN, INFO_USED, IERROR) + INTEGER WIN, INFO_USED, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Win_get_info(win, info_used, ierror) + TYPE(MPI_Win), INTENT(IN) :: win + TYPE(MPI_Info), INTENT(OUT) :: info_used + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``win``: Window from which to receive active info hints + +OUTPUT PARAMETERS +----------------- +* ``info_used``: New info object returned with all active hints on this window. +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Win_get_info` returns a new info object containing the hints of the +window associated with *win*. The current setting of all hints actually +used by the system related to this window is returned in *info_used*. If +no such hints exist, a handle to a newly created info object is returned +that contains no key/value pair. The user is responsible for freeing +info_used via :ref:`MPI_Info_free`. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. 
The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Win_set_info` :ref:`MPI_Win_free` diff --git a/docs/man-openmpi/man3/MPI_Win_get_name.3.rst b/docs/man-openmpi/man3/MPI_Win_get_name.3.rst new file mode 100644 index 00000000000..118b0ff3de1 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Win_get_name.3.rst @@ -0,0 +1,76 @@ +.. _mpi_win_get_name: + + +MPI_Win_get_name +================ + +.. include_body + +:ref:`MPI_Win_get_name` - Obtains the name of a window. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Win_get_name(MPI_Win win, char *win_name, int *resultlen) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_WIN_GET_NAME(WIN, WIN_NAME, RESULTLEN, IERROR) + INTEGER WIN, RESULTLEN, IERROR + CHARACTER*(*) WIN_NAME + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Win_get_name(win, win_name, resultlen, ierror) + TYPE(MPI_Win), INTENT(IN) :: win + CHARACTER(LEN=MPI_MAX_OBJECT_NAME), INTENT(OUT) :: win_name + INTEGER, INTENT(OUT) :: resultlen + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETER +--------------- +* ``win``: Window whose name is to be returned (handle). + +OUTPUT PARAMETERS +----------------- +* ``win_name``: the name previously stored on the window, or an empty string if no such name exists (string). +* ``resultlen``: Length of returned name (integer). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. 
+ +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_Win_lock.3.rst b/docs/man-openmpi/man3/MPI_Win_lock.3.rst new file mode 100644 index 00000000000..e14d30b338d --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Win_lock.3.rst @@ -0,0 +1,112 @@ +.. _mpi_win_lock: + + +MPI_Win_lock +============ + +.. include_body + +:ref:`MPI_Win_lock` - Starts an RMA access epoch locking access to a +particular rank. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Win_lock(int lock_type, int rank, int assert, MPI_Win win) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_WIN_LOCK(LOCK_TYPE, RANK, ASSERT, WIN, IERROR) + INTEGER LOCK_TYPE, RANK, ASSERT, WIN, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Win_lock(lock_type, rank, assert, win, ierror) + INTEGER, INTENT(IN) :: lock_type, rank, assert + TYPE(MPI_Win), INTENT(IN) :: win + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``lock_type``: Either MPI_LOCK_EXCLUSIVE or MPI_LOCK_SHARED (state). +* ``rank``: Rank of locked window (nonnegative integer). +* ``assert``: Program assertion (integer). +* ``win``: Window object (handle). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +Starts an RMA access epoch. Locks ensure that only the windows created +by specific processes can be accessed by those processes (and by no +other processes) during that epoch. 
+ +Locks are used to protect accesses to the locked target window effected +by RMA calls issued between the lock and unlock call, and to protect +local load/store accesses to a locked local window executed between the +lock and unlock call. Accesses that are protected by an exclusive lock +will not be concurrent at the window site with other accesses to the +same window that are lock protected. Accesses that are protected by a +shared lock will not be concurrent at the window site with accesses +protected by an exclusive lock to the same window. + +The *assert* argument is used to provide assertions on the context of +the call that may be used for various optimizations. (See Section 6.4.4 +of the MPI-2 Standard.) A value of *assert* = 0 is always valid. The +following assertion value is supported: + +MPI_MODE_NOCHECK + No other processes will hold or attempt to acquire a conflicting lock + while the caller holds the window lock. + + +NOTES +----- + +In a client/server environment in which clients connect to a server and +create windows that span both the client and the server, if a client or +server that has obtained a lock on such a window and then terminates +abnormally, the server or other clients may hang in a :ref:`MPI_Win_lock` call, +failing to notice that the peer MPI job has terminated. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. 
seealso:: + :ref:`MPI_Win_unlock` :ref:`MPI_Win_lock_all` diff --git a/docs/man-openmpi/man3/MPI_Win_lock_all.3.rst b/docs/man-openmpi/man3/MPI_Win_lock_all.3.rst new file mode 100644 index 00000000000..9f16f5ea14d --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Win_lock_all.3.rst @@ -0,0 +1,113 @@ +.. _mpi_win_lock_all: + + +MPI_Win_lock_all +================ + +.. include_body + +:ref:`MPI_Win_lock_all` - Starts an RMA access epoch locking access to all +processes in the window + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Win_lock_all(int assert, MPI_Win win) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_WIN_LOCK_ALL(ASSERT, WIN, IERROR) + INTEGER ASSERT, WIN, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Win_lock_all(assert, win, ierror) + INTEGER, INTENT(IN) :: assert + TYPE(MPI_Win), INTENT(IN) :: win + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``assert``: Program assertion (integer). +* ``win``: Window object (handle). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +Starts an RMA access epoch to all processes in *win*, with a lock type +of MPI_LOCK_SHARED. During the epoch, the calling process can access the +window memory on all processes in *win* by using RMA operations. A +window locked with :ref:`MPI_Win_lock_all` must be unlocked with +:ref:`MPI_Win_unlock_all`. This routine is not collective — the ALL refers to a +lock on all members of the group of the window. + +Locks are used to protect accesses to the locked target window effected +by RMA calls issued between the lock and unlock call, and to protect +local load/store accesses to a locked local window executed between the +lock and unlock call. 
Accesses that are protected by an exclusive lock +will not be concurrent at the window site with other accesses to the +same window that are lock protected. Accesses that are protected by a +shared lock will not be concurrent at the window site with accesses +protected by an exclusive lock to the same window. + +The *assert* argument is used to provide assertions on the context of +the call that may be used for various optimizations. (See Section 6.4.4 +of the MPI-2 Standard.) A value of *assert* = 0 is always valid. The +following assertion value is supported: + +MPI_MODE_NOCHECK + No other processes will hold or attempt to acquire a conflicting lock + while the caller holds the window lock. + + +NOTES +----- + +In a client/server environment in which clients connect to a server and +create windows that span both the client and the server, if a client or +server that has obtained a lock on such a window and then terminates +abnormally, the server or other clients may hang in a :ref:`MPI_Win_lock_all` +call, failing to notice that the peer MPI job has terminated. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Win_unlock_all` :ref:`MPI_Win_lock` diff --git a/docs/man-openmpi/man3/MPI_Win_post.3.rst b/docs/man-openmpi/man3/MPI_Win_post.3.rst new file mode 100644 index 00000000000..049aeb59c86 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Win_post.3.rst @@ -0,0 +1,103 @@ +.. 
_mpi_win_post: + + +MPI_Win_post +============ + +.. include_body + +:ref:`MPI_Win_post` - Starts an RMA exposure epoch for the local window +associated with *win* + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Win_post(MPI_Group group, int assert, MPI_Win win) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_WIN_POST(GROUP, ASSERT, WIN, IERROR) + INTEGER GROUP, ASSERT, WIN, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Win_post(group, assert, win, ierror) + TYPE(MPI_Group), INTENT(IN) :: group + INTEGER, INTENT(IN) :: assert + TYPE(MPI_Win), INTENT(IN) :: win + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``group``: The group of origin processes (handle) +* ``assert``: Program assertion (integer) +* ``win``: Window object (handle) + +OUTPUT PARAMETERS +----------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +Starts an RMA exposure epoch for the local window associated with *win*. +Only the processes belonging to *group* should access the window with +RMA calls on *win* during this epoch. Each process in *group* must issue +a matching call to :ref:`MPI_Win_start`. :ref:`MPI_Win_post` does not block. + +The *assert* argument is used to provide assertions on the context of + +the call that may be used for various optimizations. A value of *assert* +^ 0 is always valid. The following assertion values are supported: + +MPI_MODE_NOCHECK + The matching calls to :ref:`MPI_Win_start` have not yet occurred on any + origin processes when this call is made. This assertion must be + present for all matching :ref:`MPI_Win_start` calls if used. + +MPI_MODE_NOSTORE + Informs that the local window was not updated by local stores or get + calls in the preceding epoch. 
+ +MPI_MODE_NOPUT + Informs that the local window will not be updated by put or + accummulate calls until the ensuing wait synchronization. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Win_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Win_start` :ref:`MPI_Win_wait` diff --git a/docs/man-openmpi/man3/MPI_Win_set_attr.3.rst b/docs/man-openmpi/man3/MPI_Win_set_attr.3.rst new file mode 100644 index 00000000000..6a25651a5a3 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Win_set_attr.3.rst @@ -0,0 +1,94 @@ +.. _mpi_win_set_attr: + + +MPI_Win_set_attr +================ + +.. include_body + +:ref:`MPI_Win_set_attr` - Sets the value of a window attribute. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Win_set_attr(MPI_Win win, int win_keyval, void *attribute_val) + + +Fortran Syntax (see FORTRAN 77 NOTES) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_WIN_SET_ATTR(WIN, WIN_KEYVAL, ATTRIBUTE_VAL, IERROR) + INTEGER WIN, WIN_KEYVAL, IERROR + INTEGER(KIND=MPI_ADDRESS_KIND) ATTRIBUTE_VAL + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. 
code-block:: fortran + + USE mpi_f08 + MPI_Win_set_attr(win, win_keyval, attribute_val, ierror) + TYPE(MPI_Win), INTENT(IN) :: win + INTEGER, INTENT(IN) :: win_keyval + INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: attribute_val + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT/OUTPUT PARAMETER +---------------------- +* ``win``: Window to which attribute will be attached (handle). + +INPUT PARAMETERS +---------------- +* ``win_keyval``: Key value (integer). +* ``attribute_val``: Attribute value. + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + + +FORTRAN 77 NOTES +---------------- + +The MPI standard prescribes portable Fortran syntax for the +*ATTRIBUTE_VAL* argument only for Fortran 90. FORTRAN 77 users may use +the non-portable syntax + +:: + + INTEGER*MPI_ADDRESS_KIND ATTRIBUTE_VAL + +where MPI_ADDRESS_KIND is a constant defined in mpif.h and gives the +length of the declared integer in bytes. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_Win_set_errhandler.3.rst b/docs/man-openmpi/man3/MPI_Win_set_errhandler.3.rst new file mode 100644 index 00000000000..35d8be91719 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Win_set_errhandler.3.rst @@ -0,0 +1,80 @@ +.. _mpi_win_set_errhandler: + + +MPI_Win_set_errhandler +====================== + +.. include_body + +:ref:`MPI_Win_set_errhandler` - Attaches a new error handler to a window. 
+ + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Win_set_errhandler(MPI_Win win, MPI_Errhandler errhandler) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_WIN_SET_ERRHANDLER(WIN, ERRHANDLER, IERROR) + INTEGER WIN, ERRHANDLER, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Win_set_errhandler(win, errhandler, ierror) + TYPE(MPI_Win), INTENT(IN) :: win + TYPE(MPI_Errhandler), INTENT(IN) :: errhandler + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT/OUTPUT PARAMETER +---------------------- +* ``win``: Window (handle). + +INPUT PARAMETER +--------------- +* ``errhandler``: New error handler for window (handle). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Win_set_errhandler` attaches a new error handler to a window. The +error handler must be either a predefined error handler or an error +handler created by a call to :ref:`MPI_Win_create_errhandler`. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. diff --git a/docs/man-openmpi/man3/MPI_Win_set_info.3.rst b/docs/man-openmpi/man3/MPI_Win_set_info.3.rst new file mode 100644 index 00000000000..612041bba3e --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Win_set_info.3.rst @@ -0,0 +1,83 @@ +.. _mpi_win_set_info: + + +MPI_Win_set_info +================ + +.. 
include_body + +:ref:`MPI_Win_set_info` - Set window info hints + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Win_set_info(MPI_Win win, MPI_Info info) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_WIN_SET_INFO(WIN, INFO, IERROR) + INTEGER WIN, INFO, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Win_set_info(win, info, ierror) + TYPE(MPI_Win), INTENT(IN) :: win + TYPE(MPI_Info), INTENT(IN) :: info + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``win``: Window on which to set info hints +* ``info``: Info object containing hints to be set on *win* + +OUTPUT PARAMETERS +----------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_WIN_SET_INFO` sets new values for the hints of the window associated +with *win.* :ref:`MPI_WIN_SET_INFO` is a collective routine. The info object +may be different on each process, but any info entries that an +implementation requires to be the same on all processes must appear with +the same value in each process's *info* object. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. 
seealso:: + :ref:`MPI_Win_get_info` :ref:`MPI_Info_create` :ref:`MPI_Info_set` :ref:`MPI_Info_free` diff --git a/docs/man-openmpi/man3/MPI_Win_set_name.3.rst b/docs/man-openmpi/man3/MPI_Win_set_name.3.rst new file mode 100644 index 00000000000..a8efa1e236d --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Win_set_name.3.rst @@ -0,0 +1,77 @@ +.. _mpi_win_set_name: + + +MPI_Win_set_name +================ + +.. include_body + +:ref:`MPI_Win_set_name` - Sets the name of a window. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Win_set_name(MPI_Win win, const char *win_name) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_WIN_SET_NAME(WIN, WIN_NAME, IERROR) + INTEGER WIN, IERROR + CHARACTER*(*) WIN_NAME + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Win_set_name(win, win_name, ierror) + TYPE(MPI_Win), INTENT(IN) :: win + CHARACTER(LEN=*), INTENT(IN) :: win_name + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT/OUTPUT PARAMETER +---------------------- +* ``win``: Window whose identifier is to be set (handle). + +INPUT PARAMETER +--------------- +* ``win_name``: The character string used as the name (string). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. 
diff --git a/docs/man-openmpi/man3/MPI_Win_shared_query.3.rst b/docs/man-openmpi/man3/MPI_Win_shared_query.3.rst new file mode 100644 index 00000000000..a7b06cdc29a --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Win_shared_query.3.rst @@ -0,0 +1,110 @@ +.. _mpi_win_shared_query: + + +MPI_Win_shared_query +==================== + +.. include_body + +:ref:`MPI_Win_shared_query` - Query a shared memory window + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Win_shared_query (MPI_Win win, int rank, MPI_Aint *size, + int *disp_unit, void *baseptr) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_WIN_SHARED_QUERY(WIN, RANK, SIZE, DISP_UNIT, BASEPTR, IERROR) + INTEGER WIN, RANK, DISP_UNIT, IERROR + INTEGER(KIND=MPI_ADDRESS_KIND) SIZE, BASEPTR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Win_shared_query(win, rank, size, disp_unit, baseptr, ierror) + USE, INTRINSIC :: ISO_C_BINDING, ONLY : C_PTR + TYPE(MPI_Win), INTENT(IN) :: win + INTEGER, INTENT(IN) :: rank + INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(OUT) :: size + INTEGER, INTENT(OUT) :: disp_unit + TYPE(C_PTR), INTENT(OUT) :: baseptr + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``win``: Shared memory window object (handle). +* ``rank``: Rank in the group of window *win* (non-negative integer) or MPI_PROC_NULL. + +OUTPUT PARAMETERS +----------------- +* ``size``: Size of the window segment (non-negative integer). +* ``disp_unit``: Local unit size for displacements, in bytes (positive integer). +* ``baseptr``: Address for load/store access to window segment (choice). +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Win_shared_query` queries the process-local address for +remote memory segments created with +:ref:`MPI_Win_allocate_shared`. 
This function can return different +process-local addresses for the same physical memory on different +processes. The returned memory can be used for load/store accesses +subject to the constraints defined in MPI-3.1 section 11.7. This +function can only be called with windows of flavor +MPI_WIN_FLAVOR_SHARED. If the passed window is not of flavor +MPI_WIN_FLAVOR_SHARED, the error MPI_ERR_RMA_FLAVOR is raised. When +rank is MPI_PROC_NULL, the *pointer*, *disp_unit*, and *size* returned +are the pointer, disp_unit, and size of the memory segment belonging to +the lowest rank that specified *size* > 0. If all processes in the +group attached to the window specified *size* = 0, then the call +returns *size* = 0 and a *baseptr* as if :ref:`MPI_Alloc_mem` was +called with *size* = 0. + + +C NOTES +------- + +The parameter *baseptr* is of type *void \** to allow passing any +pointer object for this parameter. The provided argument should be a +pointer to a pointer of arbitrary type (e.g. *void \*\**). + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Alloc_mem` :ref:`MPI_Win_allocate_shared` diff --git a/docs/man-openmpi/man3/MPI_Win_start.3.rst b/docs/man-openmpi/man3/MPI_Win_start.3.rst new file mode 100644 index 00000000000..bdf73419ebb --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Win_start.3.rst @@ -0,0 +1,96 @@ +.. _mpi_win_start: + + +MPI_Win_start +============= + +.. 
include_body + +:ref:`MPI_Win_start` - Starts an RMA access epoch for *win* + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Win_start(MPI_Group group, int assert, MPI_Win win) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_WIN_START(GROUP, ASSERT, WIN, IERROR) + INTEGER GROUP, ASSERT, WIN, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Win_start(group, assert, win, ierror) + TYPE(MPI_Group), INTENT(IN) :: group + INTEGER, INTENT(IN) :: assert + TYPE(MPI_Win), INTENT(IN) :: win + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``group``: The group of target processes (handle). +* ``assert``: Program assertion (integer). +* ``win``: Window object (handle). + +OUTPUT PARAMETERS +----------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Win_start` is a one-sided MPI communication synchronization call that +starts an RMA access epoch for *win*. RMA calls issued on *win* during +this epoch must access only windows at processes in *group*. Each +process in *group* must issue a matching call to :ref:`MPI_Win_post`. +:ref:`MPI_Win_start` is allowed to block until the corresponding :ref:`MPI_Win_post` +calls have been executed, but is not required to. + +The *assert* argument is used to provide assertions on the context of +the call that may be used for various optimizations. (See Section 6.4.4 +of the MPI-2 Standard.) A value of *assert* = 0 is always valid. The +following assertion value is supported: + +MPI_MODE_NOCHECK + When this value is passed in to this call, the library assumes that + the post call on the target has been called and it is not necessary + for the library to check to see if such a call has been made. 
+ + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Win_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Win_post` :ref:`MPI_Win_complete` diff --git a/docs/man-openmpi/man3/MPI_Win_sync.3.rst b/docs/man-openmpi/man3/MPI_Win_sync.3.rst new file mode 100644 index 00000000000..7b9079d67b5 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Win_sync.3.rst @@ -0,0 +1,79 @@ +.. _mpi_win_sync: + + +MPI_Win_sync +============ + +.. include_body + +:ref:`MPI_Win_sync` - Synchronize the private and public copies of the +window + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Win_sync (MPI_Win win) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_WIN_SYNC(WIN, IERROR) + INTEGER WIN, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Win_sync(win, ierror) + TYPE(MPI_Win), INTENT(IN) :: win + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``win``: Window object (handle). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Win_sync` synchronizes the private and public window copies of +*win*. 
For the purposes of synchronizing the private and public window, +:ref:`MPI_Win_sync` has the effect of ending and reopening an access and +exposure epoch on the window (note that it does not actually end an +epoch or complete any pending MPI RMA operations). + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler +MPI_ERRORS_RETURN may be used to cause error values to be returned. Note +that MPI does not guarantee that an MPI program can continue past an +error. diff --git a/docs/man-openmpi/man3/MPI_Win_test.3.rst b/docs/man-openmpi/man3/MPI_Win_test.3.rst new file mode 100644 index 00000000000..21dd3d0781f --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Win_test.3.rst @@ -0,0 +1,90 @@ +.. _mpi_win_test: + + +MPI_Win_test +============ + +.. include_body + +:ref:`MPI_Win_test` - Attempts to complete an RMA exposure epoch; a +nonblocking version of :ref:`MPI_Win_wait` + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Win_test(MPI_Win win, int *flag) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_WIN_TEST( WIN, FLAG, IERROR) + INTEGER WIN, IERROR + LOGICAL FLAG + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Win_test(win, flag, ierror) + TYPE(MPI_Win), INTENT(IN) :: win + LOGICAL, INTENT(OUT) :: flag + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``win``: Window object (handle) + +OUTPUT PARAMETERS +----------------- +* ``IERROR``: Fortran only: Error status (integer). 
+* ``flag``: The returning state of the test for epoch closure. + +DESCRIPTION +----------- + +:ref:`MPI_Win_test` is a one-sided MPI communication synchronization call, a +nonblocking version of :ref:`MPI_Win_wait`. It returns *flag = true* if +:ref:`MPI_Win_wait` would return, *flag = false* otherwise. The effect of +return of :ref:`MPI_Win_test` with *flag = true* is the same as the effect of a +return of :ref:`MPI_Win_wait`. If *flag = false* is returned, then the call has +no visible effect. + +Invoke :ref:`MPI_Win_test` only where :ref:`MPI_Win_wait` can be invoked. Once the +call has returned *flag = true*, it must not be invoked anew, until the +window is posted anew. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Win_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Win_post` :ref:`MPI_Win_wait` diff --git a/docs/man-openmpi/man3/MPI_Win_unlock.3.rst b/docs/man-openmpi/man3/MPI_Win_unlock.3.rst new file mode 100644 index 00000000000..c80b91b0a38 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Win_unlock.3.rst @@ -0,0 +1,91 @@ +.. _mpi_win_unlock: + + +MPI_Win_unlock +============== + +.. include_body + +:ref:`MPI_Win_unlock` - Completes an RMA access epoch started by a call to +:ref:`MPI_Win_lock`. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Win_unlock(int rank, MPI_Win win) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! 
or the older form: INCLUDE 'mpif.h' + MPI_WIN_UNLOCK(RANK, WIN, IERROR) + INTEGER RANK, WIN, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Win_unlock(rank, win, ierror) + INTEGER, INTENT(IN) :: rank + TYPE(MPI_Win), INTENT(IN) :: win + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``rank``: Rank of window (nonnegative integer). +* ``win``: Window object (handle). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Win_unlock` completes an RMA access epoch started by a call to +:ref:`MPI_Win_lock`. RMA operations issued during this period will have +completed both at the origin and at the target when the call returns. + +Locks are used to protect accesses to the locked target window effected +by RMA calls issued between the lock and unlock call, and to protect +local load/store accesses to a locked local window executed between the +lock and unlock call. Accesses that are protected by an exclusive lock +will not be concurrent at the window site with other accesses to the +same window that are lock protected. Accesses that are protected by a +shared lock will not be concurrent at the window site with accesses +protected by an exclusive lock to the same window. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. 
seealso:: + :ref:`MPI_Win_lock` :ref:`MPI_Win_unlock_all` diff --git a/docs/man-openmpi/man3/MPI_Win_unlock_all.3.rst b/docs/man-openmpi/man3/MPI_Win_unlock_all.3.rst new file mode 100644 index 00000000000..b199fb40c06 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Win_unlock_all.3.rst @@ -0,0 +1,89 @@ +.. _mpi_win_unlock_all: + + +MPI_Win_unlock_all +================== + +.. include_body + +:ref:`MPI_Win_unlock_all` - Completes an RMA access epoch started by a call +to :ref:`MPI_Win_lock_all`. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Win_unlock_all(MPI_Win win) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_WIN_UNLOCK_ALL(WIN, IERROR) + INTEGER WIN, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Win_unlock_all(win, ierror) + TYPE(MPI_Win), INTENT(IN) :: win + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``win``: Window object (handle). + +OUTPUT PARAMETER +---------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Win_unlock_all` completes an RMA access epoch started by a call to +:ref:`MPI_Win_lock_all`. RMA operations issued during this period will have +completed both at the origin and at the target when the call returns. + +Locks are used to protect accesses to the locked target window effected +by RMA calls issued between the lock and unlock call, and to protect +local load/store accesses to a locked local window executed between the +lock and unlock call. Accesses that are protected by an exclusive lock +will not be concurrent at the window site with other accesses to the +same window that are lock protected. Accesses that are protected by a +shared lock will not be concurrent at the window site with accesses +protected by an exclusive lock to the same window. 
+ + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Win_lock_all` :ref:`MPI_Win_unlock` diff --git a/docs/man-openmpi/man3/MPI_Win_wait.3.rst b/docs/man-openmpi/man3/MPI_Win_wait.3.rst new file mode 100644 index 00000000000..aec955bffa2 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Win_wait.3.rst @@ -0,0 +1,86 @@ +.. _mpi_win_wait: + + +MPI_Win_wait +============ + +.. include_body + +:ref:`MPI_Win_wait` - Completes an RMA exposure epoch started by a call to +:ref:`MPI_Win_post` on *win* + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + int MPI_Win_wait(MPI_Win win) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + MPI_WIN_WAIT( WIN, IERROR) + INTEGER WIN, IERROR + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + MPI_Win_wait(win, ierror) + TYPE(MPI_Win), INTENT(IN) :: win + INTEGER, OPTIONAL, INTENT(OUT) :: ierror + + +INPUT PARAMETERS +---------------- +* ``win``: Window object (handle). + +OUTPUT PARAMETERS +----------------- +* ``IERROR``: Fortran only: Error status (integer). + +DESCRIPTION +----------- + +:ref:`MPI_Win_wait` is a one-sided MPI communication synchronization call that +completes an RMA exposure epoch started by a call to :ref:`MPI_Win_post` on +*win*. 
This call matches calls to MPI_Win_complete(win) issued by each +of the processes that were granted access to the window during this +epoch. The call to :ref:`MPI_Win_wait` blocks until all matching calls to +:ref:`MPI_Win_complete` have occurred. This guarantees that all these origin +processes have completed their RMA accesses to the local window. When +the call returns, all these RMA accesses will have completed at the +target window. + + +ERRORS +------ + +Almost all MPI routines return an error value; C routines as the value +of the function and Fortran routines in the last argument. + +Before the error value is returned, the current MPI error handler is +called. By default, this error handler aborts the MPI job, except for +I/O function errors. The error handler may be changed with +:ref:`MPI_Win_set_errhandler`; the predefined error handler MPI_ERRORS_RETURN +may be used to cause error values to be returned. Note that MPI does not +guarantee that an MPI program can continue past an error. + + +.. seealso:: + :ref:`MPI_Win_post` diff --git a/docs/man-openmpi/man3/MPI_Wtick.3.rst b/docs/man-openmpi/man3/MPI_Wtick.3.rst new file mode 100644 index 00000000000..1d8b421af1d --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Wtick.3.rst @@ -0,0 +1,69 @@ +.. _mpi_wtick: + + +MPI_Wtick +========= + +.. include_body + +:ref:`MPI_Wtick` - Returns the resolution of :ref:`MPI_Wtime`. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + double MPI_Wtick() + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + DOUBLE PRECISION MPI_WTICK() + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + DOUBLE PRECISION MPI_WTICK() + + +RETURN VALUE +------------ + +Time in seconds of resolution of :ref:`MPI_Wtime`. + + +DESCRIPTION +----------- + +:ref:`MPI_Wtick` returns the resolution of :ref:`MPI_Wtime` in seconds. 
That is, it +returns, as a double-precision value, the number of seconds between +successive clock ticks. For example, if the clock is implemented by the +hardware as a counter that is incremented every millisecond, the value +returned by :ref:`MPI_Wtick` should be 10^-3. + + +NOTE +---- + +This function does not return an error value. Consequently, the result +of calling it before :ref:`MPI_Init` or after :ref:`MPI_Finalize` is undefined. + + +.. seealso:: + :ref:`MPI_Wtime` diff --git a/docs/man-openmpi/man3/MPI_Wtime.3.rst b/docs/man-openmpi/man3/MPI_Wtime.3.rst new file mode 100644 index 00000000000..0b6bc535c64 --- /dev/null +++ b/docs/man-openmpi/man3/MPI_Wtime.3.rst @@ -0,0 +1,115 @@ +.. _mpi_wtime: + + +MPI_Wtime +========= + +.. include_body + +:ref:`MPI_Wtime` - Returns an elapsed time on the calling processor. + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + + double MPI_Wtime() + + +Fortran Syntax +^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE MPI + ! or the older form: INCLUDE 'mpif.h' + DOUBLE PRECISION MPI_WTIME() + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: fortran + + USE mpi_f08 + DOUBLE PRECISION MPI_WTIME() + + +RETURN VALUE +------------ + +Time in seconds since an arbitrary time in the past. + + +DESCRIPTION +----------- + +:ref:`MPI_Wtime` returns a floating-point number of seconds, representing +elapsed wall-clock time since some time in the past. + +The "time in the past" is guaranteed not to change during the life of +the process. The user is responsible for converting large numbers of +seconds to other units if they are preferred. + +This function is portable (it returns seconds, not "ticks"), it allows +high resolution, and carries no unnecessary baggage. One would use it +like this: + +:: + + { + double starttime, endtime; + starttime = MPI_Wtime(); + .... stuff to be timed ... 
+ endtime = MPI_Wtime(); + printf("That took %f seconds\n",endtime-starttime); + } + +The times returned are local to the node that called them. There is no +requirement that different nodes return the "same" time. + + +NOTES +----- + +The boolean variable MPI_WTIME_IS_GLOBAL, a predefined attribute key +that indicates whether clocks are synchronized, does not have a valid +value in Open MPI, as the clocks are not guaranteed to be synchronized. + +This function is intended to be a high-resolution, elapsed (or wall) +clock. See :ref:`MPI_Wtick` to determine the resolution of :ref:`MPI_Wtime`. + +On POSIX platforms, this function may utilize a timer that is cheaper to +invoke than the gettimeofday() system call, but will fall back to +gettimeofday() if a cheap high-resolution timer is not available. The +ompi_info command can be consulted to see if Open MPI supports a native +high-resolution timer on your platform; see the value for ":ref:`MPI_WTIME` +support" (or "options:mpi-wtime" when viewing the parsable output). If +this value is "native", a method that is likely to be cheaper than +gettimeofday() will be used to obtain the time when :ref:`MPI_Wtime` is +invoked. + +For example, on platforms that support it, the *clock_gettime()* +function will be used to obtain a monotonic clock value with whatever +precision is supported on that platform (e.g., nanoseconds). + +Note, too, that the MCA parameter opal_timer_require_monotonic can +influence this behavior. It defaults to true, but if set to false, Open +MPI may use a finer-grained timing mechanism (e.g., the RDTSC/RDTSCP +clock ticks on x86_64 platforms), but is not guaranteed to be monotonic +in some cases (e.g., if the MPI process is not bound to a single +processor core). + +This function does not return an error value. Consequently, the result +of calling it before :ref:`MPI_Init` or after :ref:`MPI_Finalize` is undefined. + + +.. 
seealso:: + :ref:`MPI_Wtick` diff --git a/docs/man-openmpi/man3/OMPI_Affinity_str.3.rst b/docs/man-openmpi/man3/OMPI_Affinity_str.3.rst new file mode 100644 index 00000000000..5728c7ee2a0 --- /dev/null +++ b/docs/man-openmpi/man3/OMPI_Affinity_str.3.rst @@ -0,0 +1,189 @@ +.. _ompi_affinity_str: + + +OMPI_Affinity_str +================= + +.. include_body + +**OMPI_Affinity_str** - Obtain prettyprint strings of processor affinity +information for this process + + +SYNTAX +------ + + +C Syntax +^^^^^^^^ + +.. code-block:: c + + #include + #include + + int OMPI_Affinity_str(ompi_affinity_fmt_type_t fmt_type, + char ompi_bound[OMPI_AFFINITY_STRING_MAX], + char current_binding[OMPI_AFFINITY_STRING_MAX], + char exists[OMPI_AFFINITY_STRING_MAX]) + + +Fortran Syntax +^^^^^^^^^^^^^^ + +There is no Fortran binding for this function. + + +Fortran 2008 Syntax +^^^^^^^^^^^^^^^^^^^ + +There is no Fortran 2008 binding for this function. + + +C++ Syntax +^^^^^^^^^^ + +There is no C++ binding for this function. + + +INPUT PARAMETERS +---------------- +* ``fmt_type``: An enum indicating how to format the returned ompi_bound and current_binding strings. OMPI_AFFINITY_RSRC_STRING_FMT returns the string as human-readable resource names, such as "socket 0, core 0". +* ``OMPI_AFFINITY_LAYOUT_FMT returns ASCII art representing where this MPI``: process is bound relative to the machine resource layout. For example "[. B][. .]" shows the process that called the routine is bound to socket 0, core 1 in a system with 2 sockets, each containing 2 cores. +* ``See below for more output examples.``: + +OUTPUT PARAMETERS +----------------- +* ``ompi_bound``: A prettyprint string describing what processor(s) Open MPI bound this process to, or a string indicating that Open MPI did not bind this process. 
+* ``current_binding``: A prettyprint string describing what processor(s) this process is currently bound to, or a string indicating that the process is bound to all available processors (and is therefore considered "unbound"). +* ``exists``: A prettyprint string describing the available sockets and cores on this host. + +DESCRIPTION +----------- + +Open MPI may bind a process to specific sockets and/or cores at process +launch time. This non-standard Open MPI function call returns +prettyprint information about three things: + +Where Open MPI bound this process. + The string returned in **ompi_bound** will either indicate that Open + MPI did not bind this process to anything, or it will contain a + prettyprint description of the processor(s) to which Open MPI bound + this process. + +Where this process is currently bound. + Regardless of whether Open MPI bound this process or not, another + entity may have bound it. The string returned in **current_binding** + will indicate what the *current* binding is of this process, + regardless of what Open MPI may have done earlier. The string + returned will either indicate that the process is unbound (meaning + that it is bound to all available processors) or it will contain a + prettyprint description of the sockets and cores to which the process + is currently bound. + +What processors exist. + As a convenience to the user, the **exists** string will contain a + prettyprint description of the sockets and cores that this process + can see (which is *usually* all processors in the system). + + +Examples +^^^^^^^^ + +**Example 1:** Print out processes binding using resource string format. 
+ +:: + + int rank; + char ompi_bound[OMPI_AFFINITY_STRING_MAX]; + char current_binding[OMPI_AFFINITY_STRING_MAX]; + char exists[OMPI_AFFINITY_STRING_MAX]; + + MPI_Init(&argc, &argv); + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + + OMPI_Affinity_str(OMPI_AFFINITY_RSRC_STRING_FMT, + ompi_bound, current_binding, exists); + printf("rank %d: \n" + " ompi_bound: %s\n" + " current_binding: %s\n" + " exists: %s\n", + rank, ompi_bound, current_binding, exists); + ... + +Output of mpirun -np 2 -bind-to-core a.out: + +:: + + rank 0: + ompi_bound: socket 0[core 0] + current_binding: socket 0[core 0] + exists: socket 0 has 4 cores + rank 1: + ompi_bound: socket 0[core 1] + current_binding: socket 0[core 1] + exists: socket 0 has 4 cores + +Output of mpirun -np 2 -bind-to-socket a.out: + +:: + + rank 0: + ompi_bound: socket 0[core 0-3] + current_binding: Not bound (or bound to all available processors) + exists: socket 0 has 4 cores + rank 1: + ompi_bound: socket 0[core 0-3] + current_binding: Not bound (or bound to all available processors) + exists: socket 0 has 4 cores + +| +| **Example 2:** Print out processes binding using layout string format. + +:: + + int rank; + char ompi_bound[OMPI_AFFINITY_STRING_MAX]; + char current_binding[OMPI_AFFINITY_STRING_MAX]; + char exists[OMPI_AFFINITY_STRING_MAX]; + + MPI_Init(&argc, &argv); + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + + OMPI_Affinity_str(OMPI_AFFINITY_LAYOUT_FMT, + ompi_bound, current_binding, exists); + printf("rank %d: \n" + " ompi_bound: %s\n" + " current_binding: %s\n" + " exists: %s\n", + rank, ompi_bound, current_binding, exists); + ... + +Output of mpirun -np 2 -bind-to-core a.out: + +:: + + rank 0: + ompi_bound: [B . . .] + current_binding: [B . . .] + exists: [. . . .] + rank 1: + ompi_bound: [. B . .] + current_binding: [. B . .] + exists: [. . . .] + +Output of mpirun -np 2 -bind-to-socket a.out: + +:: + + rank 0: + ompi_bound: [B B B B] + current_binding: [B B B B] + exists: [. . . .] 
+ rank 1: + ompi_bound: [B B B B] + current_binding: [B B B B] + exists: [. . . .] + + +.. seealso:: mpirun(1) diff --git a/docs/man-openmpi/man3/index.rst b/docs/man-openmpi/man3/index.rst new file mode 100644 index 00000000000..bbcb2b70bd9 --- /dev/null +++ b/docs/man-openmpi/man3/index.rst @@ -0,0 +1,469 @@ +MPI API manual pages (section 3) +================================ + +.. toctree:: + :maxdepth: 1 + + MPI_Abort.3.rst + MPI_Accumulate.3.rst + MPI_Add_error_class.3.rst + MPI_Add_error_code.3.rst + MPI_Add_error_string.3.rst + MPI_Address.3.rst + MPI_Aint_add.3.rst + MPI_Aint_diff.3.rst + MPI_Allgather.3.rst + MPI_Allgather_init.3.rst + MPI_Allgatherv.3.rst + MPI_Allgatherv_init.3.rst + MPI_Alloc_mem.3.rst + MPI_Allreduce.3.rst + MPI_Allreduce_init.3.rst + MPI_Alltoall.3.rst + MPI_Alltoall_init.3.rst + MPI_Alltoallv.3.rst + MPI_Alltoallv_init.3.rst + MPI_Alltoallw.3.rst + MPI_Alltoallw_init.3.rst + MPI_Attr_delete.3.rst + MPI_Attr_get.3.rst + MPI_Attr_put.3.rst + MPI_Barrier.3.rst + MPI_Barrier_init.3.rst + MPI_Bcast.3.rst + MPI_Bcast_init.3.rst + MPI_Bsend.3.rst + MPI_Bsend_init.3.rst + MPI_Buffer_attach.3.rst + MPI_Buffer_detach.3.rst + MPI_Cancel.3.rst + MPI_Cart_coords.3.rst + MPI_Cart_create.3.rst + MPI_Cart_get.3.rst + MPI_Cart_map.3.rst + MPI_Cart_rank.3.rst + MPI_Cart_shift.3.rst + MPI_Cart_sub.3.rst + MPI_Cartdim_get.3.rst + MPI_Close_port.3.rst + MPI_Comm_accept.3.rst + MPI_Comm_c2f.3.rst + MPI_Comm_call_errhandler.3.rst + MPI_Comm_compare.3.rst + MPI_Comm_connect.3.rst + MPI_Comm_create.3.rst + MPI_Comm_create_errhandler.3.rst + MPI_Comm_create_from_group.3.rst + MPI_Comm_create_group.3.rst + MPI_Comm_create_keyval.3.rst + MPI_Comm_delete_attr.3.rst + MPI_Comm_disconnect.3.rst + MPI_Comm_dup.3.rst + MPI_Comm_dup_with_info.3.rst + MPI_Comm_f2c.3.rst + MPI_Comm_free.3.rst + MPI_Comm_free_keyval.3.rst + MPI_Comm_get_attr.3.rst + MPI_Comm_get_errhandler.3.rst + MPI_Comm_get_info.3.rst + MPI_Comm_get_name.3.rst + MPI_Comm_get_parent.3.rst + 
MPI_Comm_group.3.rst + MPI_Comm_idup.3.rst + MPI_Comm_idup_with_info.3.rst + MPI_Comm_join.3.rst + MPI_Comm_rank.3.rst + MPI_Comm_remote_group.3.rst + MPI_Comm_remote_size.3.rst + MPI_Comm_set_attr.3.rst + MPI_Comm_set_errhandler.3.rst + MPI_Comm_set_info.3.rst + MPI_Comm_set_name.3.rst + MPI_Comm_size.3.rst + MPI_Comm_spawn.3.rst + MPI_Comm_spawn_multiple.3.rst + MPI_Comm_split.3.rst + MPI_Comm_split_type.3.rst + MPI_Comm_test_inter.3.rst + MPI_Compare_and_swap.3.rst + MPI_Dims_create.3.rst + MPI_Dist_graph_create.3.rst + MPI_Dist_graph_create_adjacent.3.rst + MPI_Dist_graph_neighbors.3.rst + MPI_Dist_graph_neighbors_count.3.rst + MPI_Errhandler_create.3.rst + MPI_Errhandler_free.3.rst + MPI_Errhandler_get.3.rst + MPI_Errhandler_set.3.rst + MPI_Error_class.3.rst + MPI_Error_string.3.rst + MPI_Exscan.3.rst + MPI_Exscan_init.3.rst + MPI_Fetch_and_op.3.rst + MPI_File_c2f.3.rst + MPI_File_call_errhandler.3.rst + MPI_File_close.3.rst + MPI_File_create_errhandler.3.rst + MPI_File_delete.3.rst + MPI_File_f2c.3.rst + MPI_File_get_amode.3.rst + MPI_File_get_atomicity.3.rst + MPI_File_get_byte_offset.3.rst + MPI_File_get_errhandler.3.rst + MPI_File_get_group.3.rst + MPI_File_get_info.3.rst + MPI_File_get_position.3.rst + MPI_File_get_position_shared.3.rst + MPI_File_get_size.3.rst + MPI_File_get_type_extent.3.rst + MPI_File_get_view.3.rst + MPI_File_iread.3.rst + MPI_File_iread_all.3.rst + MPI_File_iread_at.3.rst + MPI_File_iread_at_all.3.rst + MPI_File_iread_shared.3.rst + MPI_File_iwrite.3.rst + MPI_File_iwrite_all.3.rst + MPI_File_iwrite_at.3.rst + MPI_File_iwrite_at_all.3.rst + MPI_File_iwrite_shared.3.rst + MPI_File_open.3.rst + MPI_File_preallocate.3.rst + MPI_File_read.3.rst + MPI_File_read_all.3.rst + MPI_File_read_all_begin.3.rst + MPI_File_read_all_end.3.rst + MPI_File_read_at.3.rst + MPI_File_read_at_all.3.rst + MPI_File_read_at_all_begin.3.rst + MPI_File_read_at_all_end.3.rst + MPI_File_read_ordered.3.rst + MPI_File_read_ordered_begin.3.rst + 
MPI_File_read_ordered_end.3.rst + MPI_File_read_shared.3.rst + MPI_File_seek.3.rst + MPI_File_seek_shared.3.rst + MPI_File_set_atomicity.3.rst + MPI_File_set_errhandler.3.rst + MPI_File_set_info.3.rst + MPI_File_set_size.3.rst + MPI_File_set_view.3.rst + MPI_File_sync.3.rst + MPI_File_write.3.rst + MPI_File_write_all.3.rst + MPI_File_write_all_begin.3.rst + MPI_File_write_all_end.3.rst + MPI_File_write_at.3.rst + MPI_File_write_at_all.3.rst + MPI_File_write_at_all_begin.3.rst + MPI_File_write_at_all_end.3.rst + MPI_File_write_ordered.3.rst + MPI_File_write_ordered_begin.3.rst + MPI_File_write_ordered_end.3.rst + MPI_File_write_shared.3.rst + MPI_Finalize.3.rst + MPI_Finalized.3.rst + MPI_Free_mem.3.rst + MPI_Gather.3.rst + MPI_Gather_init.3.rst + MPI_Gatherv.3.rst + MPI_Gatherv_init.3.rst + MPI_Get.3.rst + MPI_Get_accumulate.3.rst + MPI_Get_address.3.rst + MPI_Get_count.3.rst + MPI_Get_elements.3.rst + MPI_Get_elements_x.3.rst + MPI_Get_library_version.3.rst + MPI_Get_processor_name.3.rst + MPI_Get_version.3.rst + MPI_Graph_create.3.rst + MPI_Graph_get.3.rst + MPI_Graph_map.3.rst + MPI_Graph_neighbors.3.rst + MPI_Graph_neighbors_count.3.rst + MPI_Graphdims_get.3.rst + MPI_Grequest_complete.3.rst + MPI_Grequest_start.3.rst + MPI_Group_c2f.3.rst + MPI_Group_compare.3.rst + MPI_Group_difference.3.rst + MPI_Group_excl.3.rst + MPI_Group_f2c.3.rst + MPI_Group_free.3.rst + MPI_Group_from_session_pset.3.rst + MPI_Group_incl.3.rst + MPI_Group_intersection.3.rst + MPI_Group_range_excl.3.rst + MPI_Group_range_incl.3.rst + MPI_Group_rank.3.rst + MPI_Group_size.3.rst + MPI_Group_translate_ranks.3.rst + MPI_Group_union.3.rst + MPI_Iallgather.3.rst + MPI_Iallgatherv.3.rst + MPI_Iallreduce.3.rst + MPI_Ialltoall.3.rst + MPI_Ialltoallv.3.rst + MPI_Ialltoallw.3.rst + MPI_Ibarrier.3.rst + MPI_Ibcast.3.rst + MPI_Ibsend.3.rst + MPI_Iexscan.3.rst + MPI_Igather.3.rst + MPI_Igatherv.3.rst + MPI_Improbe.3.rst + MPI_Imrecv.3.rst + MPI_Ineighbor_allgather.3.rst + 
MPI_Ineighbor_allgatherv.3.rst + MPI_Ineighbor_alltoall.3.rst + MPI_Ineighbor_alltoallv.3.rst + MPI_Ineighbor_alltoallw.3.rst + MPI_Info_c2f.3.rst + MPI_Info_create.3.rst + MPI_Info_delete.3.rst + MPI_Info_dup.3.rst + MPI_Info_env.3.rst + MPI_Info_f2c.3.rst + MPI_Info_free.3.rst + MPI_Info_get.3.rst + MPI_Info_get_nkeys.3.rst + MPI_Info_get_nthkey.3.rst + MPI_Info_get_string.3.rst + MPI_Info_get_valuelen.3.rst + MPI_Info_set.3.rst + MPI_Init.3.rst + MPI_Init_thread.3.rst + MPI_Initialized.3.rst + MPI_Intercomm_create.3.rst + MPI_Intercomm_create_from_groups.3.rst + MPI_Intercomm_merge.3.rst + MPI_Iprobe.3.rst + MPI_Irecv.3.rst + MPI_Ireduce.3.rst + MPI_Ireduce_scatter.3.rst + MPI_Ireduce_scatter_block.3.rst + MPI_Irsend.3.rst + MPI_Is_thread_main.3.rst + MPI_Iscan.3.rst + MPI_Iscatter.3.rst + MPI_Iscatterv.3.rst + MPI_Isend.3.rst + MPI_Isendrecv.3.rst + MPI_Isendrecv_replace.3.rst + MPI_Issend.3.rst + MPI_Keyval_create.3.rst + MPI_Keyval_free.3.rst + MPI_Lookup_name.3.rst + MPI_Message_c2f.3.rst + MPI_Message_f2c.3.rst + MPI_Mprobe.3.rst + MPI_Mrecv.3.rst + MPI_Neighbor_allgather.3.rst + MPI_Neighbor_allgather_init.3.rst + MPI_Neighbor_allgatherv.3.rst + MPI_Neighbor_allgatherv_init.3.rst + MPI_Neighbor_alltoall.3.rst + MPI_Neighbor_alltoall_init.3.rst + MPI_Neighbor_alltoallv.3.rst + MPI_Neighbor_alltoallv_init.3.rst + MPI_Neighbor_alltoallw.3.rst + MPI_Neighbor_alltoallw_init.3.rst + MPI_Op_c2f.3.rst + MPI_Op_commutative.3.rst + MPI_Op_create.3.rst + MPI_Op_f2c.3.rst + MPI_Op_free.3.rst + MPI_Open_port.3.rst + MPI_Pack.3.rst + MPI_Pack_external.3.rst + MPI_Pack_external_size.3.rst + MPI_Pack_size.3.rst + MPI_Parrived.3.rst + MPI_Pcontrol.3.rst + MPI_Pready.3.rst + MPI_Pready_list.3.rst + MPI_Pready_range.3.rst + MPI_Precv_init.3.rst + MPI_Probe.3.rst + MPI_Psend_init.3.rst + MPI_Publish_name.3.rst + MPI_Put.3.rst + MPI_Query_thread.3.rst + MPI_Raccumulate.3.rst + MPI_Recv.3.rst + MPI_Recv_init.3.rst + MPI_Reduce.3.rst + MPI_Reduce_init.3.rst + 
MPI_Reduce_local.3.rst + MPI_Reduce_scatter.3.rst + MPI_Reduce_scatter_block.3.rst + MPI_Reduce_scatter_block_init.3.rst + MPI_Reduce_scatter_init.3.rst + MPI_Register_datarep.3.rst + MPI_Request_c2f.3.rst + MPI_Request_f2c.3.rst + MPI_Request_free.3.rst + MPI_Request_get_status.3.rst + MPI_Rget.3.rst + MPI_Rget_accumulate.3.rst + MPI_Rput.3.rst + MPI_Rsend.3.rst + MPI_Rsend_init.3.rst + MPI_Scan.3.rst + MPI_Scan_init.3.rst + MPI_Scatter.3.rst + MPI_Scatter_init.3.rst + MPI_Scatterv.3.rst + MPI_Scatterv_init.3.rst + MPI_Send.3.rst + MPI_Send_init.3.rst + MPI_Sendrecv.3.rst + MPI_Sendrecv_replace.3.rst + MPI_Session_create_errhandler.3.rst + MPI_Session_f2c.3.rst + MPI_Session_finalize.3.rst + MPI_Session_get_info.3.rst + MPI_Session_get_nth_pset.3.rst + MPI_Session_get_num_psets.3.rst + MPI_Session_get_pset_info.3.rst + MPI_Session_init.3.rst + MPI_Sizeof.3.rst + MPI_Ssend.3.rst + MPI_Ssend_init.3.rst + MPI_Start.3.rst + MPI_Startall.3.rst + MPI_Status_c2f.3.rst + MPI_Status_c2f08.3.rst + MPI_Status_f082c.3.rst + MPI_Status_f082f.3.rst + MPI_Status_f2c.3.rst + MPI_Status_f2f08.3.rst + MPI_Status_set_cancelled.3.rst + MPI_Status_set_elements.3.rst + MPI_Status_set_elements_x.3.rst + MPI_T_category_changed.3.rst + MPI_T_category_get_categories.3.rst + MPI_T_category_get_cvars.3.rst + MPI_T_category_get_info.3.rst + MPI_T_category_get_num.3.rst + MPI_T_category_get_pvars.3.rst + MPI_T_cvar_get_info.3.rst + MPI_T_cvar_get_num.3.rst + MPI_T_cvar_handle_alloc.3.rst + MPI_T_cvar_handle_free.3.rst + MPI_T_cvar_read.3.rst + MPI_T_cvar_write.3.rst + MPI_T_enum_get_info.3.rst + MPI_T_enum_get_item.3.rst + MPI_T_finalize.3.rst + MPI_T_init_thread.3.rst + MPI_T_pvar_get_info.3.rst + MPI_T_pvar_get_num.3.rst + MPI_T_pvar_handle_alloc.3.rst + MPI_T_pvar_handle_free.3.rst + MPI_T_pvar_read.3.rst + MPI_T_pvar_readreset.3.rst + MPI_T_pvar_reset.3.rst + MPI_T_pvar_session_create.3.rst + MPI_T_pvar_session_free.3.rst + MPI_T_pvar_start.3.rst + MPI_T_pvar_stop.3.rst + 
MPI_T_pvar_write.3.rst + MPI_Test.3.rst + MPI_Test_cancelled.3.rst + MPI_Testall.3.rst + MPI_Testany.3.rst + MPI_Testsome.3.rst + MPI_Topo_test.3.rst + MPI_Type_c2f.3.rst + MPI_Type_commit.3.rst + MPI_Type_contiguous.3.rst + MPI_Type_create_darray.3.rst + MPI_Type_create_f90_complex.3.rst + MPI_Type_create_f90_integer.3.rst + MPI_Type_create_f90_real.3.rst + MPI_Type_create_hindexed.3.rst + MPI_Type_create_hindexed_block.3.rst + MPI_Type_create_hvector.3.rst + MPI_Type_create_indexed_block.3.rst + MPI_Type_create_keyval.3.rst + MPI_Type_create_resized.3.rst + MPI_Type_create_struct.3.rst + MPI_Type_create_subarray.3.rst + MPI_Type_delete_attr.3.rst + MPI_Type_dup.3.rst + MPI_Type_extent.3.rst + MPI_Type_f2c.3.rst + MPI_Type_free.3.rst + MPI_Type_free_keyval.3.rst + MPI_Type_get_attr.3.rst + MPI_Type_get_contents.3.rst + MPI_Type_get_envelope.3.rst + MPI_Type_get_extent.3.rst + MPI_Type_get_extent_x.3.rst + MPI_Type_get_name.3.rst + MPI_Type_get_true_extent.3.rst + MPI_Type_get_true_extent_x.3.rst + MPI_Type_hindexed.3.rst + MPI_Type_hvector.3.rst + MPI_Type_indexed.3.rst + MPI_Type_lb.3.rst + MPI_Type_match_size.3.rst + MPI_Type_set_attr.3.rst + MPI_Type_set_name.3.rst + MPI_Type_size.3.rst + MPI_Type_size_x.3.rst + MPI_Type_struct.3.rst + MPI_Type_ub.3.rst + MPI_Type_vector.3.rst + MPI_Unpack.3.rst + MPI_Unpack_external.3.rst + MPI_Unpublish_name.3.rst + MPI_Wait.3.rst + MPI_Waitall.3.rst + MPI_Waitany.3.rst + MPI_Waitsome.3.rst + MPI_Win_allocate.3.rst + MPI_Win_allocate_shared.3.rst + MPI_Win_attach.3.rst + MPI_Win_c2f.3.rst + MPI_Win_call_errhandler.3.rst + MPI_Win_complete.3.rst + MPI_Win_create.3.rst + MPI_Win_create_dynamic.3.rst + MPI_Win_create_errhandler.3.rst + MPI_Win_create_keyval.3.rst + MPI_Win_delete_attr.3.rst + MPI_Win_detach.3.rst + MPI_Win_f2c.3.rst + MPI_Win_fence.3.rst + MPI_Win_flush.3.rst + MPI_Win_flush_all.3.rst + MPI_Win_flush_local.3.rst + MPI_Win_flush_local_all.3.rst + MPI_Win_free.3.rst + MPI_Win_free_keyval.3.rst + 
MPI_Win_get_attr.3.rst + MPI_Win_get_errhandler.3.rst + MPI_Win_get_group.3.rst + MPI_Win_get_info.3.rst + MPI_Win_get_name.3.rst + MPI_Win_lock.3.rst + MPI_Win_lock_all.3.rst + MPI_Win_post.3.rst + MPI_Win_set_attr.3.rst + MPI_Win_set_errhandler.3.rst + MPI_Win_set_info.3.rst + MPI_Win_set_name.3.rst + MPI_Win_shared_query.3.rst + MPI_Win_start.3.rst + MPI_Win_sync.3.rst + MPI_Win_test.3.rst + MPI_Win_unlock.3.rst + MPI_Win_unlock_all.3.rst + MPI_Win_wait.3.rst + MPI_Wtick.3.rst + MPI_Wtime.3.rst + MPIX_Query_cuda_support.3.rst + OMPI_Affinity_str.3.rst diff --git a/docs/man-openmpi/man5/MPI_T.5.rst b/docs/man-openmpi/man5/MPI_T.5.rst new file mode 100644 index 00000000000..0b2074e58b6 --- /dev/null +++ b/docs/man-openmpi/man5/MPI_T.5.rst @@ -0,0 +1,125 @@ +.. _mpi_t: + +MPI_T +===== + +.. include_body + +Open MPI's MPI_T interface - General information + +DESCRIPTION +----------- + +There are a few Open MPI-specific notes worth mentioning about its MPI_T +interface implementation. + +MPI_T Control Variables +^^^^^^^^^^^^^^^^^^^^^^^ + +Open MPI's implementation of the MPI_T Control Variable ("cvar") APIs is +an interface to Open MPI's underlying Modular Component Architecture +(MCA) parameters/variables. Simply put: using the MPI_T cvar interface +is another mechanism to get/set Open MPI MCA parameters. + +In order of precedence (highest to lowest), Open MPI provides the +following mechanisms to set MCA parameters: + +1. The MPI_T interface has the highest precedence. Specifically: values + set via the MPI_T interface will override all other settings. +2. The mpirun(1) / mpiexec(1) command line (e.g., via the --mca + parameter). +3. Environment variables. +4. Parameter files have the lowest precedence. Specifically: values set + via parameter files can be overridden by any of the other + MCA-variable setting mechanisms. + +MPI initialization +^^^^^^^^^^^^^^^^^^ + +An application may use the MPI_T interface before MPI is initialized to +set MCA parameters. 
Setting MPI-level MCA parameters before MPI is +initialized may affect *how* MPI is initialized (e.g., by influencing +which frameworks and components are selected). + +The following example sets the pml and btl MCA params before invoking +:ref:`MPI_Init` in order to force a specific selection of PML and BTL +components: + +.. code:: c + + int provided, index, count; + MPI_T_cvar_handle pml_handle, btl_handle; + char pml_value[64], btl_value[64]; + + MPI_T_init_thread(MPI_THREAD_SINGLE, &provided); + + MPI_T_cvar_get_index("pml", &index); + MPI_T_cvar_handle_alloc(index, NULL, &pml_handle, &count); + MPI_T_cvar_write(pml_handle, "ob1"); + + MPI_T_cvar_get_index("btl", &index); + MPI_T_cvar_handle_alloc(index, NULL, &btl_handle, &count); + MPI_T_cvar_write(btl_handle, "tcp,vader,self"); + + MPI_T_cvar_read(pml_handle, pml_value); + MPI_T_cvar_read(btl_handle, btl_value); + printf("Set value of cvars: PML: %s, BTL: %s\n", + pml_value, btl_value); + + MPI_T_cvar_handle_free(&pml_handle); + MPI_T_cvar_handle_free(&btl_handle); + + MPI_Init(NULL, NULL); + + // ... + + MPI_Finalize(); + + MPI_T_finalize(); + +Note that once MPI is initialized, most Open MPI cvars become read-only. + +For example, after MPI is initialized, it is no longer possible to set +the PML and BTL selection mechanisms. This is because many of these MCA +parameters are only used during MPI initialization; setting them after +MPI has already been initialized would be meaningless, anyway. + +MPI_T Categories +^^^^^^^^^^^^^^^^ + +Open MPI's MPI_T categories are organized hierarchically: + +1. Layer (or "project"). There are two layers in Open MPI: + + - ompi: This layer contains cvars, pvars, and sub categories related + to MPI characteristics. + - opal: This layer generally contains cvars, pvars, and sub + categories of lower-layer constructions, such as operating system + issues, networking issues, etc. + +2. Framework or section. 
+ + - In most cases, the next level in the hierarchy is the Open MPI MCA + framework. + + - For example, you can find the btl framework under the opal + layer (because it has to do with the underlying networking). + - Additionally, the pml framework is under the ompi layer + (because it has to do with MPI semantics of point-to-point + messaging). + + - There are a few non-MCA-framework entities under the layer, + however. + + - For example, there is an mpi section under both the opal and + ompi layers for general/core MPI constructs. + +3. Component. + + - If relevant, the third level in the hierarchy is the MCA + component. + - For example, the tcp component can be found under the opal + framework in the opal layer. + + +.. seealso:: :ref:`MPI_T_init_thread(3) ` diff --git a/docs/man-openmpi/man5/Open-MPI.5.rst b/docs/man-openmpi/man5/Open-MPI.5.rst new file mode 100644 index 00000000000..416ad4fe97e --- /dev/null +++ b/docs/man-openmpi/man5/Open-MPI.5.rst @@ -0,0 +1,398 @@ +.. _open-mpi: +.. _mpi: + +Open-MPI +======== + +.. include_body + +Open MPI - General information + +OPEN MPI +-------- + +Open MPI is an open source implementation of MPI (message-passing +interface), the industry-standard specification for writing +message-passing programs. Message passing is a programming model that +gives the programmer explicit control over interprocess communication. + +The MPI specification was developed by the MPI Forum, a group of +software developers, computer vendors, academics, and computer-science +researchers whose goal was to develop a standard for writing +message-passing programs that would be efficient, flexible, and +portable. + +The outcome, known as the MPI Standard, was first published in 1993; its +most recent version (MPI-3.1) was published in June 2015. Open MPI +includes all MPI 3.1-compliant routines. + +For more information about Open MPI, see https://www.open-mpi.org. + +The MPI standards are available at https://www.mpi-forum.org. 
+ +MAN PAGE SYNTAX +--------------- + +Man pages for Open MPI and Open MPI I/O routines are named according to +C syntax, that is, they begin with the prefix ``MPI_``, all in +uppercase, and the first letter following the ``MPI_`` prefix is also +uppercase. The rest of the letters in the routine are all lowercase, for +example, :ref:`MPI_Comm_get_attr`. + +ENVIRONMENT +----------- + +To fine-tune your Open MPI environment, you can either use arguments +to the :ref:`mpirun ` or :ref:`mpiexec ` +commands, or you can use MCA parameters. + +For more information on arguments, see the :ref:`mpirun(1) +` man page. + +For a complete listing of MCA parameters and their descriptions, issue +the command ``ompi_info --all``. See :ref:`ompi_info(1) +` for more information. + +ERRORS +------ + +All MPI routines (except :ref:`MPI_Wtime` and :ref:`MPI_Wtick`) return an +error value; C routines as the value of the function and Fortran +routines in the last argument. Before the value is returned, the current +MPI error handler is called. By default, this error handler aborts the +MPI job. The error handler may be changed with +:ref:`MPI_Comm_set_errhandler`; the predefined error handler +MPI_ERRORS_RETURN may be used to cause error values to be returned. +Note that MPI does not guarantee that an MPI program can continue past +an error. + +For more information on Open MPI error codes, see ``mpi.h`` in the +``include`` directory. + +Standard error return classes for Open MPI: + +.. list-table:: + :header-rows: 1 + + * - Error name + - Error value + - Description + + * - MPI_SUCCESS + - 0 + - Successful return code. + + * - MPI_ERR_BUFFER + - 1 + - Invalid buffer pointer. + + * - MPI_ERR_COUNT + - 2 + - Invalid count argument. + + * - MPI_ERR_TYPE + - 3 + - Invalid datatype argument. + + * - MPI_ERR_TAG + - 4 + - Invalid tag argument. + + * - MPI_ERR_COMM + - 5 + - Invalid communicator. + + * - MPI_ERR_RANK + - 6 + - Invalid rank. 
+ + * - MPI_ERR_REQUEST + - 7 + - Invalid MPI_Request handle. + + * - MPI_ERR_ROOT + - 8 + - Invalid root. + + * - MPI_ERR_GROUP + - 9 + - Null group passed to function. + + * - MPI_ERR_OP + - 10 + - Invalid operation. + + * - MPI_ERR_TOPOLOGY + - 11 + - Invalid topology. + + * - MPI_ERR_DIMS + - 12 + - Illegal dimension argument. + + * - MPI_ERR_ARG + - 13 + - Invalid argument. + + * - MPI_ERR_UNKNOWN + - 14 + - Unknown error. + + * - MPI_ERR_TRUNCATE + - 15 + - Message truncated on receive. + + * - MPI_ERR_OTHER + - 16 + - Other error; use Error_string. + + * - MPI_ERR_INTERN + - 17 + - Internal error code. + + * - MPI_ERR_IN_STATUS + - 18 + - Look in status for error value. + + * - MPI_ERR_PENDING + - 19 + - Pending request. + + * - MPI_ERR_ACCESS + - 20 + - Permission denied. + + * - MPI_ERR_AMODE + - 21 + - Unsupported amode passed to open. + + * - MPI_ERR_ASSERT + - 22 + - Invalid assert. + + * - MPI_ERR_BAD_FILE + - 23 + - Invalid file name (for example, path name too long). + + * - MPI_ERR_BASE + - 24 + - Invalid base. + + * - MPI_ERR_CONVERSION + - 25 + - An error occurred in a user-supplied data-conversion function. + + * - MPI_ERR_DISP + - 26 + - Invalid displacement. + + * - MPI_ERR_DUP_DATAREP + - 27 + - Conversion functions could not be registered because a data + representation identifier that was already defined was passed + to :ref:`MPI_REGISTER_DATAREP(3) `. + + * - MPI_ERR_FILE_EXISTS + - 28 + - File exists. + + + * - MPI_ERR_FILE_IN_USE + - 29 + - File operation could not be completed, as the file is currently + open by some process. + + * - MPI_ERR_FILE + - 30 + - Invalid file handle. + + * - MPI_ERR_INFO_KEY + - 31 + - Illegal info key. + + * - MPI_ERR_INFO_NOKEY + - 32 + - No such key. + + * - MPI_ERR_INFO_VALUE + - 33 + - Illegal info value. + + * - MPI_ERR_INFO + - 34 + - Invalid info object. + + * - MPI_ERR_IO + - 35 + - I/O error. + + * - MPI_ERR_KEYVAL + - 36 + - Illegal key value. + + * - MPI_ERR_LOCKTYPE + - 37 + - Invalid locktype. 
+ + * - MPI_ERR_NAME + - 38 + - Name not found. + + * - MPI_ERR_NO_MEM + - 39 + - Memory exhausted. + + * - MPI_ERR_NOT_SAME + - 40 + - Collective argument not identical on all processes, or + collective routines called in a different order by different + processes. + + * - MPI_ERR_NO_SPACE + - 41 + - Not enough space. + + * - MPI_ERR_NO_SUCH_FILE + - 42 + - File (or directory) does not exist. + + * - MPI_ERR_PORT + - 43 + - Invalid port. + + * - MPI_ERR_PROC_ABORTED + - 74 + - Operation failed because a remote peer has aborted. + + * - MPI_ERR_QUOTA + - 44 + - Quota exceeded. + + * - MPI_ERR_READ_ONLY + - 45 + - Read-only file system. + + * - MPI_ERR_RMA_CONFLICT + - 46 + - Conflicting accesses to window. + + * - MPI_ERR_RMA_SYNC + - 47 + - Erroneous RMA synchronization. + + * - MPI_ERR_SERVICE + - 48 + - Invalid publish/unpublish. + + * - MPI_ERR_SIZE + - 49 + - Invalid size. + + * - MPI_ERR_SPAWN + - 50 + - Error spawning. + + * - MPI_ERR_UNSUPPORTED_DATAREP + - 51 + - Unsupported datarep passed to :ref:`MPI_FILE_SET_VIEW(3) + `. + + * - MPI_ERR_UNSUPPORTED_OPERATION + - 52 + - Unsupported operation, such as seeking on a file that supports + only sequential access. + + * - MPI_ERR_WIN + - 53 + - Invalid window. + + * - MPI_T_ERR_MEMORY + - 54 + - Out of memory. + + * - MPI_T_ERR_NOT_INITIALIZED + - 55 + - Interface not initialized. + + * - MPI_T_ERR_CANNOT_INIT + - 56 + - Interface not in the state to be initialized. + + * - MPI_T_ERR_INVALID_INDEX + - 57 + - The enumeration index is invalid. + + * - MPI_T_ERR_INVALID_ITEM + - 58 + - The item index queried is out of range. + + * - MPI_T_ERR_INVALID_HANDLE + - 59 + - The handle is invalid. + + * - MPI_T_ERR_OUT_OF_HANDLES + - 60 + - No more handles available. + + * - MPI_T_ERR_OUT_OF_SESSIONS + - 61 + - No more sessions available. + + * - MPI_T_ERR_INVALID_SESSION + - 62 + - Session argument is not a valid session. + + * - MPI_T_ERR_CVAR_SET_NOT_NOW + - 63 + - Variable cannot be set at this moment. 
+ + * - MPI_T_ERR_CVAR_SET_NEVER + - 64 + - Variable cannot be set until end of execution. + + * - MPI_T_ERR_PVAR_NO_STARTSTOP + - 65 + - Variable cannot be started or stopped. + + * - MPI_T_ERR_PVAR_NO_WRITE + - 66 + - Variable cannot be written or reset. + + * - MPI_T_ERR_PVAR_NO_ATOMIC + - 67 + - Variable cannot be read and written atomically. + + * - MPI_ERR_RMA_RANGE + - 68 + - Target memory is not part of the window (in the case of a + window created with :ref:`MPI_WIN_CREATE_DYNAMIC(3) + `, target memory is not attached). + + * - MPI_ERR_RMA_ATTACH + - 69 + - Memory cannot be attached (e.g., because of resource + exhaustion). + + * - MPI_ERR_RMA_FLAVOR + - 70 + - Passed window has the wrong flavor for the called function. + + * - MPI_ERR_RMA_SHARED + - 71 + - Memory cannot be shared (e.g., some process in the group of the + specified communicator cannot expose shared memory). + + * - MPI_T_ERR_INVALID + - 72 + - Invalid use of the interface or bad parameter value(s). + + * - MPI_T_ERR_INVALID_NAME + - 73 + - The variable or category name is invalid. + + * - MPI_ERR_LASTCODE + - 93 + - Last error code. + +.. seealso:: :ref:`MPI_T` :ref:`mpirun(1) ` :ref:`mpiexec(1) + ` :ref:`ompi_info(1) ` diff --git a/docs/man-openmpi/man5/index.rst b/docs/man-openmpi/man5/index.rst new file mode 100644 index 00000000000..6733cbf9a11 --- /dev/null +++ b/docs/man-openmpi/man5/index.rst @@ -0,0 +1,9 @@ +Config file manual pages (section 5) +==================================== + +.. toctree:: + :maxdepth: 1 + + MPI_T.5.rst + Open-MPI.5.rst + diff --git a/docs/man-openshmem/index.rst b/docs/man-openshmem/index.rst new file mode 100644 index 00000000000..eb118aa0ca4 --- /dev/null +++ b/docs/man-openshmem/index.rst @@ -0,0 +1,8 @@ +OpenSHMEM manual pages +====================== + +.. 
toctree:: + :maxdepth: 1 + + man1/index + man3/index diff --git a/docs/man-openshmem/man1/index.rst b/docs/man-openshmem/man1/index.rst new file mode 100644 index 00000000000..37c171f8d55 --- /dev/null +++ b/docs/man-openshmem/man1/index.rst @@ -0,0 +1,8 @@ +Commands (section 1) +==================== + +.. toctree:: + :maxdepth: 1 + + oshmem-wrapper-compiler.1.rst + oshmem_info.1.rst diff --git a/docs/man-openshmem/man1/oshc++.1 b/docs/man-openshmem/man1/oshc++.1 new file mode 100644 index 00000000000..1dbbdca80df --- /dev/null +++ b/docs/man-openshmem/man1/oshc++.1 @@ -0,0 +1 @@ +.so man1/oshmem-wrapper-compiler.1 diff --git a/docs/man-openshmem/man1/oshcc.1 b/docs/man-openshmem/man1/oshcc.1 new file mode 100644 index 00000000000..1dbbdca80df --- /dev/null +++ b/docs/man-openshmem/man1/oshcc.1 @@ -0,0 +1 @@ +.so man1/oshmem-wrapper-compiler.1 diff --git a/docs/man-openshmem/man1/oshcxx.1 b/docs/man-openshmem/man1/oshcxx.1 new file mode 100644 index 00000000000..1dbbdca80df --- /dev/null +++ b/docs/man-openshmem/man1/oshcxx.1 @@ -0,0 +1 @@ +.so man1/oshmem-wrapper-compiler.1 diff --git a/docs/man-openshmem/man1/oshfort.1 b/docs/man-openshmem/man1/oshfort.1 new file mode 100644 index 00000000000..1dbbdca80df --- /dev/null +++ b/docs/man-openshmem/man1/oshfort.1 @@ -0,0 +1 @@ +.so man1/oshmem-wrapper-compiler.1 diff --git a/docs/man-openshmem/man1/oshmem-wrapper-compiler.1.rst b/docs/man-openshmem/man1/oshmem-wrapper-compiler.1.rst new file mode 100644 index 00000000000..66285c4e387 --- /dev/null +++ b/docs/man-openshmem/man1/oshmem-wrapper-compiler.1.rst @@ -0,0 +1,244 @@ +.. _man1-oshcc: +.. _man1-oshcxx: +.. _man1-oshc++: +.. _man1-oshfort: +.. _man1-shmemcc: +.. _man1-shmemcxx: +.. _man1-shmemc++: +.. _man1-shmemfort: + + +OpenSHMEM Wrapper Compilers +=========================== + +.. 
include_body + +oshcc, oshcxx, oshc++, oshfort, shmemcc, shmemcxx, shmemc++, shmemfort -- OpenSHMEM wrapper compilers + + +SYNTAX +------ + +``oshcc [-showme|-showme:compile|-showme:link] ...`` + +``oshcxx [-showme|-showme:compile|-showme:link] ...`` + +``oshc++ [-showme|-showme:compile|-showme:link] ...`` + +``oshfort [-showme|-showme:compile|-showme:link] ...`` + +``shmemcc [-showme|-showme:compile|-showme:link] ...`` + +``shmemcxx [-showme|-showme:compile|-showme:link] ...`` + +``shmemc++ [-showme|-showme:compile|-showme:link] ...`` + +``shmemfort [-showme|-showme:compile|-showme:link] ...`` + + +OPTIONS +------- + +The options below apply to all of the wrapper compilers: + +* ``--showme``: This option comes in several different variants (see + below). None of the variants invokes the underlying compiler; they + all provide information on how the underlying compiler would have + been invoked had ``--showme`` not been used. The basic ``--showme`` + option outputs the command line that would be executed to compile + the program. + + .. note:: If a non-filename argument is passed on the command line, + the *-showme* option will *not* display any additional + flags. For example, both ``oshcc --showme`` and + ``oshcc --showme my_source.c`` will show all the + wrapper-supplied flags. But ``oshcc + --showme -v`` will only show the underlying compiler name + and ``-v``. + +* ``--showme:compile``: Output the compiler flags that would have been + supplied to the underlying compiler. + +* ``--showme:link``: Output the linker flags that would have been + supplied to the underlying compiler. + +* ``--showme:command``: Outputs the underlying compiler + command (which may be one or more tokens). + +* ``--showme:incdirs``: Outputs a space-delimited (but otherwise + undecorated) list of directories that the wrapper compiler would + have provided to the underlying compiler to indicate + where relevant header files are located. 
+ +* ``--showme:libdirs``: Outputs a space-delimited (but otherwise + undecorated) list of directories that the wrapper compiler would + have provided to the underlying linker to indicate where relevant + libraries are located. + +* ``--showme:libs``: Outputs a space-delimited (but otherwise + undecorated) list of library names that the wrapper compiler would + have used to link an application. For example: ``mpi open-pal + util``. + +* ``--showme:version``: Outputs the version number of Open MPI. + +* ``--showme:help``: Output a brief usage help message. + +See the man page for your underlying compiler for other +options that can be passed through oshcc. + + +DESCRIPTION +----------- + +Conceptually, the role of these commands is quite simple: transparently +add relevant compiler and linker flags to the user's command line that +are necessary to compile / link OpenSHMEM programs, and then invoke the +underlying compiler to actually perform the command. + +As such, these commands are frequently referred to as "wrapper" +compilers because they do not actually compile or link applications +themselves; they only add in command line flags and invoke the back-end +compiler. + +Background +---------- + +Open MPI provides wrapper compilers for several languages: + +* ``oshcc``, ``shmemcc``: C + +* ``oshc++``, ``oshcxx``, ``shmemc++``, ``shmemcxx``: C++ + +* ``oshfort``, ``shmemfort``: Fortran + +The wrapper compilers for each of the languages are identical; they +can be used interchangeably. The different names are provided solely +for backwards compatibility. + + +Fortran Notes +------------- + +The Fortran wrapper compiler for OpenSHMEM (``oshfort`` and +``shmemfort``) can compile and link OpenSHMEM applications that use +any/all of the OpenSHMEM Fortran bindings. + +Note, however, that the Fortran compiler may require additional +command-line options to enforce a specific Fortran dialect. 
For +example, in some versions of the IBM XLF compiler, if ``xlf90`` is the +underlying Fortran compiler, ``-qfixed`` may be necessary to compile +fixed-format Fortran source files. + +Finally, note that ``oshfort`` will be inoperative and will return an +error on use if Fortran support was not built into the OpenSHMEM +layer. + +Overview +-------- + +``oshcc`` and ``shmemcc`` are convenience wrappers for the underlying +C compiler. Translation of an OpenSHMEM program requires the linkage +of the OpenSHMEM-specific libraries which may not reside in one of the +standard search directories of ``ld(1)``. It also often requires the +inclusion of header files that may also not be found in a standard +location. + +``oshcc`` and ``shmemcc`` pass their arguments to the underlying C +compiler along with the ``-I``, ``-L`` and ``-l`` options required by +OpenSHMEM programs. + +The same is true for all the other language wrapper compilers. + +The OpenSHMEM Team *strongly* encourages using the wrapper compilers +instead of attempting to link to the OpenSHMEM libraries manually. This +allows the specific implementation of OpenSHMEM to change without +forcing changes to linker directives in users' Makefiles. Indeed, the +specific set of flags and libraries used by the wrapper compilers +depends on how OpenSHMEM was configured and built; the values can change +between different installations of the same version of OpenSHMEM. + +Indeed, since the wrappers are simply thin shells on top of an +underlying compiler, there are very, very few compelling reasons *not* +to use ``oshcc``. When it is not possible to use the wrappers +directly, the ``-showme:compile`` and ``-showme:link`` options should be +used to determine what flags the wrappers would have used. For example: + +.. 
code:: sh + + shell$ cc -c file1.c `shmemcc -showme:compile` + + shell$ cc -c file2.c `shmemcc -showme:compile` + + shell$ cc file1.o file2.o `shmemcc -showme:link` -o my_oshmem_program + + +NOTES +----- + +It is possible to make the wrapper compilers multi-lib aware. That is, +the libraries and includes specified may differ based on the compiler +flags specified (for example, with the GNU compilers on Linux, a +different library path may be used if ``-m32`` is seen versus ``-m64`` +being seen). This is not the default behavior in a standard build, but +can be activated (for example, in a binary package providing both 32 +and 64 bit support). `More information can be found here +`_. + + +.. _man1-oshmem-wrapper-compiler-files: + +FILES +----- + +The strings that the wrapper compilers insert into the command line +before invoking the underlying compiler are stored in a text file +created by OpenSHMEM and installed to +``$pkgdata/NAME-wrapper-data.txt``, where: + +* ``$pkgdata`` is typically ``$prefix/share/openmpi`` +* ``$prefix`` is the top installation directory of OpenSHMEM +* ``NAME`` is the name of the wrapper compiler (e.g., + ``$pkgdata/shmemcc-wrapper-data.txt``) + +It is rarely necessary to edit these files, but they can be examined to +gain insight into what flags the wrappers are placing on the command +line. + + +ENVIRONMENT VARIABLES +--------------------- + +By default, the wrappers use the compilers that were selected when +OpenSHMEM was configured. These compilers were either found +automatically by Open MPI's "configure" script, or were selected by +the user in the ``CC``, ``CXX``, and/or ``FC`` environment variables +before ``configure`` was invoked. Additionally, other arguments specific +to the compiler may have been selected by configure. 
+ +These values can be selectively overridden by either editing the text +files containing this configuration information (see the :ref:`FILES +` section), or by setting selected +environment variables of the form ``oshmem_value``. + +Valid value names are: + +* ``CPPFLAGS``: Flags added when invoking the preprocessor (C or C++) + +* ``LDFLAGS``: Flags added when invoking the linker (C, C++, or + Fortran) + +* ``LIBS``: Libraries added when invoking the linker (C, C++, or + Fortran) + +* ``CC``: C compiler + +* ``CFLAGS``: C compiler flags + +* ``CXX``: C++ compiler + +* ``CXXFLAGS``: C++ compiler flags + +* ``FC``: Fortran compiler + +* ``FCFLAGS``: Fortran compiler flags diff --git a/docs/man-openshmem/man1/oshmem_info.1.rst b/docs/man-openshmem/man1/oshmem_info.1.rst new file mode 100644 index 00000000000..b4f19fb9034 --- /dev/null +++ b/docs/man-openshmem/man1/oshmem_info.1.rst @@ -0,0 +1,171 @@ +.. _oshmem_info: + + +oshmem_info +=========== + +.. include_body + +oshmem_info - Display information about the OpenSHMEM installation + + +SYNOPSIS +-------- + +``oshmem_info [options]`` + + +DESCRIPTION +----------- + +``oshmem_info`` provides detailed information about the OpenSHMEM +installation. It can be useful for at least three common scenarios: + +1. Checking local configuration and seeing how OpenSHMEM was + installed. + +2. Submitting bug reports / help requests to the OpenSHMEM + community + (see :doc:`Getting help `). + +3. Seeing a list of installed OpenSHMEM plugins and querying what MCA + parameters they support. + +.. note:: ``oshmem_info`` defaults to only showing a few MCA parameters + by default (i.e., level 1 parameters). Use the ``--level`` + option to enable showing more options (see the :ref:`LEVELS + ` section for more information). 
+ + +OPTIONS +------- + +``oshmem_info`` accepts the following options: + +* ``-a``, ``--all``: Show all configuration options and MCA parameters + +* ``--arch``: Show architecture OpenSHMEM was compiled on + +* ``-c``, ``--config``: Show configuration options + +* ``-gmca``, ``--gmca <param> <value>``: Pass global MCA parameters + that are applicable to all contexts. + +* ``-h``, ``--help``: Shows help / usage message + +* ``--hostname``: Show the hostname that OpenSHMEM was configured + and built on + +* ``--internal``: Show internal MCA parameters (not meant to be + modified by users) + +* ``--level``: Show only variables with at most this level (1-9). The + default is 1 unless ``--all`` is specified without ``--level`` in + which case the default is 9. See the :ref:`LEVELS + <man1-oshmem_info-levels>` section for more information. + +* ``-mca``, ``--mca <param> <value>``: Pass context-specific MCA + parameters; they are considered global if + ``--gmca`` is not used and only one context is specified. + +* ``--param <type> <component>``: Show MCA parameters. The first + parameter is the type of the component to display; the second + parameter is the specific component to display (or the keyword + ``all``, meaning "display all components of this type"). + +* ``--parsable``: When used in conjunction with other parameters, + the output is displayed in a machine-parsable format + +* ``--parseable``: Synonym for ``--parsable``. + +* ``--path <type>``: Show paths that OpenSHMEM was configured + with. Accepts the following parameters: ``prefix``, ``bindir``, + ``libdir``, ``incdir``, ``pkglibdir``, ``sysconfdir``. + +* ``--pretty``: When used in conjunction with other parameters, the + output is displayed in prettyprint format (default) + +* ``--selected-only``: Show only variables from selected components. + +* ``-V``, ``--version``: Show version of OpenSHMEM. + +.. _man1-oshmem_info-levels: + +LEVELS +------ + +#. Basic information of interest to users +#. Detailed information of interest to users +#. 
All remaining information of interest to users +#. Basic information required for tuning +#. Detailed information required for tuning +#. All remaining information required for tuning +#. Basic information for OpenSHMEM implementors +#. Detailed information for OpenSHMEM implementors +#. All remaining information for OpenSHMEM implementors + + +EXAMPLES +-------- + +.. code:: sh + + shell$ oshmem_info + +Show the default output of options and listing of installed components +in a human-readable / prettyprint format. + +.. code:: sh + + shell$ oshmem_info --parsable + +Show the default output of options and listing of installed components +in a machine-parsable format. + +.. code:: sh + + shell$ oshmem_info --param btl openib + +Show the MCA parameters of the "openib" BTL component in a +human-readable / prettyprint format. + +.. code:: sh + + shell$ oshmem_info --param btl openib --parsable + +Show the MCA parameters of the "openib" BTL component in a +machine-parsable format. + +.. code:: sh + + shell$ oshmem_info --path bindir + +Show the "bindir" that OpenSHMEM was configured with. + +.. code:: sh + + shell$ oshmem_info --version ompi full --parsable + +Show the full version numbers of OpenSHMEM (including the ORTE +and OPAL version numbers) in a machine-readable format. + +.. code:: sh + + shell$ oshmem_info --version btl major + +Show the major version number of all BTL components in a prettyprint +format. + +.. code:: sh + + shell$ oshmem_info --version btl:tcp minor + +Show the minor version number of the TCP BTL component in a +prettyprint format. + +.. code:: sh + + shell$ oshmem_info --all + +Show *all* information about the OpenSHMEM installation, including all +components that can be found, the MCA parameters that they support, +versions of OpenSHMEM and the components, etc. 
diff --git a/docs/man-openshmem/man1/shmemc++.1 b/docs/man-openshmem/man1/shmemc++.1 new file mode 100644 index 00000000000..1dbbdca80df --- /dev/null +++ b/docs/man-openshmem/man1/shmemc++.1 @@ -0,0 +1 @@ +.so man1/oshmem-wrapper-compiler.1 diff --git a/docs/man-openshmem/man1/shmemcc.1 b/docs/man-openshmem/man1/shmemcc.1 new file mode 100644 index 00000000000..1dbbdca80df --- /dev/null +++ b/docs/man-openshmem/man1/shmemcc.1 @@ -0,0 +1 @@ +.so man1/oshmem-wrapper-compiler.1 diff --git a/docs/man-openshmem/man1/shmemcxx.1 b/docs/man-openshmem/man1/shmemcxx.1 new file mode 100644 index 00000000000..1dbbdca80df --- /dev/null +++ b/docs/man-openshmem/man1/shmemcxx.1 @@ -0,0 +1 @@ +.so man1/oshmem-wrapper-compiler.1 diff --git a/docs/man-openshmem/man1/shmemfort.1 b/docs/man-openshmem/man1/shmemfort.1 new file mode 100644 index 00000000000..1dbbdca80df --- /dev/null +++ b/docs/man-openshmem/man1/shmemfort.1 @@ -0,0 +1 @@ +.so man1/oshmem-wrapper-compiler.1 diff --git a/docs/man-openshmem/man3/OpenSHMEM.3.rst b/docs/man-openshmem/man3/OpenSHMEM.3.rst new file mode 100644 index 00000000000..ad1dab7892c --- /dev/null +++ b/docs/man-openshmem/man3/OpenSHMEM.3.rst @@ -0,0 +1,9 @@ +.. _openshmem: + +OpenSHMEM +========= + .. include_body + +.. include:: ../man3/intro_shmem.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/_my_pe.3.rst b/docs/man-openshmem/man3/_my_pe.3.rst new file mode 100644 index 00000000000..73b7725a0dd --- /dev/null +++ b/docs/man-openshmem/man3/_my_pe.3.rst @@ -0,0 +1,9 @@ +.. _my_pe: + +_my_pe +====== + .. include_body + +.. include:: ../man3/shmem_my_pe.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/_num_pes.3.rst b/docs/man-openshmem/man3/_num_pes.3.rst new file mode 100644 index 00000000000..3806706b1d1 --- /dev/null +++ b/docs/man-openshmem/man3/_num_pes.3.rst @@ -0,0 +1,9 @@ +.. _num_pes: + +_num_pes +======== + .. include_body + +.. include:: ../man3/shmem_n_pes.3.rst + :start-after: .. 
include_body + diff --git a/docs/man-openshmem/man3/index.rst b/docs/man-openshmem/man3/index.rst new file mode 100644 index 00000000000..37ebd60128d --- /dev/null +++ b/docs/man-openshmem/man3/index.rst @@ -0,0 +1,225 @@ +OpenSHMEM API manual pages (section 3) +====================================== + +.. toctree:: + :maxdepth: 1 + + OpenSHMEM.3.rst + _my_pe.3.rst + _num_pes.3.rst + intro_shmem.3.rst + shfree.3.rst + shmalloc.3.rst + shmem_addr_accessible.3.rst + shmem_align.3.rst + shmem_alltoall32.3.rst + shmem_alltoall64.3.rst + shmem_alltoalls32.3.rst + shmem_alltoalls64.3.rst + shmem_barrier.3.rst + shmem_barrier_all.3.rst + shmem_broadcast32.3.rst + shmem_broadcast64.3.rst + shmem_char_g.3.rst + shmem_char_get.3.rst + shmem_char_get_nbi.3.rst + shmem_char_p.3.rst + shmem_char_put.3.rst + shmem_char_put_nbi.3.rst + shmem_clear_cache_inv.3.rst + shmem_clear_cache_line_inv.3.rst + shmem_clear_lock.3.rst + shmem_collect32.3.rst + shmem_collect64.3.rst + shmem_complexd_prod_to_all.3.rst + shmem_complexd_sum_to_all.3.rst + shmem_complexf_prod_to_all.3.rst + shmem_complexf_sum_to_all.3.rst + shmem_double_fetch.3.rst + shmem_double_g.3.rst + shmem_double_get.3.rst + shmem_double_get_nbi.3.rst + shmem_double_iget.3.rst + shmem_double_iput.3.rst + shmem_double_max_to_all.3.rst + shmem_double_min_to_all.3.rst + shmem_double_p.3.rst + shmem_double_prod_to_all.3.rst + shmem_double_put.3.rst + shmem_double_put_nbi.3.rst + shmem_double_set.3.rst + shmem_double_sum_to_all.3.rst + shmem_double_swap.3.rst + shmem_fcollect32.3.rst + shmem_fcollect64.3.rst + shmem_fence.3.rst + shmem_finalize.3.rst + shmem_float_fetch.3.rst + shmem_float_g.3.rst + shmem_float_get.3.rst + shmem_float_get_nbi.3.rst + shmem_float_iget.3.rst + shmem_float_iput.3.rst + shmem_float_max_to_all.3.rst + shmem_float_min_to_all.3.rst + shmem_float_p.3.rst + shmem_float_prod_to_all.3.rst + shmem_float_put.3.rst + shmem_float_put_nbi.3.rst + shmem_float_set.3.rst + shmem_float_sum_to_all.3.rst + 
shmem_float_swap.3.rst + shmem_free.3.rst + shmem_get128.3.rst + shmem_get128_nbi.3.rst + shmem_get16_nbi.3.rst + shmem_get32.3.rst + shmem_get32_nbi.3.rst + shmem_get64.3.rst + shmem_get64_nbi.3.rst + shmem_get8_nbi.3.rst + shmem_getmem.3.rst + shmem_getmem_nbi.3.rst + shmem_global_exit.3.rst + shmem_iget128.3.rst + shmem_iget32.3.rst + shmem_iget64.3.rst + shmem_info_get_name.3.rst + shmem_info_get_version.3.rst + shmem_init.3.rst + shmem_int_add.3.rst + shmem_int_and_to_all.3.rst + shmem_int_cswap.3.rst + shmem_int_fadd.3.rst + shmem_int_fetch.3.rst + shmem_int_finc.3.rst + shmem_int_g.3.rst + shmem_int_get.3.rst + shmem_int_get_nbi.3.rst + shmem_int_iget.3.rst + shmem_int_inc.3.rst + shmem_int_iput.3.rst + shmem_int_max_to_all.3.rst + shmem_int_min_to_all.3.rst + shmem_int_or_to_all.3.rst + shmem_int_p.3.rst + shmem_int_prod_to_all.3.rst + shmem_int_put.3.rst + shmem_int_put_nbi.3.rst + shmem_int_set.3.rst + shmem_int_sum_to_all.3.rst + shmem_int_swap.3.rst + shmem_int_wait.3.rst + shmem_int_wait_until.3.rst + shmem_int_xor_to_all.3.rst + shmem_iput128.3.rst + shmem_iput32.3.rst + shmem_iput64.3.rst + shmem_long_add.3.rst + shmem_long_and_to_all.3.rst + shmem_long_cswap.3.rst + shmem_long_fadd.3.rst + shmem_long_fetch.3.rst + shmem_long_finc.3.rst + shmem_long_g.3.rst + shmem_long_get.3.rst + shmem_long_get_nbi.3.rst + shmem_long_iget.3.rst + shmem_long_inc.3.rst + shmem_long_iput.3.rst + shmem_long_max_to_all.3.rst + shmem_long_min_to_all.3.rst + shmem_long_or_to_all.3.rst + shmem_long_p.3.rst + shmem_long_prod_to_all.3.rst + shmem_long_put.3.rst + shmem_long_put_nbi.3.rst + shmem_long_set.3.rst + shmem_long_sum_to_all.3.rst + shmem_long_swap.3.rst + shmem_long_wait.3.rst + shmem_long_wait_until.3.rst + shmem_long_xor_to_all.3.rst + shmem_longdouble_g.3.rst + shmem_longdouble_get.3.rst + shmem_longdouble_get_nbi.3.rst + shmem_longdouble_iget.3.rst + shmem_longdouble_iput.3.rst + shmem_longdouble_max_to_all.3.rst + shmem_longdouble_min_to_all.3.rst + 
shmem_longdouble_p.3.rst + shmem_longdouble_prod_to_all.3.rst + shmem_longdouble_put.3.rst + shmem_longdouble_put_nbi.3.rst + shmem_longdouble_sum_to_all.3.rst + shmem_longlong_add.3.rst + shmem_longlong_and_to_all.3.rst + shmem_longlong_cswap.3.rst + shmem_longlong_fadd.3.rst + shmem_longlong_fetch.3.rst + shmem_longlong_finc.3.rst + shmem_longlong_g.3.rst + shmem_longlong_get.3.rst + shmem_longlong_get_nbi.3.rst + shmem_longlong_iget.3.rst + shmem_longlong_inc.3.rst + shmem_longlong_iput.3.rst + shmem_longlong_max_to_all.3.rst + shmem_longlong_min_to_all.3.rst + shmem_longlong_or_to_all.3.rst + shmem_longlong_p.3.rst + shmem_longlong_prod_to_all.3.rst + shmem_longlong_put.3.rst + shmem_longlong_put_nbi.3.rst + shmem_longlong_set.3.rst + shmem_longlong_sum_to_all.3.rst + shmem_longlong_swap.3.rst + shmem_longlong_wait.3.rst + shmem_longlong_wait_until.3.rst + shmem_longlong_xor_to_all.3.rst + shmem_malloc.3.rst + shmem_my_pe.3.rst + shmem_n_pes.3.rst + shmem_pe_accessible.3.rst + shmem_ptr.3.rst + shmem_put128.3.rst + shmem_put128_nbi.3.rst + shmem_put16_nbi.3.rst + shmem_put32.3.rst + shmem_put32_nbi.3.rst + shmem_put64.3.rst + shmem_put64_nbi.3.rst + shmem_put8_nbi.3.rst + shmem_putmem.3.rst + shmem_putmem_nbi.3.rst + shmem_quiet.3.rst + shmem_realloc.3.rst + shmem_set_cache_inv.3.rst + shmem_set_cache_line_inv.3.rst + shmem_set_lock.3.rst + shmem_short_and_to_all.3.rst + shmem_short_g.3.rst + shmem_short_get.3.rst + shmem_short_get_nbi.3.rst + shmem_short_iget.3.rst + shmem_short_iput.3.rst + shmem_short_max_to_all.3.rst + shmem_short_min_to_all.3.rst + shmem_short_or_to_all.3.rst + shmem_short_p.3.rst + shmem_short_prod_to_all.3.rst + shmem_short_put.3.rst + shmem_short_put_nbi.3.rst + shmem_short_sum_to_all.3.rst + shmem_short_wait.3.rst + shmem_short_wait_until.3.rst + shmem_short_xor_to_all.3.rst + shmem_swap.3.rst + shmem_test_lock.3.rst + shmem_udcflush.3.rst + shmem_udcflush_line.3.rst + shmem_wait.3.rst + shmem_wait_until.3.rst + shmemalign.3.rst + 
shrealloc.3.rst + start_pes.3.rst diff --git a/docs/man-openshmem/man3/intro_shmem.3.rst b/docs/man-openshmem/man3/intro_shmem.3.rst new file mode 100644 index 00000000000..e20bf02e4a6 --- /dev/null +++ b/docs/man-openshmem/man3/intro_shmem.3.rst @@ -0,0 +1,738 @@ +.. _intro_shmem: + + +intro_shmem +=========== + +.. include_body + +intro_shmem - Introduction to the OpenSHMEM programming model + + +DESCRIPTION +----------- + +The SHMEM programming model consists of library routines that provide +low-latency, high-bandwidth communication for use in highly parallelized +scalable programs. The routines in the OpenSHMEM application programming +interface (API) provide a programming model for exchanging data between +cooperating parallel processes. The resulting programs are similar in +style to Message Passing Interface (MPI) programs. The SHMEM API can be +used either alone or in combination with MPI routines in the same +parallel program. + +An OpenSHMEM program is SPMD (single program, multiple data) in style. +The SHMEM processes, called processing elements or PEs, all start at the +same time and they all run the same program. Usually the PEs perform +computation on their own subdomains of the larger problem and +periodically communicate with other PEs to exchange information on which +the next computation phase depends. + +The OpenSHMEM routines minimize the overhead associated with data +transfer requests, maximize bandwidth and minimize data latency. Data +latency is the period of time that starts when a PE initiates a transfer +of data and ends when a PE can use the data. OpenSHMEM routines support +remote data transfer through put operations, which transfer data to a +different PE, get operations, which transfer data from a different PE, +and remote pointers, which allow direct references to data objects owned +by another PE. Other operations supported are collective broadcast and +reduction, barrier synchronization, and atomic memory operations. 
An +atomic memory operation is an atomic read-and-update operation, such as +a fetch-and-increment, on a remote or local data object. + + +OPENSHMEM ROUTINES +------------------ + +This section lists the significant OpenSHMEM message-passing routines. + +PE queries + +.. + + * C/C++ only: + + * *\_num_pes*\ (3) + + * *\_my_pe*\ (3) + + * Fortran only: + + * *NUM_PES*\ (3) + + * *MY_PE*\ (3) + +Elemental data put routines + +.. + + * C/C++ only: + + * :ref:`shmem_double_p`\ (3) + + * :ref:`shmem_float_p`\ (3) + + * :ref:`shmem_int_p`\ (3) + + * :ref:`shmem_long_p`\ (3) + + * :ref:`shmem_short_p`\ (3) + +Block data put routines + +.. + + * C/C++ and Fortran: + + * :ref:`shmem_put32`\ (3) + + * :ref:`shmem_put64`\ (3) + + * :ref:`shmem_put128`\ (3) + + * C/C++ only: + + * :ref:`shmem_double_put`\ (3) + + * :ref:`shmem_float_put`\ (3) + + * :ref:`shmem_int_put`\ (3) + + * :ref:`shmem_long_put`\ (3) + + * :ref:`shmem_short_put`\ (3) + + * Fortran only: + + * shmem_complex_put\ (3) + + * shmem_integer_put\ (3) + + * shmem_logical_put\ (3) + + * shmem_real_put\ (3) + +Elemental data get routines + +.. 
+ + * C/C++ only: + + * :ref:`shmem_double_g`\ (3) + + * :ref:`shmem_float_g`\ (3) + + * :ref:`shmem_int_g`\ (3) + + * :ref:`shmem_long_g`\ (3) + + * :ref:`shmem_short_g`\ (3) + +Block data get routines + + * C/C++ and Fortran: + + * :ref:`shmem_get32`\ (3) + + * :ref:`shmem_get64`\ (3) + + * :ref:`shmem_get128`\ (3) + + * C/C++ only: + + * :ref:`shmem_double_get`\ (3) + + * :ref:`shmem_float_get`\ (3) + + * :ref:`shmem_int_get`\ (3) + + * :ref:`shmem_long_get`\ (3) + + * :ref:`shmem_short_get`\ (3) + + * Fortran only: + + * shmem_complex_get\ (3) + + * shmem_integer_get\ (3) + + * shmem_logical_get\ (3) + + * shmem_real_get\ (3) + +Strided put routines + + * C/C++ and Fortran: + + * :ref:`shmem_iput32`\ (3) + + * :ref:`shmem_iput64`\ (3) + + * :ref:`shmem_iput128`\ (3) + + * C/C++ only: + + * :ref:`shmem_double_iput`\ (3) + + * :ref:`shmem_float_iput`\ (3) + + * :ref:`shmem_int_iput`\ (3) + + * :ref:`shmem_long_iput`\ (3) + + * :ref:`shmem_short_iput`\ (3) + + * Fortran only: + + * shmem_complex_iput\ (3) + + * shmem_integer_iput\ (3) + + * shmem_logical_iput\ (3) + + * shmem_real_iput\ (3) + +Strided get routines + +.. 
+ + * C/C++ and Fortran: + + * :ref:`shmem_iget32`\ (3) + + * :ref:`shmem_iget64`\ (3) + + * :ref:`shmem_iget128`\ (3) + + * C/C++ only: + + * :ref:`shmem_double_iget`\ (3) + + * :ref:`shmem_float_iget`\ (3) + + * :ref:`shmem_int_iget`\ (3) + + * :ref:`shmem_long_iget`\ (3) + + * :ref:`shmem_short_iget`\ (3) + + * Fortran only: + + * shmem_complex_iget\ (3) + + * shmem_integer_iget\ (3) + + * shmem_logical_iget\ (3) + + * shmem_real_iget\ (3) + +Point-to-point synchronization routines + + * C/C++ only: + + * :ref:`shmem_int_wait`\ (3) + + * :ref:`shmem_int_wait_until`\ (3) + + * :ref:`shmem_long_wait`\ (3) + + * :ref:`shmem_long_wait_until`\ (3) + + * :ref:`shmem_longlong_wait`\ (3) + + * :ref:`shmem_longlong_wait_until`\ (3) + + * :ref:`shmem_short_wait`\ (3) + + * :ref:`shmem_short_wait_until`\ (3) + + * Fortran: + + * shmem_int4_wait\ (3) + + * shmem_int4_wait_until\ (3) + + * shmem_int8_wait\ (3) + + * shmem_int8_wait_until\ (3) + +Barrier synchronization routines + +.. + + * C/C++ and Fortran: + + * :ref:`shmem_barrier_all`\ (3) + + * :ref:`shmem_barrier`\ (3) + +Atomic memory fetch-and-operate (fetch-op) routines + + * C/C++ and Fortran: + + * :ref:`shmem_swap` + +Reduction routines + + * C/C++ only: + + * :ref:`shmem_int_and_to_all`\ (3) + + * :ref:`shmem_long_and_to_all`\ (3) + + * :ref:`shmem_longlong_and_to_all`\ (3) + + * :ref:`shmem_short_and_to_all`\ (3) + + * :ref:`shmem_double_max_to_all`\ (3) + + * :ref:`shmem_float_max_to_all`\ (3) + + * :ref:`shmem_int_max_to_all`\ (3) + + * :ref:`shmem_long_max_to_all`\ (3) + + * :ref:`shmem_longlong_max_to_all`\ (3) + + * :ref:`shmem_short_max_to_all`\ (3) + + * :ref:`shmem_double_min_to_all`\ (3) + + * :ref:`shmem_float_min_to_all`\ (3) + + * :ref:`shmem_int_min_to_all`\ (3) + + * :ref:`shmem_long_min_to_all`\ (3) + + * :ref:`shmem_longlong_min_to_all`\ (3) + + * :ref:`shmem_short_min_to_all`\ (3) + + * :ref:`shmem_double_sum_to_all`\ (3) + + * :ref:`shmem_float_sum_to_all`\ (3) + + * 
:ref:`shmem_int_sum_to_all`\ (3) + + * :ref:`shmem_long_sum_to_all`\ (3) + + * :ref:`shmem_longlong_sum_to_all`\ (3) + + * :ref:`shmem_short_sum_to_all`\ (3) + + * :ref:`shmem_double_prod_to_all`\ (3) + + * :ref:`shmem_float_prod_to_all`\ (3) + + * :ref:`shmem_int_prod_to_all`\ (3) + + * :ref:`shmem_long_prod_to_all`\ (3) + + * :ref:`shmem_longlong_prod_to_all`\ (3) + + * :ref:`shmem_short_prod_to_all`\ (3) + + * :ref:`shmem_int_or_to_all`\ (3) + + * :ref:`shmem_long_or_to_all`\ (3) + + * :ref:`shmem_longlong_or_to_all`\ (3) + + * :ref:`shmem_short_or_to_all`\ (3) + + * :ref:`shmem_int_xor_to_all`\ (3) + + * :ref:`shmem_long_xor_to_all`\ (3) + + * :ref:`shmem_longlong_xor_to_all`\ (3) + + * :ref:`shmem_short_xor_to_all`\ (3) + + * Fortran only: + + * shmem_int4_and_to_all\ (3) + + * shmem_int8_and_to_all\ (3) + + * shmem_real4_max_to_all\ (3) + + * shmem_real8_max_to_all\ (3) + + * shmem_int4_max_to_all\ (3) + + * shmem_int8_max_to_all\ (3) + + * shmem_real4_min_to_all\ (3) + + * shmem_real8_min_to_all\ (3) + + * shmem_int4_min_to_all\ (3) + + * shmem_int8_min_to_all\ (3) + + * shmem_real4_sum_to_all\ (3) + + * shmem_real8_sum_to_all\ (3) + + * shmem_int4_sum_to_all\ (3) + + * shmem_int8_sum_to_all\ (3) + + * shmem_real4_prod_to_all\ (3) + + * shmem_real8_prod_to_all\ (3) + + * shmem_int4_prod_to_all\ (3) + + * shmem_int8_prod_to_all\ (3) + + * shmem_int4_or_to_all\ (3) + + * shmem_int8_or_to_all\ (3) + + * shmem_int4_xor_to_all\ (3) + + * shmem_int8_xor_to_all\ (3) + +Broadcast routines + +.. + + * C/C++ and Fortran: + + * :ref:`shmem_broadcast32`\ (3) + + * :ref:`shmem_broadcast64`\ (3) + +Cache management routines + +.. + + * C/C++ and Fortran: + + * :ref:`shmem_udcflush`\ (3) + + * :ref:`shmem_udcflush_line`\ (3) + +Byte-granularity block put routines + +.. 
+ + * C/C++ and Fortran + + * :ref:`shmem_putmem`\ (3) + + * :ref:`shmem_getmem`\ (3) + + * Fortran only: + + * shmem_character_put\ (3) + + * shmem_character_get\ (3) + +Collect routines + + * C/C++ and Fortran: + + * :ref:`shmem_collect32`\ (3) + + * :ref:`shmem_collect64`\ (3) + + * :ref:`shmem_fcollect32`\ (3) + + * :ref:`shmem_fcollect64`\ (3) + +Atomic memory fetch-and-operate (fetch-op) routines + + * C/C++ only: + + * :ref:`shmem_double_swap`\ (3) + + * :ref:`shmem_float_swap`\ (3) + + * :ref:`shmem_int_cswap`\ (3) + + * :ref:`shmem_int_fadd`\ (3) + + * :ref:`shmem_int_finc`\ (3) + + * :ref:`shmem_int_swap`\ (3) + + * :ref:`shmem_long_cswap`\ (3) + + * :ref:`shmem_long_fadd`\ (3) + + * :ref:`shmem_long_finc`\ (3) + + * :ref:`shmem_long_swap`\ (3) + + * :ref:`shmem_longlong_cswap`\ (3) + + * :ref:`shmem_longlong_fadd`\ (3) + + * :ref:`shmem_longlong_finc`\ (3) + + * :ref:`shmem_longlong_swap`\ (3) + + * Fortran only: + + * shmem_int4_cswap\ (3) + + * shmem_int4_fadd\ (3) + + * shmem_int4_finc\ (3) + + * shmem_int4_swap\ (3) + + * shmem_int8_swap\ (3) + + * shmem_real4_swap\ (3) + + * shmem_real8_swap\ (3) + + * shmem_int8_cswap\ (3) + +Atomic memory operation routines + + * Fortran only: + + * shmem_int4_add\ (3) + + * shmem_int4_inc\ (3) + +Remote memory pointer function + + * C/C++ and Fortran: + + * :ref:`shmem_ptr`\ (3) + +Reduction routines + + * C/C++ only: + + * :ref:`shmem_longdouble_max_to_all`\ (3) + + * :ref:`shmem_longdouble_min_to_all`\ (3) + + * :ref:`shmem_longdouble_prod_to_all`\ (3) + + * :ref:`shmem_longdouble_sum_to_all`\ (3) + + * Fortran only: + + * shmem_real16_max_to_all\ (3) + + * shmem_real16_min_to_all\ (3) + + * shmem_real16_prod_to_all\ (3) + + * shmem_real16_sum_to_all\ (3) + +Accessibility query routines + + * C/C++ and Fortran: + + * :ref:`shmem_pe_accessible`\ (3) + + * :ref:`shmem_addr_accessible`\ (3) + +Symmetric Data Objects + +Consistent with the SPMD nature of the OpenSHMEM programming model is +the concept of symmetric 
data objects. These are arrays or variables +that exist with the same size, type, and relative address on all PEs. +Another term for symmetric data objects is "remotely accessible data +objects". In the interface definitions for OpenSHMEM data transfer +routines, one or more of the parameters are typically required to be +symmetric or remotely accessible. + +The following kinds of data objects are symmetric: + + * Fortran data objects in common blocks or with the SAVE + attribute. These data objects must not be defined in a dynamic + shared object (DSO). + + * Non-stack C and C++ variables. These data objects must not be + defined in a DSO. + + * Fortran arrays allocated with *shpalloc*\ (3) + + * C and C++ data allocated by *shmalloc*\ (3) + +.. + +Collective Routines + Some SHMEM routines, for example, shmem_broadcast\ (3) and + :ref:`shmem_float_sum_to_all`\ (3), are classified as collective routines + because they distribute work across a set of PEs. They must be called + concurrently by all PEs in the active set defined by the PE_start, + logPE_stride, PE_size argument triplet. The following man pages + describe the OpenSHMEM collective routines: + + * shmem_and\ (3) + + * :ref:`shmem_barrier`\ (3) + + * shmem_broadcast\ (3) + + * shmem_collect\ (3) + + * shmem_max\ (3) + + * shmem_min\ (3) + + * shmem_or\ (3) + + * shmem_prod\ (3) + + * shmem_sum\ (3) + + * shmem_xor\ (3) + + +USING THE SYMMETRIC WORK ARRAY, PSYNC +------------------------------------- + +Multiple pSync arrays are often needed if a particular PE calls an +OpenSHMEM collective routine twice without intervening barrier +synchronization. Problems would occur if some PEs in the active set for +call 2 arrive at call 2 before processing of call 1 is complete by all +PEs in the call 1 active set. You can use :ref:`shmem_barrier`\ (3) or +:ref:`shmem_barrier_all`\ (3) to perform a barrier synchronization between +consecutive calls to OpenSHMEM collective routines. 
+ +There are two special cases: + +* + The :ref:`shmem_barrier`\ (3) routine allows the same pSync array to be + used on consecutive calls as long as the active PE set does not + change. + +* + If the same collective routine is called multiple times with the same + active set, the calls may alternate between two pSync arrays. The + SHMEM routines guarantee that a first call is completely finished by + all PEs by the time processing of a third call begins on any PE. + +Because the SHMEM routines restore pSync to its original contents, +multiple calls that use the same pSync array do not require that pSync +be reinitialized after the first call. + + +SHMEM ENVIRONMENT VARIABLES +--------------------------- + +This section lists the significant SHMEM environment variables. + +* **SMA_VERSION** print the library version at start-up. + +* **SMA_INFO** print helpful text about all these environment + variables. + +* **SMA_SYMMETRIC_SIZE** number of bytes to allocate for the symmetric + heap. + +* **SMA_DEBUG** enable debugging messages. + +The first call to SHMEM must be *start_pes*\ (3). This routine +initializes the SHMEM runtime. + +Calling any other SHMEM routines beforehand has undefined behavior. +Multiple calls to this routine are not allowed. + + +COMPILING AND RUNNING OPENSHMEM PROGRAMS +---------------------------------------- + +The OpenSHMEM specification is silent regarding how OpenSHMEM programs +are compiled, linked and run. This section shows some examples of how +wrapper programs could be utilized to compile and launch applications. +The commands are styled after wrapper programs found in many MPI +implementations. + +The following sample command lines demonstrate compiling an OpenSHMEM +program using the wrapper compilers (**oshcc** and **oshfort** in this +case): + +* C/C++: + +.. code-block:: c++ + + oshcc c_program.c + +* FORTRAN: + +.. 
code-block:: fortran + + oshfort fortran_program.f + +The following sample command line demonstrates running an OpenSHMEM +program assuming that the library provides a wrapper script for such +purpose (named **oshrun** for this example): + +:: + + oshrun -np 32 ./a.out + + +EXAMPLES +-------- + +**Example 1**: The following Fortran OpenSHMEM program directs all PEs +to sum simultaneously the numbers in the VALUES variable across all PEs: + +.. code-block:: fortran + + PROGRAM REDUCTION + REAL VALUES, SUM + COMMON /C/ VALUES + REAL WORK + + CALL START_PES(0) + VALUES = MY_PE() + CALL SHMEM_BARRIER_ALL ! Synchronize all PEs + SUM = 0.0 + DO I = 0, NUM_PES()-1 + CALL SHMEM_REAL_GET(WORK, VALUES, 1, I) ! Get next value + SUM = SUM + WORK ! Sum it + ENDDO + PRINT *, 'PE ', MY_PE(), ' COMPUTED SUM=', SUM + CALL SHMEM_BARRIER_ALL + END + +**Example 2**: The following C OpenSHMEM program transfers an array of +10 longs from PE 0 to PE 1: + +.. code-block:: c + + #include <shmem.h> + + main() { + long source[10] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 }; + static long target[10]; + + shmem_init(); + if (shmem_my_pe() == 0) { + /* put 10 elements into target on PE 1 */ + shmem_long_put(target, source, 10, 1); + } + shmem_barrier_all(); /* sync sender and receiver */ + if (shmem_my_pe() == 1) + printf("target[0] on PE %d is %ld\n", shmem_my_pe(), target[0]); + } + + +.. seealso:: + The following man pages also contain information on OpenSHMEM routines. + See the specific man pages for implementation information. 
+ + shmem_add\ (3) shmem_and\ (3) :ref:`shmem_barrier`\ (3) + :ref:`shmem_barrier_all`\ (3) shmem_broadcast\ (3) shmem_cache\ (3) + shmem_collect\ (3) shmem_cswap\ (3) shmem_fadd\ (3) + :ref:`shmem_fence`\ (3) shmem_finc\ (3) shmem_get\ (3) shmem_iget\ (3) + shmem_inc\ (3) shmem_iput\ (3) shmem_lock\ (3) shmem_max\ (3) + shmem_min\ (3) :ref:`shmem_my_pe`\ (3) shmem_or\ (3) shmem_prod\ (3) + shmem_put\ (3) :ref:`shmem_quiet`\ (3) :ref:`shmem_short_g`\ (3) + :ref:`shmem_short_p`\ (3) shmem_sum\ (3) :ref:`shmem_swap`\ (3) + :ref:`shmem_wait`\ (3) shmem_xor\ (3) :ref:`shmem_pe_accessible`\ (3) + :ref:`shmem_addr_accessible`\ (3) :ref:`shmem_init`\ (3) :ref:`shmem_malloc`\ (3) + *shmem_my_pe*\ (3) *shmem_n_pes*\ (3) diff --git a/docs/man-openshmem/man3/shfree.3.rst b/docs/man-openshmem/man3/shfree.3.rst new file mode 100644 index 00000000000..89d5c356ee5 --- /dev/null +++ b/docs/man-openshmem/man3/shfree.3.rst @@ -0,0 +1,9 @@ +.. _shfree: + +shfree +====== + .. include_body + +.. include:: ../man3/shmem_malloc.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmalloc.3.rst b/docs/man-openshmem/man3/shmalloc.3.rst new file mode 100644 index 00000000000..d4fd381ab56 --- /dev/null +++ b/docs/man-openshmem/man3/shmalloc.3.rst @@ -0,0 +1,101 @@ +.. _shmalloc: + + +shmalloc +======== + +.. include_body + +*shmalloc*\ (3), *shfree*\ (3), *shmemalign*\ (3), *shrealloc*\ (3) - +Symmetric heap memory management functions. + + +SYNOPSIS +-------- + +C or C++: + +.. code-block:: c++ + + #include + + void *shmalloc(size_t size) + + void shfree(void *ptr) + + void *shrealloc(void *ptr, size_t size) + + void *shmemalign(size_t alignment, size_t size) + + extern long malloc_error + + +DESCRIPTION +----------- + +The **shmalloc** function returns a pointer to a block of at least size +bytes suitably aligned for any use. This space is allocated from the +symmetric heap (in contrast to *malloc*\ (3), which allocates from the +private heap). 
+ +The **shmemalign** function allocates a block in the symmetric heap that +has a byte alignment specified by the alignment argument. + +The **shfree** function causes the block to which ptr points to, to be +deallocated, that is, made available for further allocation. If ptr is a +null pointer, no action occurs; otherwise, if the argument does not +match a pointer earlier returned by a symmetric heap function, or if the +space has already been deallocated, malloc_error is set to indicate the +error, and shfree returns. + +The **shrealloc** function changes the size of the block to which ptr +points to, to the size (in bytes) specified by size. + +The contents of the block are unchanged up to the lesser of the new and +old sizes. If the new size is larger, the value of the newly allocated +portion of the block is indeterminate. If ptr is a null pointer, the +shrealloc function behaves like the shmalloc function for the specified +size. If size is 0 and ptr is not a null pointer, the block to which it +points to is freed. Otherwise, if ptr does not match a pointer earlier +returned by a symmetric heap function, or if the space has already been +deallocated, the malloc_error variable is set to indicate the error, and +shrealloc returns a null pointer. If the space cannot be allocated, the +block to which ptr points to is unchanged. + +The shmalloc, shfree, and shrealloc functions are provided so that +multiple PEs in an application can allocate symmetric, remotely +accessible memory blocks. These memory blocks can then be used with +(shmem) communication routines. Each of these functions call the +:ref:`shmem_barrier_all`\ (3) function before returning; this ensures that +all PEs participate in the memory allocation, and that the memory on +other PEs can be used as soon as the local PE returns. 
+ +The user is responsible for calling these functions with identical +argument(s) on all PEs; if differing size arguments are used, subsequent +calls may not return the same symmetric heap address on all PEs. + + +NOTES +----- + +The total size of the symmetric heap is determined at job startup. One +can adjust the size of the heap using the SHMEM_SYMMETRIC_HEAP_SIZE +environment variable. See the *intro_shmem*\ (3) man page for further +details. The shmalloc, shfree, and shrealloc functions differ from the +private heap allocation functions in that all PEs in an application must +call them (a barrier is used to ensure this). + + +RETURN VALUES +------------- + +The **shmalloc** function returns a pointer to the allocated space +(which should be identical on all PEs); otherwise, it returns a null +pointer (with malloc_error set). The **shfree** function returns no +value. The **shrealloc** function returns a pointer to the allocated +space (which may have moved); otherwise, it returns a null pointer (with +malloc_error set). + + +.. seealso:: + *intro_shmem*\ (3) *my_pe*\ (3) *start_pes*\ (3) diff --git a/docs/man-openshmem/man3/shmem_addr_accessible.3.rst b/docs/man-openshmem/man3/shmem_addr_accessible.3.rst new file mode 100644 index 00000000000..ea423b3ba80 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_addr_accessible.3.rst @@ -0,0 +1,60 @@ +.. _shmem_addr_accessible: + + +shmem_addr_accessible +===================== + +.. include_body + +:ref:`shmem_addr_accessible` - Indicates if an address is accessible via +OpenSHMEM operations from the specified remote PE. + + +SYNOPSIS +-------- + +C or C++: + +.. code-block:: c++ + + #include <mpp/shmem.h> + + int shmem_addr_accessible(const void *addr, int pe) + +Fortran: + +.. 
code-block:: fortran + + INCLUDE "mpp/shmem.fh" + + LOGICAL LOG, SHMEM_ADDR_ACCESSIBLE + INTEGER pe + + LOG = SHMEM_ADDR_ACCESSIBLE(addr, pe) + + +DESCRIPTION +----------- + +:ref:`shmem_addr_accessible` is a query function that indicates whether a local +address is accessible via SHMEM operations from the specified remote PE. + +This function verifies that the remote PE is accessible via SHMEM data +transfer functions from the local PE, and that the specified address is +in a symmetric data segment with respect to the remote PE. + + +RETURN VALUES +------------- + +C: The return value is 1 if addr is a symmetric data object and +accessible via SHMEM operations from the specified remote PE; otherwise, +it is 0. + +Fortran: The return value is .TRUE. if addr is a symmetric data object +and accessible via SHMEM operations from the specified remote PE; +otherwise, it is .FALSE.. + + +.. seealso:: + *intro_shmem*\ (3) *shmem_pe_accessible*\ (3) diff --git a/docs/man-openshmem/man3/shmem_align.3.rst b/docs/man-openshmem/man3/shmem_align.3.rst new file mode 100644 index 00000000000..b4bf905556d --- /dev/null +++ b/docs/man-openshmem/man3/shmem_align.3.rst @@ -0,0 +1,9 @@ +.. _shmem_align: + +shmem_align +=========== + .. include_body + +.. include:: ../man3/shmem_malloc.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_alltoall32.3.rst b/docs/man-openshmem/man3/shmem_alltoall32.3.rst new file mode 100644 index 00000000000..b0da99d54d9 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_alltoall32.3.rst @@ -0,0 +1,242 @@ +.. _shmem_alltoall32: + + +shmem_alltoall32 +================ + +.. include_body + +:ref:`shmem_alltoall32`\ (3), :ref:`shmem_alltoall64`\ (3), +:ref:`shmem_alltoalls32`\ (3), :ref:`shmem_alltoalls64`\ (3) - collective routine +where each PE exchanges a fixed amount of data with all other PEs in the +Active set + + +SYNOPSIS +-------- + +C or C++: + +.. 
code-block:: c++ + + #include + + void shmem_alltoall32(void *target, const void *source, + size_t nelems, int PE_start, int logPE_stride, + int PE_size, long *pSync) + + void shmem_alltoall64(void *target, const void *source, + size_t nelems, int PE_start, int logPE_stride, + int PE_size, long *pSync) + + void shmem_alltoalls32(void *target, const void *source, + ptrdiff_t dst, ptrdiff_t sst, + size_t nelems, int PE_start, int logPE_stride, + int PE_size, long *pSync) + + void shmem_alltoalls64(void *target, const void *source, + ptrdiff_t dst, ptrdiff_t sst, + size_t nelems, int PE_start, int logPE_stride, + int PE_size, long *pSync) + +Fortran: + +.. code-block:: fortran + + INCLUDE "mpp/shmem.fh" + + INTEGER dst, sst, nelems, PE_root, PE_start, logPE_stride, PE_size + INTEGER pSync(SHMEM_ALLTOALL_SYNC_SIZE) + + CALL SHMEM_ALLTOALL32(target, source, nelems, + & PE_start, logPE_stride, PE_size, pSync) + + CALL SHMEM_ALLTOALL64(target, source, nelems, + & PE_start, logPE_stride, PE_size, pSync) + + CALL SHMEM_ALLTOALLS32(target, source, dst, sst, nelems, + & PE_start, logPE_stride, PE_size, pSync) + + CALL SHMEM_ALLTOALLS64(target, source, dst, sst, nelems, + & PE_start, logPE_stride, PE_size, pSync) + + +DESCRIPTION +----------- + +The shmem_alltoalls routines are collective routines. Each PE in the +Active set exchanges nelems strided data elements of size 32 bits (for +shmem_alltoalls32) or 64 bits (for shmem_alltoalls64) with all other PEs +in the set. Both strides, dst and sst, must be greater than or equal to +1. The sst*jth block sent from PE i to PE j is placed in the dst*ith +block of the dest data object on PE j. As with all OpenSHMEM collective +routines, these routines assume that only PEs in the Active set call the +routine. If a PE not in the Active set calls an OpenSHMEM collective +routine, undefined behavior results. The values of arguments dst, sst, +nelems, PE_start, logPE_stride, and PE_size must be equal on all PEs in +the Active set. 
The same dest and source data objects, and the same +pSync work array must be passed to all PEs in the Active set. Before any +PE calls to a shmem_alltoalls routine, the following conditions must +exist (synchronization via a barrier or some other method is often +needed to ensure this): The pSync array on all PEs in the Active set is +not still in use from a prior call to a shmem_alltoalls routine. The +dest data object on all PEs in the Active set is ready to accept the +shmem_alltoalls data. Upon return from a shmem_alltoalls routine, the +following is true for the local PE: Its dest symmetric data object is +completely updated and the data has been copied out of the source data +object. The values in the pSync array are restored to the original +values. + +The arguments are as follows: + +A symmetric data object with one of the following data types: + + :ref:`shmem_alltoall32`: Any noncharacter type that + has an element size of 32 bits. No Fortran derived types or C/C++ + structures are allowed. + + :ref:`shmem_alltoall64`: Any noncharacter type that has an element size + of 64 bits. No Fortran derived types or C/C++ structures are + allowed. + +target A symmetric data object large enough to receive the combined +total of nelems elements from each PE in the Active set. + +source + A symmetric data object that contains nelems elements of data for + each PE in the Active set, ordered according to destination PE. + +dst + The stride between consecutive elements of the dest data object. The + stride is scaled by the element size. A value of 1 indicates + contiguous data. dst must be of type ptrdiff_t. If you are using + Fortran, it must be a default integer value. + +sst + The stride between consecutive elements of the source data object. + The stride is scaled by the element size. A value of 1 indicates + contiguous data. sst must be of type ptrdiff_t. If you are using + Fortran, it must be a default integer value. 
+ +nelems + The number of elements to exchange for each PE. nelems must be of + type size_t for C/C++. If you are using Fortran, it must be a default + integer value + +PE_start + The lowest virtual PE number of the active set of PEs. PE_start must + be of type integer. If you are using Fortran, it must be a default + integer value. + +logPE_stride + The log (base 2) of the stride between consecutive virtual PE numbers + in the active set. log_PE_stride must be of type integer. If you are + using Fortran, it must be a default integer value. + +PE_size + The number of PEs in the active set. PE_size must be of type integer. + If you are using Fortran, it must be a default integer value. + +pSync + A symmetric work array. In C/C++, pSync must be of type long and size + \_SHMEM_ALLTOALL_SYNC_SIZE. In Fortran, pSync must be of type integer + and size SHMEM_ALLTOALL_SYNC_SIZE. Every element of this array must + be initialized with the value \_SHMEM_SYNC_VALUE (in C/C++) or + SHMEM_SYNC_VALUE (in Fortran) before any of the PEs in the active set + enter shmem_barrier(). + +Upon return from a shmem_alltoalls routine, the following is true for +the local PE: Its dest symmetric data object is completely updated and +the data has been copied out of the source data object. The values in +the pSync array are restored to the original values. + +The values of arguments PE_root, PE_start, logPE_stride, and PE_size +must be equal on all PEs in the active set. The same target and source +data objects and the same pSync work array must be passed to all PEs in +the active set. + +Before any PE calls a alltoall routine, you must ensure that the +following conditions exist (synchronization via a barrier or some other +method is often needed to ensure this): The pSync array on all PEs in +the active set is not still in use from a prior call to a alltoall +routine. The target array on all PEs in the active set is ready to +accept the alltoall data. 
+ + +NOTES +----- + +The terms collective and symmetric are defined in *intro_shmem*\ (3). + +All SHMEM alltoall routines restore pSync to its original contents. +Multiple calls to SHMEM routines that use the same pSync array do not +require that pSync be reinitialized after the first call. + +You must ensure that the pSync array is not being updated by any PE +in the active set while any of the PEs participates in processing of a +SHMEM broadcast routine. Be careful to avoid these situations: If the +pSync array is initialized at run time, some type of synchronization is +needed to ensure that all PEs in the working set have initialized pSync +before any of them enter a SHMEM routine called with the pSync +synchronization array. A pSync array may be reused on a subsequent SHMEM +broadcast routine only if none of the PEs in the active set are still +processing a prior SHMEM alltoall routine call that used the same pSync +array. In general, this can be ensured only by doing some type of +synchronization. However, in the special case of SHMEM routines being +called with the same active set, you can allocate two pSync arrays and +alternate between them on successive calls. + + +EXAMPLES +-------- + +C/C++ example: + +.. code-block:: c++ + + #include + #include + + long pSync[SHMEM_ALLTOALL_SYNC_SIZE]; + int main(void) + { + int64_t *source, *dest; + int i, count, pe; + shmem_init(); + count = 2; + dest = (int64_t*) shmem_malloc(count * shmem_n_pes() * sizeof(int64_t)); + source = (int64_t*) shmem_malloc(count * shmem_n_pes() * sizeof(int64_t)); + /* assign source values */ + for (pe=0; pe + + void shmem_barrier(int PE_start, int logPE_stride, int PE_size, + long *pSync) + +Fortran: + +.. 
code-block:: fortran + + INCLUDE "mpp/shmem.fh" + + INTEGER PE_start, logPE_stride, PE_size + INTEGER pSync(SHMEM_BARRIER_SYNC_SIZE) + + CALL SHMEM_BARRIER(PE_start, logPE_stride, PE_size, pSync) + + +DESCRIPTION +----------- + +The :ref:`shmem_barrier` routine does not return until the subset of PEs +specified by **PE_start**, **logPE_stride** and **PE_size**, has entered +this routine at the same point of the execution path. + +As with all SHMEM collective routines, each of these routines assumes +that only PEs in the active set call the routine. If a PE not in the +active set calls a SHMEM collective routine, undefined behavior results. + +The arguments are as follows: + +PE_start + The lowest virtual PE number of the active set of PEs. PE_start must + be of type integer. If you are using Fortran, it must be a default + integer value. + +logPE_stride + The log (base 2) of the stride between consecutive virtual PE numbers + in the active set. logPE_stride must be of type integer. If you are + using Fortran, it must be a default integer value. + +PE_size + The number of PEs in the active set. PE_size must be of type integer. + If you are using Fortran, it must be a default integer value. + +pSync + A symmetric work array. In C/C++, pSync must be of type int and size + \_SHMEM_BARRIER_SYNC_SIZE. In Fortran, pSync must be of type integer + and size SHMEM_BARRIER_SYNC_SIZE. If you are using Fortran, it must + be a default integer type. Every element of this array must be + initialized to 0 before any of the PEs in the active set enter + :ref:`shmem_barrier` the first time. + +The values of arguments PE_start, logPE_stride, and PE_size must be +equal on all PEs in the active set. The same work array must be passed +in pSync to all PEs in the active set. 
+ +:ref:`shmem_barrier` ensures that all previously issued local stores and +previously issued remote memory updates done by any of the PEs in the +active set (by using SHMEM calls, for example shmem_put\ (3)) are +complete before returning. + +The same pSync array may be reused on consecutive calls to :ref:`shmem_barrier` +if the same active PE set is used. + + +NOTES +----- + +The term symmetric is defined in *intro_shmem*\ (3). + +If the pSync array is initialized at run time, be sure to use some type +of synchronization, for example, a call to :ref:`shmem_barrier_all`\ (3), +before calling :ref:`shmem_barrier` for the first time. + +If the active set does not change, :ref:`shmem_barrier` can be called +repeatedly with the same pSync array. No additional synchronization +beyond that implied by :ref:`shmem_barrier` itself is necessary in this case. + + +EXAMPLES +-------- + +C/C++ example: + +.. code-block:: c++ + + shmem_barrier(PE_start, logPE_stride, size, pSync); + +Fortran example: + +.. code-block:: fortran + + INTEGER PSYNC(SHMEM_BARRIER_SYNC_SIZE) + INTEGER PE_START, LOGPE_STRIDE, PE_SIZE, PSYNC + DATA PSYNC /SHMEM_BARRIER_SYNC_SIZE*0/ + + CALL SHMEM_BARRIER(PE_START, LOGPE_STRIDE, PE_SIZE, PSYNC) + + +.. seealso:: + *intro_shmem*\ (3) *shmem_barrier_all*\ (3) diff --git a/docs/man-openshmem/man3/shmem_barrier_all.3.rst b/docs/man-openshmem/man3/shmem_barrier_all.3.rst new file mode 100644 index 00000000000..af5ca8ada87 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_barrier_all.3.rst @@ -0,0 +1,61 @@ +.. _shmem_barrier_all: + + +shmem_barrier_all +================= + +.. include_body + +:ref:`shmem_barrier_all` - Suspends the execution of the calling PE until all +other PEs issue a call to this particular shmem_barrier_all() statement. + + +SYNOPSIS +-------- + +C or C++: + +.. code-block:: c++ + + #include + + void shmem_barrier_all(void) + +Fortran: + +.. 
code-block:: fortran + + include 'mpp/shmem.h' + + CALL SHMEM_BARRIER_ALL + + +DESCRIPTION +----------- + +The :ref:`shmem_barrier_all` routine does not return until all other PEs have +entered this routine at the same point of the execution path. + +Prior to synchronizing with other PEs, :ref:`shmem_barrier_all` ensures +completion of all previously issued local memory stores and remote +memory updates issued via SHMEM functions such as :ref:`shmem_put32`\ (3). + + +EXAMPLES +-------- + +:: + + setup_data() + { + if (shmem_my_pe() == 0) { + setup(); + } + + /* All PEs wait for PE 0 to complete setup(). */ + shmem_barrier_all(); + } + + +.. seealso:: + *shmem_barrier*\ (3) *shmem_init*\ (3) diff --git a/docs/man-openshmem/man3/shmem_broadcast32.3.rst b/docs/man-openshmem/man3/shmem_broadcast32.3.rst new file mode 100644 index 00000000000..a01ea243c88 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_broadcast32.3.rst @@ -0,0 +1,196 @@ +.. _shmem_broadcast32: + + +shmem_broadcast32 +================= + +.. include_body + +shmem_broadcast4\ (3), shmem_broadcast8\ (3), +:ref:`shmem_broadcast32`\ (3), :ref:`shmem_broadcast64`\ (3) - Copy a data object +from a designated PE to a target location on all other PEs of the active +set. + + +SYNOPSIS +-------- + +C or C++: + +.. code-block:: c++ + + #include + + void shmem_broadcast32(void *target, const void *source, + size_t nelems, int PE_root, int PE_start, int logPE_stride, + int PE_size, long *pSync) + + void shmem_broadcast64(void *target, const void *source, + size_t nelems, int PE_root, int PE_start, int logPE_stride, + int PE_size, long *pSync) + +Fortran: + +.. 
code-block:: fortran + + INCLUDE "mpp/shmem.fh" + + INTEGER nelems, PE_root, PE_start, logPE_stride, PE_size + INTEGER pSync(SHMEM_BCAST_SYNC_SIZE) + + CALL SHMEM_BROADCAST4(target, source, nelems, PE_root, + & PE_start, logPE_stride, PE_size, pSync) + + CALL SHMEM_BROADCAST8(target, source, nelems, PE_root, + & PE_start, logPE_stride, PE_size, pSync) + + CALL SHMEM_BROADCAST32(target, source, nelems, + & PE_root, PE_start, logPE_stride, PE_size, pSync) + + CALL SHMEM_BROADCAST64(target, source, nelems, + & PE_root, PE_start, logPE_stride, PE_size, pSync) + + +DESCRIPTION +----------- + +The broadcast routines write the data at address source of the PE +specified by **PE_root** to address **target** on all other PEs in the +active set. The active set of PEs is defined by the triplet +**PE_start**, **logPE_stride** and **PE_size**. The data is not copied +to the target address on the PE specified by **PE_root**. Before +returning, the broadcast routines ensure that the elements of the pSync +array are restored to their initial values. + +As with all SHMEM collective routines, each of these routines assumes +that only PEs in the active set call the routine. If a PE not in the +active set calls a SHMEM collective routine, undefined behavior results. + +The arguments are as follows: + +target + A symmetric data object with one of the following data types: + + shmem_broadcast8, :ref:`shmem_broadcast64`: Any noncharacter type that + has an element size of 64 bits. No Fortran derived types or C/C++ + structures are allowed. + + :ref:`shmem_broadcast32`: Any noncharacter type that has an element size + of 32 bits. No Fortran derived types or C/C++ structures are + allowed. + + shmem_broadcast4: Any noncharacter type that has an element size + of 32 bits. + +source + A symmetric data object that can be of any data type that is + permissible for the target argument. + +nelems + The number of elements in source. 
For :ref:`shmem_broadcast32` and + shmem_broadcast4, this is the number of 32-bit halfwords. nelems must + be of type integer. If you are using Fortran, it must be a default + integer value. + +PE_root + Zero-based ordinal of the PE, with respect to the active set, from + which the data is copied. Must be greater than or equal to 0 and less + than PE_size. PE_root must be of type integer. If you are using + Fortran, it must be a default integer value. + +PE_start + The lowest virtual PE number of the active set of PEs. PE_start must + be of type integer. If you are using Fortran, it must be a default + integer value. + +logPE_stride + The log (base 2) of the stride between consecutive virtual PE numbers + in the active set. log_PE_stride must be of type integer. If you are + using Fortran, it must be a default integer value. + +PE_size + The number of PEs in the active set. PE_size must be of type integer. + If you are using Fortran, it must be a default integer value. + +pSync + A symmetric work array. In C/C++, pSync must be of type long and size + \_SHMEM_BCAST_SYNC_SIZE. In Fortran, pSync must be of type integer + and size SHMEM_BCAST_SYNC_SIZE. Every element of this array must be + initialized with the value \_SHMEM_SYNC_VALUE (in C/C++) or + SHMEM_SYNC_VALUE (in Fortran) before any of the PEs in the active set + enter shmem_barrier(). + +The values of arguments PE_root, PE_start, logPE_stride, and PE_size +must be equal on all PEs in the active set. The same target and source +data objects and the same pSync work array must be passed to all PEs in +the active set. + +Before any PE calls a broadcast routine, you must ensure that the +following conditions exist (synchronization via a barrier or some other +method is often needed to ensure this): The pSync array on all PEs in +the active set is not still in use from a prior call to a broadcast +routine. The target array on all PEs in the active set is ready to +accept the broadcast data. 
+ +Upon return from a broadcast routine, the following are true for the +local PE: If the current PE is not the root PE, the target data object +is updated. The values in the pSync array are restored to the original +values. + + +NOTES +----- + +The terms collective and symmetric are defined in *intro_shmem*\ (3). + +All SHMEM broadcast routines restore pSync to its original contents. +Multiple calls to SHMEM routines that use the same pSync array do not +require that pSync be reinitialized after the first call. + +You must ensure that the pSync array is not being updated by any PE +in the active set while any of the PEs participates in processing of a +SHMEM broadcast routine. Be careful to avoid these situations: If the +pSync array is initialized at run time, some type of synchronization is +needed to ensure that all PEs in the working set have initialized pSync +before any of them enter a SHMEM routine called with the pSync +synchronization array. A pSync array may be reused on a subsequent SHMEM +broadcast routine only if none of the PEs in the active set are still +processing a prior SHMEM broadcast routine call that used the same pSync +array. In general, this can be ensured only by doing some type of +synchronization. However, in the special case of SHMEM routines being +called with the same active set, you can allocate two pSync arrays and +alternate between them on successive calls. + + +EXAMPLES +-------- + +In the following examples, the call to :ref:`shmem_broadcast64` copies source +on PE 4 to target on PEs 5, 6, and 7. + +C/C++ example: + +.. code-block:: c++ + + for (i=0; i < _SHMEM_BCAST_SYNC_SIZE; i++) { + pSync[i] = _SHMEM_SYNC_VALUE; + } + shmem_barrier_all(); /* Wait for all PEs to initialize pSync */ + shmem_broadcast64(target, source, nelems, 0, 4, 0, 4, pSync); + +Fortran example: + +.. 
code-block:: fortran + + INTEGER PSYNC(SHMEM_BCAST_SYNC_SIZE) + INTEGER TARGET, SOURCE, NELEMS, PE_ROOT, PE_START, + & LOGPE_STRIDE, PE_SIZE, PSYNC + COMMON /COM/ TARGET, SOURCE + DATA PSYNC /SHMEM_BCAST_SYNC_SIZE*SHMEM_SYNC_VALUE/ + + CALL SHMEM_BROADCAST64(TARGET, SOURCE, NELEMS, 0, 4, 0, 4, + & PSYNC) + + +.. seealso:: + *intro_shmem*\ (3) diff --git a/docs/man-openshmem/man3/shmem_broadcast64.3.rst b/docs/man-openshmem/man3/shmem_broadcast64.3.rst new file mode 100644 index 00000000000..383cf7aa221 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_broadcast64.3.rst @@ -0,0 +1,9 @@ +.. _shmem_broadcast64: + +shmem_broadcast64 +================= + .. include_body + +.. include:: ../man3/shmem_broadcast32.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_char_g.3.rst b/docs/man-openshmem/man3/shmem_char_g.3.rst new file mode 100644 index 00000000000..e18e8d1a427 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_char_g.3.rst @@ -0,0 +1,60 @@ +.. _shmem_char_g: + + +shmem_char_g +============ + +.. include_body + +:ref:`shmem_char_g`\ (3), :ref:`shmem_float_g`\ (3), :ref:`shmem_int_g`\ (3), +:ref:`shmem_long_g`\ (3), :ref:`shmem_short_g`\ (3), :ref:`shmem_longlong_g`\ (3), +:ref:`shmem_longdouble_g`\ (3) - These routines provide a low latency +mechanism to read basic types (char, short, int, float, double, long, +long long, long double) from symmetric data objects on remote PEs. + + +SYNOPSIS +-------- + +C or C++: + +.. 
code-block:: c++ + + #include + + + char shmem_char_g(const char *addr, int pe) + + short shmem_short_g(const short *addr, int pe) + + int shmem_int_g(const int *addr, int pe) + + long shmem_long_g(const long *addr, int pe) + + long shmem_longlong_g(const long long *addr, int pe) + + float shmem_float_g(const float *addr, int pe) + + double shmem_double_g(const double *addr, int pe) + + long shmem_longdouble_g(const long double *addr, int pe) + + +DESCRIPTION +----------- + +These routines provide a very low latency get capability for single +elements of most basic types. + +The arguments are as follows: + +addr + The remotely accessible array element or scalar data object which + will receive the data on the remote PE. + +pe + The number of the remote PE. + + +.. seealso:: + *intro_shmem*\ (3) *shmem_get*\ (3) diff --git a/docs/man-openshmem/man3/shmem_char_get.3.rst b/docs/man-openshmem/man3/shmem_char_get.3.rst new file mode 100644 index 00000000000..c9ebfbecf44 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_char_get.3.rst @@ -0,0 +1,202 @@ +.. _shmem_char_get: + + +shmem_char_get +============== + +.. include_body + +shmem_character_get\ (3), shmem_complex_get\ (3), +:ref:`shmem_double_get`\ (3), :ref:`shmem_float_get`\ (3), shmem_get4\ +(3), shmem_get8\ (3), :ref:`shmem_get32`\ (3), :ref:`shmem_get64`\ +(3), :ref:`shmem_get128`\ (3), :ref:`shmem_getmem`\ (3), +:ref:`shmem_int_get`\ (3), shmem_integer_get\ (3), shmem_logical_get\ +(3), :ref:`shmem_long_get`\ (3), :ref:`shmem_longdouble_get`\ (3), +:ref:`shmem_longlong_get`\ (3), shmem_real_get\ (3), +:ref:`shmem_short_get`\ (3) - Transfers data from a specified +processing element (PE). + + +SYNOPSIS +-------- + +C or C++: + +.. 
code-block:: c++ + + #include + + void shmem_get32(void *target, const void *source, + size_t len, int pe) + + void shmem_get64(void *target, const void *source, + size_t len, int pe) + + void shmem_get128(void *target, const void *source, + size_t len, int pe) + + void shmem_getmem(void *target, const void *source, + size_t len, int pe) + + void shmem_int_get(int *target, const int *source, + size_t len, int pe) + + void shmem_double_get(double *target, const double *source, + size_t len, int pe) + + void shmem_float_get(float *target, const float *source, + size_t len, int pe) + + void shmem_long_get(long *target, const long *source, + size_t len, int pe) + + void shmem_longdouble_get(long double *target, + const long double *source, size_t len, int pe) + + void shmem_longlong_get(long long *target, + const long long *source, size_t len, int pe) + + void shmem_short_get(short *target, + const short *source, size_t len, int pe) + +Fortran: + +.. code-block:: fortran + + INCLUDE "mpp/shmem.fh" + + INTEGER len, pe + + CALL SHMEM_CHARACTER_GET(target, source, len, pe) + + CALL SHMEM_COMPLEX_GET(target, source, len, pe) + + CALL SHMEM_DOUBLE_GET(target, source, len, pe) + + CALL SHMEM_GET4(target, source, len, pe) + + CALL SHMEM_GET8(target, source, len, pe) + + CALL SHMEM_GET32(target, source, len, pe) + + CALL SHMEM_GET64(target, source, len, pe) + + CALL SHMEM_GET128(target, source, len, pe) + + CALL SHMEM_GETMEM(target, source, len, pe) + + CALL SHMEM_INTEGER_GET(target, source, len, pe) + + CALL SHMEM_LOGICAL_GET(target, source, len, pe) + + CALL SHMEM_REAL_GET(target, source, len, pe) + + +DESCRIPTION +----------- + +The shmem_get routines transfer **nelems** elements of the data object +at address **source** on the remote PE **pe**, to the data object at +address **target** on the local PE. These routines return after the data +has been copied to address **target** on the local PE. + +The arguments are as follows: + +target + Local data object to be updated. 
+ +source + Data object on the PE identified by pe that contains the data to be + copied. This data object must be remotely accessible. + +len + Number of elements in the target and source arrays. len must be of + type integer. If you are using Fortran, it must be a constant, + variable, or array element of default integer type. + +pe + PE number of the remote PE. pe must be of type integer. If you are + using Fortran, it must be a constant, variable, or array element of + default integer type. + +The target and source data objects must conform to typing constraints, +which are as follows: + +:ref:`shmem_getmem`: Fortran: Any noncharacter type. C: Any data type. len is + scaled in bytes. + +shmem_get4, :ref:`shmem_get32`: Any noncharacter type that has a storage size + equal to 32 bits. + +{shmem_get8, :ref:`shmem_get64`}: Any noncharacter type that has a storage size equal to + 64 bits. + +:ref:`shmem_get128`: Any noncharacter type that has a storage size equal to 128 + bits. + +:ref:`shmem_short_get`: Elements of type short. + +:ref:`shmem_int_get`: Elements of type int. + +:ref:`shmem_long_get`: Elements of type long. + +:ref:`shmem_longlong_get`: Elements of type long long. + +:ref:`shmem_float_get`: Elements of type float. + +:ref:`shmem_double_get`: Elements of type double. + +:ref:`shmem_longdouble_get`: Elements of type long double. + +**SHMEM_CHARACTER_GET**: Elements of type character. len is the number of + characters to transfer. The actual character lengths of the source + and target variables are ignored. + +**SHMEM_COMPLEX_GET**: Elements of type complex of default size. + +**SHMEM_DOUBLE_GET**: (Fortran) Elements of type double precision. + +**SHMEM_INTEGER_GET**: Elements of type integer. + +**SHMEM_LOGICAL_GET**: Elements of type logical. + +**SHMEM_REAL_GET**: Elements of type real. + +If you are using Fortran, data types must be of default size. For +example, a real variable must be declared as REAL, REAL*4, or +REAL(KIND=4). 
+ + +NOTES +----- + +See *intro_shmem*\ (3) for a definition of the term remotely accessible. + + +EXAMPLES +-------- + +Consider this simple example for Fortran. + +.. code-block:: fortran + + PROGRAM REDUCTION + REAL VALUES, SUM + COMMON /C/ VALUES + REAL WORK + + CALL START_PES(0) ! ALLOW ANY NUMBER OF PES + VALUES = MY_PE() ! INITIALIZE IT TO SOMETHING + CALL SHMEM_BARRIER_ALL + SUM = 0.0 + DO I = 0,NUM_PES()-1 + CALL SHMEM_REAL_GET(WORK, VALUES, 1, I) + SUM = SUM + WORK + ENDDO + PRINT *, 'PE ', MY_PE(), ' COMPUTED SUM=', SUM + CALL SHMEM_BARRIER_ALL + END + + +.. seealso:: + *intro_shmem*\ (3) *shmem_put*\ (3) *shmem_iget*\ (3) *shmem_quiet*\ (3) diff --git a/docs/man-openshmem/man3/shmem_char_get_nbi.3.rst b/docs/man-openshmem/man3/shmem_char_get_nbi.3.rst new file mode 100644 index 00000000000..88d70ada121 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_char_get_nbi.3.rst @@ -0,0 +1,9 @@ +.. _shmem_char_get_nbi: + +shmem_char_get_nbi +================== + .. include_body + +.. include:: ../man3/shmem_getmem_nbi.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_char_p.3.rst b/docs/man-openshmem/man3/shmem_char_p.3.rst new file mode 100644 index 00000000000..24204ac2e76 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_char_p.3.rst @@ -0,0 +1,67 @@ +.. _shmem_char_p: + + +shmem_char_p +============ + +.. include_body + +:ref:`shmem_char_p`\ (3), :ref:`shmem_float_p`\ (3), :ref:`shmem_int_p`\ (3), +:ref:`shmem_long_p`\ (3), :ref:`shmem_short_p`\ (3), :ref:`shmem_longlong_p`\ (3), +:ref:`shmem_longdouble_p`\ (3) - These routines provide a low latency +mechanism to write basic types (char, short, int, float, double, long, +long long, long double) to symmetric data objects on remote PEs. + + +SYNOPSIS +-------- + +C or C++: + +.. 
code-block:: c++ + + #include + + + void shmem_char_p(char *addr, char value, int pe) + + void shmem_short_p(short *addr, short value, int pe) + + void shmem_int_p(int *addr, int value, int pe) + + void shmem_long_p(long *addr, long value, int pe) + + void shmem_longlong_p(long long *addr, long long value, int pe) + + void shmem_float_p(float *addr, float value, int pe) + + void shmem_double_p(double *addr, double value, int pe) + + void shmem_longdouble_p(long double *addr, long double value, int pe) + + +DESCRIPTION +----------- + +These routines provide a very low latency put capability for single +elements of most basic types. + +The arguments are as follows: + +addr + The remotely accessible array element or scalar data object which + will receive the data on the remote PE. + +value + The value to be transferred to addr on the remote PE. + +pe + The number of the remote PE. + +As with shmem_put\ (3), these functions start the remote transfer and +may return before the data is delivered to the remote PE. Use +:ref:`shmem_quiet`\ (3) to force completion of all remote PUT transfers. + + +.. seealso:: + *intro_shmem*\ (3) *shmem_put*\ (3) diff --git a/docs/man-openshmem/man3/shmem_char_put.3.rst b/docs/man-openshmem/man3/shmem_char_put.3.rst new file mode 100644 index 00000000000..b283cf0eb3b --- /dev/null +++ b/docs/man-openshmem/man3/shmem_char_put.3.rst @@ -0,0 +1,203 @@ +.. _shmem_char_put: + + +shmem_char_put +============== + +.. 
include_body + +shmem_character_put\ (3), shmem_complex_put\ (3), +:ref:`shmem_double_put`\ (3), :ref:`shmem_float_put`\ (3), :ref:`shmem_int_put`\ (3), +shmem_integer_put\ (3), shmem_logical_put\ (3), +:ref:`shmem_long_put`\ (3), :ref:`shmem_longdouble_put`\ (3), +:ref:`shmem_longlong_put`\ (3), shmem_put4\ (3), shmem_put8\ (3), +:ref:`shmem_put32`\ (3), :ref:`shmem_put64`\ (3), :ref:`shmem_put128`\ (3), +:ref:`shmem_putmem`\ (3), shmem_real_put\ (3), :ref:`shmem_short_put`\ (3) - +Transfers data to a specified processing element (PE) + + +SYNOPSIS +-------- + +C or C++: + +.. code-block:: c++ + + #include + + void shmem_double_put(double *target, const double *source, + size_t len, int pe) + + void shmem_float_put(float *target, const float *source, + size_t len, int pe) + + void shmem_int_put(int *target, const int *source, size_t len, + int pe) + + void shmem_long_put(long *target, const long *source, + size_t len, int pe) + + void shmem_longdouble_put(long double *target, + const long double *source, size_t len, int pe) + + void shmem_longlong_put(long long *target, + const long long *source, size_t len, int pe) + + void shmem_put32(void *target, const void *source, size_t len, + int pe) + + void shmem_put64(void *target, const void *source, size_t len, + int pe) + + void shmem_put128(void *target, const void *source, size_t len, + int pe) + + void shmem_putmem(void *target, const void *source, size_t len, + int pe) + + void shmem_short_put(short *target, const short *source, + size_t len, int pe) + +Fortran: + +.. 
code-block:: fortran + + INCLUDE "mpp/shmem.fh" + + INTEGER len, pe + + CALL SHMEM_CHARACTER_PUT(target, source, len, pe) + + CALL SHMEM_COMPLEX_PUT(target, source, len, pe) + + CALL SHMEM_DOUBLE_PUT(target, source, len, pe) + + CALL SHMEM_INTEGER_PUT(target, source, len, pe) + + CALL SHMEM_LOGICAL_PUT(target, source, len, pe) + + CALL SHMEM_PUT(target, source, len, pe) + + CALL SHMEM_PUT4(target, source, len, pe) + + CALL SHMEM_PUT8(target, source, len, pe) + + CALL SHMEM_PUT32(target, source, len, pe) + + CALL SHMEM_PUT64(target, source, len, pe) + + CALL SHMEM_PUT128(target, source, len, pe) + + CALL SHMEM_PUTMEM(target, source, len, pe) + + CALL SHMEM_REAL_PUT(target, source, len, pe) + + +DESCRIPTION +----------- + +These routines transfer **nelems** elements of the data object at +address **source** on the calling PE, to the data object at address +**target** on the remote PE **pe**. These routines start the remote +transfer and may return before the data is delivered to the remote PE. + +The delivery of data into the data object on the destination PE from +different put calls may occur in any order. Because of this, two +successive put operations may deliver data out of order unless a call to +:ref:`shmem_fence`\ (3) is introduced between the two calls. + +The arguments are as follows: + +target + Data object to be updated on the remote PE. This data object must be + remotely accessible. + +source + Data object containing the data to be copied. + +len + Number of elements in the target and source arrays. len must be of + type integer. If you are using Fortran, it must be a constant, + variable, or array element of default integer type. + +pe + PE number of the remote PE. pe must be of type integer. If you are + using Fortran, it must be a constant, variable, or array element of + default integer type. 
 + +The target and source data objects must conform to certain typing +constraints, which are as follows: + +:ref:`shmem_putmem`: Fortran: Any noncharacter type. C: Any data type. len is scaled in + bytes. + +shmem_put4, :ref:`shmem_put32`: Any noncharacter type that has a storage size + equal to 32 bits. + +shmem_put8, :ref:`shmem_put64`: Any noncharacter type that has a storage size + equal to 64 bits. + +:ref:`shmem_put128`: Any noncharacter type that has a storage size equal to 128 + bits. + +:ref:`shmem_short_put`: Elements of type short. + +:ref:`shmem_int_put`: Elements of type int. + +:ref:`shmem_long_put`: Elements of type long. + +:ref:`shmem_longlong_put`: Elements of type long long. + +:ref:`shmem_float_put`: Elements of type float. + +:ref:`shmem_double_put`: Elements of type double. + +:ref:`shmem_longdouble_put`: Elements of type long double. + +**SHMEM_CHARACTER_PUT:** Elements of type character. len is the number of + characters to transfer. The actual character lengths of the source + and target variables are ignored. + +**SHMEM_COMPLEX_PUT:** Elements of type complex of default size. + +**SHMEM_DOUBLE_PUT:** (Fortran) Elements of type double precision. + +**SHMEM_INTEGER_PUT:** Elements of type integer. + +**SHMEM_LOGICAL_PUT:** Elements of type logical. + +**SHMEM_REAL_PUT:** Elements of type real. + If you are using Fortran, data types must be of default size. For + example, a real variable must be declared as REAL, REAL*4, or + REAL(KIND=4). + + +EXAMPLES +-------- + +The following shmem_put example is for C/C++ programs: + +.. 
code-block:: c++ + + #include <stdio.h> + #include <mpp/shmem.h> + + main() + { + long source[10] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 }; + static long target[10]; + shmem_init(); + + if (shmem_my_pe() == 0) { + /* put 10 words into target on PE 1 */ + shmem_long_put(target, source, 10, 1); + } + shmem_barrier_all(); /* sync sender and receiver */ + if (shmem_my_pe() == 1) + shmem_udcflush(); /* not required on Altix systems */ + printf("target[0] on PE %d is %ld\n", shmem_my_pe(), target[0]); + } + + +.. seealso:: + *intro_shmem*\ (3) *shmem_iput*\ (3) *shmem_quiet*\ (3) diff --git a/docs/man-openshmem/man3/shmem_char_put_nbi.3.rst b/docs/man-openshmem/man3/shmem_char_put_nbi.3.rst new file mode 100644 index 00000000000..9bed46342fc --- /dev/null +++ b/docs/man-openshmem/man3/shmem_char_put_nbi.3.rst @@ -0,0 +1,9 @@ +.. _shmem_char_put_nbi: + +shmem_char_put_nbi +================== + .. include_body + +.. include:: ../man3/shmem_putmem_nbi.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_clear_cache_inv.3.rst b/docs/man-openshmem/man3/shmem_clear_cache_inv.3.rst new file mode 100644 index 00000000000..ef37bb3f209 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_clear_cache_inv.3.rst @@ -0,0 +1,9 @@ +.. _shmem_clear_cache_inv: + +shmem_clear_cache_inv +===================== + .. include_body + +.. include:: ../man3/shmem_udcflush.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_clear_cache_line_inv.3.rst b/docs/man-openshmem/man3/shmem_clear_cache_line_inv.3.rst new file mode 100644 index 00000000000..a3d6658b0cc --- /dev/null +++ b/docs/man-openshmem/man3/shmem_clear_cache_line_inv.3.rst @@ -0,0 +1,9 @@ +.. _shmem_clear_cache_line_inv: + +shmem_clear_cache_line_inv +========================== + .. include_body + +.. include:: ../man3/shmem_udcflush.3.rst + :start-after: .. 
include_body + diff --git a/docs/man-openshmem/man3/shmem_clear_lock.3.rst b/docs/man-openshmem/man3/shmem_clear_lock.3.rst new file mode 100644 index 00000000000..3241ef0ce81 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_clear_lock.3.rst @@ -0,0 +1,9 @@ +.. _shmem_clear_lock: + +shmem_clear_lock +================ + .. include_body + +.. include:: ../man3/shmem_set_lock.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_collect32.3.rst b/docs/man-openshmem/man3/shmem_collect32.3.rst new file mode 100644 index 00000000000..d65dcdefa06 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_collect32.3.rst @@ -0,0 +1,213 @@ +.. _shmem_collect32: + + +shmem_collect32 +=============== + +.. include_body + +shmem_collect4\ (3), shmem_collect8\ (3), :ref:`shmem_collect32`\ (3), +:ref:`shmem_collect64`\ (3), shmem_fcollect\ (3), shmem_fcollect4\ +(3), shmem_fcollect8\ (3), :ref:`shmem_fcollect32`\ (3), +:ref:`shmem_fcollect64`\ (3) - Concatenates blocks of data from +multiple processing elements (PEs) to an array in every PE + + + +SYNOPSIS +-------- + +C or C++: + +.. code-block:: c++ + + #include + + void shmem_collect32(void *target, const void *source, + size_t nelems, int PE_start, int logPE_stride, int PE_size, + long *pSync) + + void shmem_collect64(void *target, const void *source, + size_t nelems, int PE_start, int logPE_stride, int PE_size, + long *pSync) + + void shmem_fcollect32(void *target, const void *source, + size_t nelems, int PE_start, int logPE_stride, int PE_size, + long *pSync) + + void shmem_fcollect64(void *target, const void *source, + size_t nelems, int PE_start, int logPE_stride, int PE_size, + long *pSync) + +Fortran: + +.. 
code-block:: fortran + + INCLUDE "mpp/shmem.fh" + + INTEGER nelems + INTEGER PE_start, logPE_stride, PE_size + INTEGER pSync(SHMEM_COLLECT_SYNC_SIZE) + + CALL SHMEM_COLLECT4(target, source, nelems, PE_start, + & logPE_stride, PE_size, pSync) + + CALL SHMEM_COLLECT8(target, source, nelems, PE_start, + & logPE_stride, PE_size, pSync) + + CALL SHMEM_FCOLLECT4(target, source, nelems, PE_start, + & logPE_stride, PE_size, pSync) + + CALL SHMEM_FCOLLECT8(target, source, nelems, PE_start, + & logPE_stride, PE_size, pSync) + + +DESCRIPTION +----------- + +The shared memory (SHMEM) collect and fcollect routines concatenate +nelems 64-bit or 32-bit data items from the source array into the target +array, over the set of PEs defined by PE_start, log2PE_stride, and +PE_size, in processor number order. The resultant target array contains +the contribution from PE PE_start first, then the contribution from PE +PE_start + PE_stride second, and so on. The collected result is written +to the target array for all PEs in the active set. + +The fcollect routines require that nelems be the same value in all +participating PEs, while the collect routines allow nelems to vary from +PE to PE. + +The resulting target array is as follows: + +:: + + ---------------------------------------------------------- + source(1..nelems) + from PE (PE_start + 0 * (2**logPE_stride)) + ---------------------------------------------------------- + source(1..nelems) + from PE (PE_start + 1 * (2**logPE_stride)) + ---------------------------------------------------------- + ... + ---------------------------------------------------------- + source(1..nelems) from + PE (PE_start + (PE_size - 1) * (2**logPE_stride)) + ---------------------------------------------------------- + +As with all SHMEM collective routines, each of these routines assumes +that only PEs in the active set call the routine. If a PE not in the +active set calls a SHMEM collective routine, undefined behavior results. 
+ +The arguments are as follows: + +target + A symmetric array. The target argument must be large enough to accept + the concatenation of the source arrays on all PEs. The data types are + as follows: + + [shmem_collect8, :ref:`shmem_collect64`, shmem_fcollect8, and + shmem_fcollect64] any data type with an element size of 64 bits. + Fortran derived types, Fortran character type, and C/C++ + structures are not permitted. + + [shmem_collect4, :ref:`shmem_collect32`, shmem_fcollect4, and + shmem_fcollect32] any data type with an element size of 32 bits. + Fortran derived types, Fortran character type, and C/C++ + structures are not permitted. + +source + A symmetric data object that can be of any type permissible for the + target argument. + +nelems + The number of elements in the source array. nelems must be of type + integer. If you are using Fortran, it must be a default integer + value. + +PE_start + The lowest virtual PE number of the active set of PEs. PE_start must + be of type integer. If you are using Fortran, it must be a default + integer value. + +logPE_stride + The log (base 2) of the stride between consecutive virtual PE numbers + in the active set. logPE_stride must be of type integer. If you are + using Fortran, it must be a default integer value. + +PE_size + The number of PEs in the active set. PE_size must be of type integer. + If you are using Fortran, it must be a default integer value. + +pSync + A symmetric work array. In C/C++, pSync must be of type int and size + \_SHMEM_COLLECT_SYNC_SIZE. In Fortran, pSync must be of type integer + and size SHMEM_COLLECT_SYNC_SIZE. If you are using Fortran, it must + be a default integer value. Every element of this array must be + initialized with the value \_SHMEM_SYNC_VALUE in C/C++ or + SHMEM_SYNC_VALUE in Fortran before any of the PEs in the active set + enter shmem_barrier(). + +The values of arguments PE_start, logPE_stride, and PE_size must be +equal on all PEs in the active set. 
The same target and source arrays +and the same pSync work array must be passed to all PEs in the active +set. + +Upon return from a collective routine, the following are true for the +local PE: The target array is updated. The values in the pSync array are +restored to the original values. + + +NOTES +----- + +The terms collective and symmetric are defined in *intro_shmem*\ (3). +All SHMEM collective routines reset the values in pSync before they +return, so a particular pSync buffer need only be initialized the first +time it is used. + +You must ensure that the pSync array is not being updated on any PE in +the active set while any of the PEs participate in processing of a SHMEM +collective routine. Be careful to avoid these situations: If the pSync +array is initialized at run time, some type of synchronization is needed +to ensure that all PEs in the working set have initialized pSync before +any of them enter a SHMEM routine called with the pSync synchronization +array. A pSync array can be reused on a subsequent SHMEM collective +routine only if none of the PEs in the active set are still processing a +prior SHMEM collective routine call that used the same pSync array. In +general, this may be ensured only by doing some type of synchronization. +However, in the special case of SHMEM routines being called with the +same active set, you can allocate two pSync arrays and alternate between +them on successive calls. + +The collective routines operate on active PE sets that have a +non-power-of-two PE_size with some performance degradation. They operate +with no performance degradation when nelems is a non-power-of-two value. + + +EXAMPLES +-------- + +C/C++: + +.. code-block:: c++ + + for (i=0; i < _SHMEM_COLLECT_SYNC_SIZE; i++) { + pSync[i] = _SHMEM_SYNC_VALUE; + } + shmem_barrier_all(); /* Wait for all PEs to initialize pSync */ + shmem_collect32(target, source, 64, pe_start, logPE_stride, + pe_size, pSync); + +Fortran: + +.. 
code-block:: fortran + + INTEGER PSYNC(SHMEM_COLLECT_SYNC_SIZE) + DATA PSYNC /SHMEM_COLLECT_SYNC_SIZE*SHMEM_SYNC_VALUE/ + + CALL SHMEM_COLLECT4(TARGET, SOURCE, 64, PE_START, + & LOGPE_STRIDE, PE_SIZE, PSYNC) + + +.. seealso:: + *intro_shmem*\ (3) diff --git a/docs/man-openshmem/man3/shmem_collect64.3.rst b/docs/man-openshmem/man3/shmem_collect64.3.rst new file mode 100644 index 00000000000..c031c5bc6f0 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_collect64.3.rst @@ -0,0 +1,9 @@ +.. _shmem_collect64: + +shmem_collect64 +=============== + .. include_body + +.. include:: ../man3/shmem_collect32.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_complexd_prod_to_all.3.rst b/docs/man-openshmem/man3/shmem_complexd_prod_to_all.3.rst new file mode 100644 index 00000000000..2130bccea92 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_complexd_prod_to_all.3.rst @@ -0,0 +1,9 @@ +.. _shmem_complexd_prod_to_all: + +shmem_complexd_prod_to_all +========================== + .. include_body + +.. include:: ../man3/shmem_short_prod_to_all.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_complexd_sum_to_all.3.rst b/docs/man-openshmem/man3/shmem_complexd_sum_to_all.3.rst new file mode 100644 index 00000000000..f1ca05396d5 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_complexd_sum_to_all.3.rst @@ -0,0 +1,9 @@ +.. _shmem_complexd_sum_to_all: + +shmem_complexd_sum_to_all +========================= + .. include_body + +.. include:: ../man3/shmem_short_sum_to_all.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_complexf_prod_to_all.3.rst b/docs/man-openshmem/man3/shmem_complexf_prod_to_all.3.rst new file mode 100644 index 00000000000..be2153700cb --- /dev/null +++ b/docs/man-openshmem/man3/shmem_complexf_prod_to_all.3.rst @@ -0,0 +1,9 @@ +.. _shmem_complexf_prod_to_all: + +shmem_complexf_prod_to_all +========================== + .. include_body + +.. 
include:: ../man3/shmem_short_prod_to_all.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_complexf_sum_to_all.3.rst b/docs/man-openshmem/man3/shmem_complexf_sum_to_all.3.rst new file mode 100644 index 00000000000..433e27f0f70 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_complexf_sum_to_all.3.rst @@ -0,0 +1,9 @@ +.. _shmem_complexf_sum_to_all: + +shmem_complexf_sum_to_all +========================= + .. include_body + +.. include:: ../man3/shmem_short_sum_to_all.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_double_fetch.3.rst b/docs/man-openshmem/man3/shmem_double_fetch.3.rst new file mode 100644 index 00000000000..1d68fe063d7 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_double_fetch.3.rst @@ -0,0 +1,9 @@ +.. _shmem_double_fetch: + +shmem_double_fetch +================== + .. include_body + +.. include:: ../man3/shmem_int_fetch.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_double_g.3.rst b/docs/man-openshmem/man3/shmem_double_g.3.rst new file mode 100644 index 00000000000..8c7913f9725 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_double_g.3.rst @@ -0,0 +1,9 @@ +.. _shmem_double_g: + +shmem_double_g +============== + .. include_body + +.. include:: ../man3/shmem_char_g.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_double_get.3.rst b/docs/man-openshmem/man3/shmem_double_get.3.rst new file mode 100644 index 00000000000..a4e9624139d --- /dev/null +++ b/docs/man-openshmem/man3/shmem_double_get.3.rst @@ -0,0 +1,9 @@ +.. _shmem_double_get: + +shmem_double_get +================ + .. include_body + +.. include:: ../man3/shmem_char_get.3.rst + :start-after: .. 
include_body + diff --git a/docs/man-openshmem/man3/shmem_double_get_nbi.3.rst b/docs/man-openshmem/man3/shmem_double_get_nbi.3.rst new file mode 100644 index 00000000000..29ac6778994 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_double_get_nbi.3.rst @@ -0,0 +1,9 @@ +.. _shmem_double_get_nbi: + +shmem_double_get_nbi +==================== + .. include_body + +.. include:: ../man3/shmem_getmem_nbi.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_double_iget.3.rst b/docs/man-openshmem/man3/shmem_double_iget.3.rst new file mode 100644 index 00000000000..3b0991b6176 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_double_iget.3.rst @@ -0,0 +1,9 @@ +.. _shmem_double_iget: + +shmem_double_iget +================= + .. include_body + +.. include:: ../man3/shmem_short_iget.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_double_iput.3.rst b/docs/man-openshmem/man3/shmem_double_iput.3.rst new file mode 100644 index 00000000000..07eb843a5ab --- /dev/null +++ b/docs/man-openshmem/man3/shmem_double_iput.3.rst @@ -0,0 +1,9 @@ +.. _shmem_double_iput: + +shmem_double_iput +================= + .. include_body + +.. include:: ../man3/shmem_short_iput.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_double_max_to_all.3.rst b/docs/man-openshmem/man3/shmem_double_max_to_all.3.rst new file mode 100644 index 00000000000..85873789dc6 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_double_max_to_all.3.rst @@ -0,0 +1,9 @@ +.. _shmem_double_max_to_all: + +shmem_double_max_to_all +======================= + .. include_body + +.. include:: ../man3/shmem_short_max_to_all.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_double_min_to_all.3.rst b/docs/man-openshmem/man3/shmem_double_min_to_all.3.rst new file mode 100644 index 00000000000..c82623500eb --- /dev/null +++ b/docs/man-openshmem/man3/shmem_double_min_to_all.3.rst @@ -0,0 +1,9 @@ +.. 
_shmem_double_min_to_all: + +shmem_double_min_to_all +======================= + .. include_body + +.. include:: ../man3/shmem_short_min_to_all.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_double_p.3.rst b/docs/man-openshmem/man3/shmem_double_p.3.rst new file mode 100644 index 00000000000..9b2e3a700ac --- /dev/null +++ b/docs/man-openshmem/man3/shmem_double_p.3.rst @@ -0,0 +1,9 @@ +.. _shmem_double_p: + +shmem_double_p +============== + .. include_body + +.. include:: ../man3/shmem_char_p.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_double_prod_to_all.3.rst b/docs/man-openshmem/man3/shmem_double_prod_to_all.3.rst new file mode 100644 index 00000000000..ccdc47f28e2 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_double_prod_to_all.3.rst @@ -0,0 +1,9 @@ +.. _shmem_double_prod_to_all: + +shmem_double_prod_to_all +======================== + .. include_body + +.. include:: ../man3/shmem_short_prod_to_all.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_double_put.3.rst b/docs/man-openshmem/man3/shmem_double_put.3.rst new file mode 100644 index 00000000000..5dd13c8b696 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_double_put.3.rst @@ -0,0 +1,9 @@ +.. _shmem_double_put: + +shmem_double_put +================ + .. include_body + +.. include:: ../man3/shmem_char_put.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_double_put_nbi.3.rst b/docs/man-openshmem/man3/shmem_double_put_nbi.3.rst new file mode 100644 index 00000000000..8d6b6053b51 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_double_put_nbi.3.rst @@ -0,0 +1,9 @@ +.. _shmem_double_put_nbi: + +shmem_double_put_nbi +==================== + .. include_body + +.. include:: ../man3/shmem_putmem_nbi.3.rst + :start-after: .. 
include_body + diff --git a/docs/man-openshmem/man3/shmem_double_set.3.rst b/docs/man-openshmem/man3/shmem_double_set.3.rst new file mode 100644 index 00000000000..511e7b5dc16 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_double_set.3.rst @@ -0,0 +1,9 @@ +.. _shmem_double_set: + +shmem_double_set +================ + .. include_body + +.. include:: ../man3/shmem_int_set.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_double_sum_to_all.3.rst b/docs/man-openshmem/man3/shmem_double_sum_to_all.3.rst new file mode 100644 index 00000000000..12e090a0a1d --- /dev/null +++ b/docs/man-openshmem/man3/shmem_double_sum_to_all.3.rst @@ -0,0 +1,9 @@ +.. _shmem_double_sum_to_all: + +shmem_double_sum_to_all +======================= + .. include_body + +.. include:: ../man3/shmem_short_sum_to_all.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_double_swap.3.rst b/docs/man-openshmem/man3/shmem_double_swap.3.rst new file mode 100644 index 00000000000..8992b958189 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_double_swap.3.rst @@ -0,0 +1,9 @@ +.. _shmem_double_swap: + +shmem_double_swap +================= + .. include_body + +.. include:: ../man3/shmem_swap.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_fcollect32.3.rst b/docs/man-openshmem/man3/shmem_fcollect32.3.rst new file mode 100644 index 00000000000..9790b2fc2a7 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_fcollect32.3.rst @@ -0,0 +1,9 @@ +.. _shmem_fcollect32: + +shmem_fcollect32 +================ + .. include_body + +.. include:: ../man3/shmem_collect32.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_fcollect64.3.rst b/docs/man-openshmem/man3/shmem_fcollect64.3.rst new file mode 100644 index 00000000000..5a5fa9a6e79 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_fcollect64.3.rst @@ -0,0 +1,9 @@ +.. _shmem_fcollect64: + +shmem_fcollect64 +================ + .. 
include_body + +.. include:: ../man3/shmem_collect32.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_fence.3.rst b/docs/man-openshmem/man3/shmem_fence.3.rst new file mode 100644 index 00000000000..1a61dde2e43 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_fence.3.rst @@ -0,0 +1,57 @@ +.. _shmem_fence: + + +shmem_fence +=========== + +.. include_body + +:ref:`shmem_fence` - Provides a separate ordering on the sequence of puts +issued by this PE to each destination PE. + + +SYNOPSIS +-------- + +C or C++: + +.. code-block:: c++ + + #include + + void shmem_fence(void) + +Fortran: + +.. code-block:: fortran + + INCLUDE "mpp/shmem.fh" + + CALL SHMEM_FENCE + + +DESCRIPTION +----------- + +The shmem_fence() routine provides an ordering on the put operations +issued by the calling PE prior to the call to shmem_fence() relative +to the put operations issued by the calling PE following the call to +shmem_fence(). It guarantees that all such prior put operations +issued to a particular destination PE are fully written to the symmetric +memory of that destination PE, before any such following put operations +to that same destination PE are written to the symmetric memory of that +destination PE. Note that the ordering is provided separately on the +sequences of puts from the calling PE to each distinct destination PE. +The shmem_quiet() routine should be used instead if ordering of puts +is required when multiple destination PEs are involved. + + +NOTES +----- + +The :ref:`shmem_quiet` function should be called if ordering of puts is desired +when multiple remote PEs are involved. + + +.. seealso:: + *intro_shmem*\ (3) diff --git a/docs/man-openshmem/man3/shmem_finalize.3.rst b/docs/man-openshmem/man3/shmem_finalize.3.rst new file mode 100644 index 00000000000..9b61f934e40 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_finalize.3.rst @@ -0,0 +1,51 @@ +.. _shmem_finalize: + + +shmem_finalize +============== + +.. 
include_body + +:ref:`shmem_finalize` - A collective operation that releases resources used by +the OpenSHMEM library. This only terminates the Open-SHMEM portion of a +program, not the entire program. + + +SYNOPSIS +-------- + +C or C++: + +.. code-block:: c++ + + #include + + void shmem_finalize(void) + +Fortran: + +.. code-block:: fortran + + include 'mpp/shmem.fh' + CALL SHMEM_FINALIZE + + +DESCRIPTION +----------- + +:ref:`shmem_finalize` is a collective operation that ends the OpenSHMEM portion +of a program previously initialized by :ref:`shmem_init` and releases resources +used by the OpenSHMEM library. This collective operation requires all +PEs to participate in the call. There is an implicit global barrier in +:ref:`shmem_finalize` so that pending communication is completed, and no +resources can be released until all PEs have entered :ref:`shmem_finalize`. +:ref:`shmem_finalize` must be the last OpenSHMEM library call encountered in +the OpenSHMEM portion of a program. A call to :ref:`shmem_finalize` will +release any resources initialized by a corresponding call to :ref:`shmem_init`. +All processes and threads that represent the PEs will still exist after +the call to :ref:`shmem_finalize` returns, but they will no longer have access +to any resources that have been released. + + +.. seealso:: + *intro_shmem*\ (3) *shmem_my_pe*\ (3) *shmem_init*\ (3) diff --git a/docs/man-openshmem/man3/shmem_float_fetch.3.rst b/docs/man-openshmem/man3/shmem_float_fetch.3.rst new file mode 100644 index 00000000000..0ebaddc8d68 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_float_fetch.3.rst @@ -0,0 +1,9 @@ +.. _shmem_float_fetch: + +shmem_float_fetch +================= + .. include_body + +.. include:: ../man3/shmem_int_fetch.3.rst + :start-after: .. 
include_body + diff --git a/docs/man-openshmem/man3/shmem_float_g.3.rst b/docs/man-openshmem/man3/shmem_float_g.3.rst new file mode 100644 index 00000000000..3a4b49c8612 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_float_g.3.rst @@ -0,0 +1,9 @@ +.. _shmem_float_g: + +shmem_float_g +============= + .. include_body + +.. include:: ../man3/shmem_char_g.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_float_get.3.rst b/docs/man-openshmem/man3/shmem_float_get.3.rst new file mode 100644 index 00000000000..fdcd4917911 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_float_get.3.rst @@ -0,0 +1,9 @@ +.. _shmem_float_get: + +shmem_float_get +=============== + .. include_body + +.. include:: ../man3/shmem_char_get.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_float_get_nbi.3.rst b/docs/man-openshmem/man3/shmem_float_get_nbi.3.rst new file mode 100644 index 00000000000..59730686ec8 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_float_get_nbi.3.rst @@ -0,0 +1,9 @@ +.. _shmem_float_get_nbi: + +shmem_float_get_nbi +=================== + .. include_body + +.. include:: ../man3/shmem_getmem_nbi.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_float_iget.3.rst b/docs/man-openshmem/man3/shmem_float_iget.3.rst new file mode 100644 index 00000000000..3bbab94ea38 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_float_iget.3.rst @@ -0,0 +1,9 @@ +.. _shmem_float_iget: + +shmem_float_iget +================ + .. include_body + +.. include:: ../man3/shmem_short_iget.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_float_iput.3.rst b/docs/man-openshmem/man3/shmem_float_iput.3.rst new file mode 100644 index 00000000000..56c7b5d7fa5 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_float_iput.3.rst @@ -0,0 +1,9 @@ +.. _shmem_float_iput: + +shmem_float_iput +================ + .. include_body + +.. 
include:: ../man3/shmem_short_iput.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_float_max_to_all.3.rst b/docs/man-openshmem/man3/shmem_float_max_to_all.3.rst new file mode 100644 index 00000000000..165a44b39f2 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_float_max_to_all.3.rst @@ -0,0 +1,9 @@ +.. _shmem_float_max_to_all: + +shmem_float_max_to_all +====================== + .. include_body + +.. include:: ../man3/shmem_short_max_to_all.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_float_min_to_all.3.rst b/docs/man-openshmem/man3/shmem_float_min_to_all.3.rst new file mode 100644 index 00000000000..deb66370818 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_float_min_to_all.3.rst @@ -0,0 +1,9 @@ +.. _shmem_float_min_to_all: + +shmem_float_min_to_all +====================== + .. include_body + +.. include:: ../man3/shmem_short_min_to_all.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_float_p.3.rst b/docs/man-openshmem/man3/shmem_float_p.3.rst new file mode 100644 index 00000000000..50e7bea93f0 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_float_p.3.rst @@ -0,0 +1,9 @@ +.. _shmem_float_p: + +shmem_float_p +============= + .. include_body + +.. include:: ../man3/shmem_char_p.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_float_prod_to_all.3.rst b/docs/man-openshmem/man3/shmem_float_prod_to_all.3.rst new file mode 100644 index 00000000000..57f810497ff --- /dev/null +++ b/docs/man-openshmem/man3/shmem_float_prod_to_all.3.rst @@ -0,0 +1,9 @@ +.. _shmem_float_prod_to_all: + +shmem_float_prod_to_all +======================= + .. include_body + +.. include:: ../man3/shmem_short_prod_to_all.3.rst + :start-after: .. 
include_body + diff --git a/docs/man-openshmem/man3/shmem_float_put.3.rst b/docs/man-openshmem/man3/shmem_float_put.3.rst new file mode 100644 index 00000000000..27f99d2318e --- /dev/null +++ b/docs/man-openshmem/man3/shmem_float_put.3.rst @@ -0,0 +1,9 @@ +.. _shmem_float_put: + +shmem_float_put +=============== + .. include_body + +.. include:: ../man3/shmem_char_put.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_float_put_nbi.3.rst b/docs/man-openshmem/man3/shmem_float_put_nbi.3.rst new file mode 100644 index 00000000000..c239598afb9 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_float_put_nbi.3.rst @@ -0,0 +1,9 @@ +.. _shmem_float_put_nbi: + +shmem_float_put_nbi +=================== + .. include_body + +.. include:: ../man3/shmem_putmem_nbi.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_float_set.3.rst b/docs/man-openshmem/man3/shmem_float_set.3.rst new file mode 100644 index 00000000000..6893ab04419 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_float_set.3.rst @@ -0,0 +1,9 @@ +.. _shmem_float_set: + +shmem_float_set +=============== + .. include_body + +.. include:: ../man3/shmem_int_set.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_float_sum_to_all.3.rst b/docs/man-openshmem/man3/shmem_float_sum_to_all.3.rst new file mode 100644 index 00000000000..7b48a7e0b48 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_float_sum_to_all.3.rst @@ -0,0 +1,9 @@ +.. _shmem_float_sum_to_all: + +shmem_float_sum_to_all +====================== + .. include_body + +.. include:: ../man3/shmem_short_sum_to_all.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_float_swap.3.rst b/docs/man-openshmem/man3/shmem_float_swap.3.rst new file mode 100644 index 00000000000..ae1df063f7b --- /dev/null +++ b/docs/man-openshmem/man3/shmem_float_swap.3.rst @@ -0,0 +1,9 @@ +.. _shmem_float_swap: + +shmem_float_swap +================ + .. 
include_body + +.. include:: ../man3/shmem_swap.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_free.3.rst b/docs/man-openshmem/man3/shmem_free.3.rst new file mode 100644 index 00000000000..9ca79f9b3b4 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_free.3.rst @@ -0,0 +1,9 @@ +.. _shmem_free: + +shmem_free +========== + .. include_body + +.. include:: ../man3/shmem_malloc.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_get128.3.rst b/docs/man-openshmem/man3/shmem_get128.3.rst new file mode 100644 index 00000000000..4480c898398 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_get128.3.rst @@ -0,0 +1,9 @@ +.. _shmem_get128: + +shmem_get128 +============ + .. include_body + +.. include:: ../man3/shmem_char_get.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_get128_nbi.3.rst b/docs/man-openshmem/man3/shmem_get128_nbi.3.rst new file mode 100644 index 00000000000..1500efbdcc7 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_get128_nbi.3.rst @@ -0,0 +1,9 @@ +.. _shmem_get128_nbi: + +shmem_get128_nbi +================ + .. include_body + +.. include:: ../man3/shmem_getmem_nbi.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_get16_nbi.3.rst b/docs/man-openshmem/man3/shmem_get16_nbi.3.rst new file mode 100644 index 00000000000..b9e9fa8351b --- /dev/null +++ b/docs/man-openshmem/man3/shmem_get16_nbi.3.rst @@ -0,0 +1,9 @@ +.. _shmem_get16_nbi: + +shmem_get16_nbi +=============== + .. include_body + +.. include:: ../man3/shmem_getmem_nbi.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_get32.3.rst b/docs/man-openshmem/man3/shmem_get32.3.rst new file mode 100644 index 00000000000..ae5984c1817 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_get32.3.rst @@ -0,0 +1,9 @@ +.. _shmem_get32: + +shmem_get32 +=========== + .. include_body + +.. include:: ../man3/shmem_char_get.3.rst + :start-after: .. 
include_body + diff --git a/docs/man-openshmem/man3/shmem_get32_nbi.3.rst b/docs/man-openshmem/man3/shmem_get32_nbi.3.rst new file mode 100644 index 00000000000..33e7e145bda --- /dev/null +++ b/docs/man-openshmem/man3/shmem_get32_nbi.3.rst @@ -0,0 +1,9 @@ +.. _shmem_get32_nbi: + +shmem_get32_nbi +=============== + .. include_body + +.. include:: ../man3/shmem_getmem_nbi.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_get64.3.rst b/docs/man-openshmem/man3/shmem_get64.3.rst new file mode 100644 index 00000000000..d41361f6b07 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_get64.3.rst @@ -0,0 +1,9 @@ +.. _shmem_get64: + +shmem_get64 +=========== + .. include_body + +.. include:: ../man3/shmem_char_get.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_get64_nbi.3.rst b/docs/man-openshmem/man3/shmem_get64_nbi.3.rst new file mode 100644 index 00000000000..57d1b62d0e9 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_get64_nbi.3.rst @@ -0,0 +1,9 @@ +.. _shmem_get64_nbi: + +shmem_get64_nbi +=============== + .. include_body + +.. include:: ../man3/shmem_getmem_nbi.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_get8_nbi.3.rst b/docs/man-openshmem/man3/shmem_get8_nbi.3.rst new file mode 100644 index 00000000000..3c30f40f90e --- /dev/null +++ b/docs/man-openshmem/man3/shmem_get8_nbi.3.rst @@ -0,0 +1,9 @@ +.. _shmem_get8_nbi: + +shmem_get8_nbi +============== + .. include_body + +.. include:: ../man3/shmem_getmem_nbi.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_getmem.3.rst b/docs/man-openshmem/man3/shmem_getmem.3.rst new file mode 100644 index 00000000000..9ac56116ac6 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_getmem.3.rst @@ -0,0 +1,9 @@ +.. _shmem_getmem: + +shmem_getmem +============ + .. include_body + +.. include:: ../man3/shmem_char_get.3.rst + :start-after: .. 
include_body + diff --git a/docs/man-openshmem/man3/shmem_getmem_nbi.3.rst b/docs/man-openshmem/man3/shmem_getmem_nbi.3.rst new file mode 100644 index 00000000000..2f6fad8214e --- /dev/null +++ b/docs/man-openshmem/man3/shmem_getmem_nbi.3.rst @@ -0,0 +1,171 @@ +.. _shmem_getmem_nbi: + + +shmem_getmem_nbi +================ + +.. include_body + +:ref:`shmem_getmem_nbi`\ (3), :ref:`shmem_char_get_nbi`\ (3), +:ref:`shmem_short_get_nbi`\ (3), :ref:`shmem_int_get_nbi`\ (3), +:ref:`shmem_long_get_nbi`\ (3), :ref:`shmem_longlong_get_nbi`\ (3), +:ref:`shmem_float_get_nbi`\ (3), :ref:`shmem_double_get_nbi`\ (3), +:ref:`shmem_longdouble_get_nbi`\ (3), :ref:`shmem_get8_nbi`\ (3), +:ref:`shmem_get16_nbi`\ (3), :ref:`shmem_get32_nbi`\ (3), :ref:`shmem_get64_nbi`\ (3), +:ref:`shmem_get128_nbi`\ (3) - The nonblocking get routines provide a method +for copying data from a contiguous remote data object on the specified +PE to the local data object. + + +SYNOPSIS +-------- + +C or C++: + +.. code-block:: c++ + + #include <mpp/shmem.h> + + void shmem_getmem_nbi(void *dest, const void *source, + size_t nelems, int pe) + + void shmem_char_get_nbi(char *dest, const char *source, + size_t nelems, int pe) + + void shmem_short_get_nbi(short *dest, const short *source, + size_t nelems, int pe) + + void shmem_int_get_nbi(int *dest, const int *source, + size_t nelems, int pe) + + void shmem_long_get_nbi(long *dest, const long *source, + size_t nelems, int pe) + + void shmem_longlong_get_nbi(long long *dest, const long long *source, + size_t nelems, int pe) + + void shmem_float_get_nbi(float *dest, const float *source, + size_t nelems, int pe) + + void shmem_double_get_nbi(double *dest, const double *source, + size_t nelems, int pe) + + void shmem_longdouble_get_nbi(long double *dest, const long double *source, + size_t nelems, int pe) + + void shmem_get8_nbi(void *dest, const void *source, + size_t nelems, int pe) + + void shmem_get16_nbi(void *dest, const void *source, + size_t nelems, int pe) + + void shmem_get32_nbi(void *dest, const void
*source, + size_t nelems, int pe) + + void shmem_get64_nbi(void *dest, const void *source, + size_t nelems, int pe) + + void shmem_get128_nbi(void *dest, const void *source, + size_t nelems, int pe) + +Fortran: + +.. code-block:: fortran + + INCLUDE "mpp/shmem.fh" + + INTEGER nelems, pe + + CALL SHMEM_GETMEM_NBI(dest, source, nelems, pe) + + CALL SHMEM_CHARACTER_GET_NBI(dest, source, nelems, pe) + + CALL SHMEM_COMPLEX_GET_NBI(dest, source, nelems, pe) + + CALL SHMEM_DOUBLE_GET_NBI(dest, source, nelems, pe) + + CALL SHMEM_INTEGER_GET_NBI(dest, source, nelems, pe) + + CALL SHMEM_LOGICAL_GET_NBI(dest, source, nelems, pe) + + CALL SHMEM_REAL_GET_NBI(dest, source, nelems, pe) + + CALL SHMEM_GET4_NBI(dest, source, nelems, pe) + + CALL SHMEM_GET8_NBI(dest, source, nelems, pe) + + CALL SHMEM_GET32_NBI(dest, source, nelems, pe) + + CALL SHMEM_GET64_NBI(dest, source, nelems, pe) + + CALL SHMEM_GET128_NBI(dest, source, nelems, pe) + + +DESCRIPTION +----------- + +The nonblocking get routines provide a method for copying a contiguous symmetric +data object from a different PE to a contiguous data object on the local +PE. The routines return after posting the operation. The operation is +considered complete after a subsequent call to :ref:`shmem_quiet`. At the +completion of :ref:`shmem_quiet`, the data has been delivered to the dest array +on the local PE. + +The arguments are as follows: + +dest + Local data object to be updated. + +source + Data object on the PE identified by pe that contains the data to be + copied. This data object must be remotely accessible. + +nelems + Number of elements in the target and source arrays. nelems must be of + type integer. If you are using Fortran, it must be a constant, + variable, or array element of default integer type. + +pe + PE number of the remote PE. pe must be of type integer. If you are + using Fortran, it must be a constant, variable, or array element of + default integer type. + +If you are using Fortran, data types must be of default size.
For +example, a real variable must be declared as REAL, REAL*4, or +REAL(KIND=4). + + +NOTES +----- + +See *intro_shmem*\ (3) for a definition of the term remotely accessible. + + +EXAMPLES +-------- + +Consider this simple example for Fortran. + +.. code-block:: fortran + + PROGRAM REDUCTION + REAL VALUES, SUM + COMMON /C/ VALUES + REAL WORK + + CALL START_PES(0) ! ALLOW ANY NUMBER OF PES + VALUES = MY_PE() ! INITIALIZE IT TO SOMETHING + CALL SHMEM_BARRIER_ALL + SUM = 0.0 + DO I = 0,NUM_PES()-1 + CALL SHMEM_REAL_GET_NBI(WORK, VALUES, 1, I) + CALL SHMEM_QUIET ! wait for delivery + SUM = SUM + WORK + ENDDO + PRINT *, 'PE ', MY_PE(), ' COMPUTED SUM=', SUM + CALL SHMEM_BARRIER_ALL + END + + +.. seealso:: + *intro_shmem*\ (3) *shmem_quiet*\ (3) diff --git a/docs/man-openshmem/man3/shmem_global_exit.3.rst b/docs/man-openshmem/man3/shmem_global_exit.3.rst new file mode 100644 index 00000000000..c24fb84532f --- /dev/null +++ b/docs/man-openshmem/man3/shmem_global_exit.3.rst @@ -0,0 +1,51 @@ +.. _shmem_global_exit: + + +shmem_global_exit +================= + +.. include_body + +:ref:`shmem_global_exit` - A routine that allows any PE to force termination of +an entire program. + + +SYNOPSIS +-------- + +C or C++: + +.. code-block:: c++ + + #include + + void shmem_global_exit(int status) + +Fortran: + +.. code-block:: fortran + + include 'mpp/shmem.fh' + INTEGER STATUS + CALL SHMEM_GLOBAL_EXIT(status) + + +DESCRIPTION +----------- + +shmem_global_exit() :ref:`shmem_global_exit` is a non-collective routine that +allows any one PE to force termination of an Open- SHMEM program for all +PEs, passing an exit status to the execution environment. This routine +terminates the entire program, not just the OpenSHMEM portion. When any +PE calls :ref:`shmem_global_exit`, it results in the immediate notification to +all PEs to terminate. :ref:`shmem_global_exit` flushes I/O and releases +resources in accordance with C/C++/Fortran language requirements for +normal program termination. 
If more than one PE calls :ref:`shmem_global_exit`, +then the exit status returned to the environment shall be one of the +values passed to :ref:`shmem_global_exit` as the status argument. There is no +return to the caller of :ref:`shmem_global_exit`; control is returned from the +OpenSHMEM program to the execution environment for all PEs. + + +.. seealso:: + *intro_shmem*\ (3) *shmem_my_pe*\ (3) *shmem_init*\ (3) diff --git a/docs/man-openshmem/man3/shmem_iget128.3.rst b/docs/man-openshmem/man3/shmem_iget128.3.rst new file mode 100644 index 00000000000..896c1381ce3 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_iget128.3.rst @@ -0,0 +1,9 @@ +.. _shmem_iget128: + +shmem_iget128 +============= + .. include_body + +.. include:: ../man3/shmem_short_iget.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_iget32.3.rst b/docs/man-openshmem/man3/shmem_iget32.3.rst new file mode 100644 index 00000000000..a5b0c1b56f9 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_iget32.3.rst @@ -0,0 +1,9 @@ +.. _shmem_iget32: + +shmem_iget32 +============ + .. include_body + +.. include:: ../man3/shmem_short_iget.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_iget64.3.rst b/docs/man-openshmem/man3/shmem_iget64.3.rst new file mode 100644 index 00000000000..451e495a015 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_iget64.3.rst @@ -0,0 +1,9 @@ +.. _shmem_iget64: + +shmem_iget64 +============ + .. include_body + +.. include:: ../man3/shmem_short_iget.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_info_get_name.3.rst b/docs/man-openshmem/man3/shmem_info_get_name.3.rst new file mode 100644 index 00000000000..c9e685205c5 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_info_get_name.3.rst @@ -0,0 +1,51 @@ +.. _shmem_info_get_name: + + +shmem_info_get_name +=================== + +.. include_body + +:ref:`shmem_info_get_name` - This routine returns the vendor defined character +string. 
+ + +SYNOPSIS +-------- + +C or C++: + +.. code-block:: c++ + + #include + + void shmem_info_get_name(char *name) + +Fortran: + +.. code-block:: fortran + + include 'mpp/shmem.fh' + SHMEM_INFO_GET_NAME(NAME) + CHARACTER *(*)NAME + + +DESCRIPTION +----------- + +shmem_info_get_name() This routine returns the vendor defined character +string of size defined by the constant SHMEM_MAX_NAME_LEN. The program +calling this function prepares the memory of size SHMEM_MAX_NAME_LEN, +and the implementation copies the string of size at most +SHMEM_MAX_NAME_LEN. In C, the string is terminated by a null character. +In Fortran, the string of size less than SHMEM_MAX_NAME_LEN is padded +with blank characters up to size SHMEM_MAX_NAME_LEN. The implementation +copying a string of size greater than SHMEM_MAX_NAME_LEN results in an +undefined behavior. Multiple invocations of the routine in an OpenSHMEM +program always return the same string. For a given library +implementation, the major and minor version returned by these calls is +consistent with the compile-time constants defined in its shmem.h. + + +.. seealso:: + *intro_shmem*\ (3) *shmem_my_pe*\ (3) *shmem_init*\ (3) diff --git a/docs/man-openshmem/man3/shmem_info_get_version.3.rst b/docs/man-openshmem/man3/shmem_info_get_version.3.rst new file mode 100644 index 00000000000..6f646c97b2b --- /dev/null +++ b/docs/man-openshmem/man3/shmem_info_get_version.3.rst @@ -0,0 +1,45 @@ +.. _shmem_info_get_version: + + +shmem_info_get_version +====================== + +.. include_body + +:ref:`shmem_info_get_version` - Returns the major and minor version of the +library implementation. + + +SYNOPSIS +-------- + +C or C++: + +.. code-block:: c++ + + #include + + void shmem_info_get_version(int *major, int *minor) + +Fortran: + +.. 
code-block:: fortran + + include 'mpp/shmem.fh' + SHMEM_INFO_GET_VERSION(MAJOR, MINOR) + INTEGER MAJOR, MINOR + + +DESCRIPTION +----------- + +shmem_info_get_version() This routine returns the major and minor +version of the OpenSHMEM standard in use. For a given library +implementation, the major and minor version returned by these calls is +consistent with the compile-time constants, SHMEM_MAJOR_VERSION and +SHMEM_MINOR_VERSION, defined in its shmem.h. The valid major version +value is 1, and the valid minor version value is 2. + + +.. seealso:: + *intro_shmem*\ (3) *shmem_my_pe*\ (3) *shmem_init*\ (3) diff --git a/docs/man-openshmem/man3/shmem_init.3.rst b/docs/man-openshmem/man3/shmem_init.3.rst new file mode 100644 index 00000000000..9e51afdfe8e --- /dev/null +++ b/docs/man-openshmem/man3/shmem_init.3.rst @@ -0,0 +1,91 @@ +.. _shmem_init: + + +shmem_init +========== + +.. include_body + +:ref:`shmem_init`, start_pes - Initializes the OpenSHMEM execution +environment. + + +SYNOPSIS +-------- + +C or C++: + +.. code-block:: c++ + + void shmem_init(void) + void start_pes(int npes) + +Fortran: + +.. code-block:: fortran + + CALL SHMEM_INIT() + CALL START_PES(npes) + + +DESCRIPTION +----------- + +The start_pes routine should be the first statement in a SHMEM parallel +program. + +The start_pes routine accepts the following argument: + +npes + Unused. Should be set to 0. + +This routine initializes the SHMEM API, therefore it must be called +before calling any other SHMEM routine. This routine is responsible +inter alia for setting up the symmetric heap on the calling PE, and the +creation of the virtual PE numbers. Upon successful return from this +routine, the calling PE will be able to communicate with and transfer +data to other PEs. + +Multiple calls to this function are not allowed.
+ +For an overview of programming with SHMEM communication routines, +example SHMEM programs, and instructions for compiling SHMEM programs, +see the *intro_shmem*\ (3) man page. + + +EXAMPLES +-------- + +This is a simple program that calls shmem_integer_put\ (3): + +:: + + PROGRAM PUT + INCLUDE "mpp/shmem.fh" + + INTEGER TARG, SRC, RECEIVER, BAR + COMMON /T/ TARG + PARAMETER (RECEIVER=1) + + CALL SHMEM_INIT() + IF (MY_PE() .EQ. 0) THEN + SRC = 33 + CALL SHMEM_INTEGER_PUT(TARG, SRC, 1, RECEIVER) + ENDIF + CALL SHMEM_BARRIER_ALL ! SYNCHRONIZES SENDER AND RECEIVER + IF (MY_PE() .EQ. RECEIVER) THEN + PRINT *,'PE ', MY_PE(),' TARG=',TARG,' (expect 33)' + ENDIF + END + + +NOTES +----- + +If the start_pes call is not the first statement in a program, +unexpected results may occur on some architectures. + + +.. seealso:: + *intro_shmem*\ (3) :ref:`shmem_barrier`\ (3) :ref:`shmem_barrier_all`\ (3) + *shmem_put*\ (3) *my_pe*\ (3) *shmem_n_pes*\ (3) diff --git a/docs/man-openshmem/man3/shmem_int_add.3.rst b/docs/man-openshmem/man3/shmem_int_add.3.rst new file mode 100644 index 00000000000..1864e7a83ef --- /dev/null +++ b/docs/man-openshmem/man3/shmem_int_add.3.rst @@ -0,0 +1,75 @@ +.. _shmem_int_add: + + +shmem_int_add +============= + +.. include_body + +:ref:`shmem_int_add`\ (3), shmem_int4_add\ (3), shmem_int8_add\ (3), +:ref:`shmem_long_add`\ (3), :ref:`shmem_longlong_add`\ (3) - Performs an atomic +add operation. + + +SYNOPSIS +-------- + +C or C++: + +.. code-block:: c++ + + #include + + void shmem_int_add(int *target, int value, int pe) + void shmem_long_add(long *target, long value, int pe) + void shmem_longlong_add(long long *target, long long value, + int pe) + +Fortran: + +.. code-block:: fortran + + include 'mpp/shmem.h' + + INTEGER pe + + CALL SHMEM_INT4_ADD(target, value, pe) + CALL SHMEM_INT8_ADD(target, value, pe) + + +DESCRIPTION +----------- + +The atomic add routines add **value** to the data at address **target** +on PE **pe**. 
The operation completes without the possibility of another +process updating target between the time of the fetch and the update. + +The arguments are as follows: + +target + The remotely accessible integer data object to be updated on the + remote PE. If you are using C/C++, the type of target should match + that implied in the SYNOPSIS section. If you are using the Fortran + compiler, it must be of type integer with an element size of 4 bytes + for SHMEM_INT4_ADD and 8 bytes for SHMEM_INT8_ADD. + +value + The value to be atomically added to target. If you are using C/C++, + the type of value should match that implied in the SYNOPSIS section. + If you are using Fortran, it must be of type integer with an element + size of target. + +pe + An integer that indicates the PE number upon which target is to be + updated. If you are using Fortran, it must be a default integer + value. + + +NOTES +----- + +The term remotely accessible is defined in *intro_shmem*\ (3). + + +.. seealso:: + *intro_shmem*\ (3) *shmem_cache*\ (3) diff --git a/docs/man-openshmem/man3/shmem_int_and_to_all.3.rst b/docs/man-openshmem/man3/shmem_int_and_to_all.3.rst new file mode 100644 index 00000000000..14bd9a73e90 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_int_and_to_all.3.rst @@ -0,0 +1,9 @@ +.. _shmem_int_and_to_all: + +shmem_int_and_to_all +==================== + .. include_body + +.. include:: ../man3/shmem_short_and_to_all.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_int_cswap.3.rst b/docs/man-openshmem/man3/shmem_int_cswap.3.rst new file mode 100644 index 00000000000..85b48b6e016 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_int_cswap.3.rst @@ -0,0 +1,135 @@ +.. _shmem_int_cswap: + + +shmem_int_cswap +=============== + +.. 
include_body + +:ref:`shmem_int_cswap`\ (3), shmem_int4_cswap\ (3), +shmem_int8_cswap\ (3), :ref:`shmem_long_cswap`\ (3), +:ref:`shmem_longlong_cswap`\ (3) - Performs an atomic conditional swap to a +remote data object + + +SYNOPSIS +-------- + +C or C++: + +.. code-block:: c++ + + #include <mpp/shmem.h> + + int shmem_int_cswap(int *target, int cond, int value, int pe) + + long shmem_long_cswap(long *target, long cond, long value, + int pe) + + long long shmem_longlong_cswap(long long *target, + long long cond, long long value, int pe) + +Fortran: + +.. code-block:: fortran + + INCLUDE "mpp/shmem.fh" + + INTEGER pe + + INTEGER(KIND=4) SHMEM_INT4_CSWAP + ires = SHMEM_INT4_CSWAP(target, cond, value, pe) + + INTEGER(KIND=8) SHMEM_INT8_CSWAP + ires = SHMEM_INT8_CSWAP(target, cond, value, pe) + + +DESCRIPTION +----------- + +The conditional swap routines conditionally update a target data object +on an arbitrary processing element (PE) and return the prior contents of +the data object in one atomic operation. + +The arguments are as follows: + +target + The remotely accessible integer data object to be updated on the + remote PE. If you are using C/C++, the type of target should match + that implied in the SYNOPSIS section. If you are using the Fortran + compiler, it must be of type integer with an element size of 4 bytes + for SHMEM_INT4_CSWAP and 8 bytes for SHMEM_INT8_CSWAP. + +value + The value to be atomically written to target. If you are using C/C++, + the type of value should match that implied in the SYNOPSIS section. + If you are using Fortran, it must be of type integer with an element + size of target. + +pe + An integer that indicates the PE number upon which target is to be + updated. If you are using Fortran, it must be a default integer + value. + +target + The remotely accessible integer data object to be updated on the + remote PE. If you are using C/C++, the data type of target should + match that implied in the SYNOPSIS section.
If you are using Fortran, + it must be of the following type: + + **SHMEM_INT4_CSWAP**: 4-byte integer + + **SHMEM_INT8_CSWAP**: 8-byte integer + +cond + cond is compared to the remote target value. If cond and the remote + target are equal, then value is swapped into the remote target. + Otherwise, the remote target is unchanged. In either case, the old + value of the remote target is returned as the function return value. + cond must be of the same data type as target. + +value + The value to be atomically written to the remote PE. value must be + the same data type as target. + +pe + An integer that indicates the PE number upon which target is to be + updated. If you are using Fortran, it must be a default integer + value. + + +NOTES +----- + +The term remotely accessible is defined in *intro_shmem*\ (3). + + +RETURN VALUES +------------- + +The contents that had been in the target data object on the remote PE +prior to the conditional swap. + + +EXAMPLES +-------- + +The following call ensures that the first PE to execute the conditional +swap will successfully write its PE number to race_winner on PE 0. + +:: + + main() + { + static int race_winner = -1; + int oldval; + + shmem_init(); + oldval = shmem_int_cswap(&race_winner, -1, shmem_my_pe(), 0); + if (oldval == -1) + printf("pe %d was first\n",shmem_my_pe()); + } + + +.. seealso:: + *intro_shmem*\ (3) *shmem_cache*\ (3) *shmem_swap*\ (3) diff --git a/docs/man-openshmem/man3/shmem_int_fadd.3.rst b/docs/man-openshmem/man3/shmem_int_fadd.3.rst new file mode 100644 index 00000000000..a61b4c308bd --- /dev/null +++ b/docs/man-openshmem/man3/shmem_int_fadd.3.rst @@ -0,0 +1,86 @@ +.. _shmem_int_fadd: + + +shmem_int_fadd +============== + +.. include_body + +shmem_int4_fadd\ (3), shmem_int8_fadd\ (3), :ref:`shmem_int_fadd`\ (3), +:ref:`shmem_long_fadd`\ (3), :ref:`shmem_longlong_fadd`\ (3) - Performs an atomic +fetch-and-add operation on a remote data object + + +SYNOPSIS +-------- + +C or C++: + +.. 
code-block:: c++ + + #include + + int shmem_int_fadd(int *target, int value, int pe) + + long shmem_long_fadd(long *target, long value, int pe) + + long long shmem_longlong_fadd(long long *target, longlong value, + int pe) + +Fortran: + +.. code-block:: fortran + + INCLUDE "mpp/shmem.fh" + + INTEGER pe + + INTEGER(KIND=4) SHMEM_INT4_FADD, ires, target, value + ires = SHMEM_INT4_FADD(target, value, pe) + + INTEGER(KIND=8) SHMEM_INT8_FADD, ires, target, value + ires = SHMEM_INT8_FADD(target, value, pe) + + +DESCRIPTION +----------- + +shmem_fadd functions perform an atomic fetch-and-add operation. An +atomic fetch-and-add operation fetches the old target and adds value to +target without the possibility of another process updating target +between the time of the fetch and the update. These routines add value +to target on Processing Element (PE) pe and return the previous contents +of target as an atomic operation. + +The arguments are as follows: + +target + The remotely accessible integer data object to be updated on the + remote PE. The type of target should match that implied in the + SYNOPSIS section. + +value + The value to be atomically added to target. The type of value should + match that implied in the SYNOPSIS section. + +pe + An integer that indicates the PE number on which target is to be + updated. If you are using Fortran, it must be a default integer + value. + + +NOTES +----- + +The term remotely accessible is defined in *intro_shmem*\ (3). + + +RETURN VALUES +------------- + +The contents that had been at the target address on the remote PE prior +to the atomic addition operation. + + +.. seealso:: + *intro_shmem*\ (3) diff --git a/docs/man-openshmem/man3/shmem_int_fetch.3.rst b/docs/man-openshmem/man3/shmem_int_fetch.3.rst new file mode 100644 index 00000000000..29f8d0aaf31 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_int_fetch.3.rst @@ -0,0 +1,82 @@ +.. _shmem_int_fetch: + + +shmem_int_fetch +=============== + +.. 
include_body + +shmem_int4_fetch\ (3), shmem_int8_fetch\ (3), +:ref:`shmem_int_fetch`\ (3), :ref:`shmem_long_fetch`\ (3), +:ref:`shmem_longlong_fetch`\ (3) :ref:`shmem_double_fetch`\ (3) +:ref:`shmem_float_fetch`\ (3) - Atomically fetches the value of a remote data +object + + +SYNOPSIS +-------- + +C or C++: + +.. code-block:: c++ + + #include <mpp/shmem.h> + + int shmem_int_fetch(int *target, int pe) + + long shmem_long_fetch(long *target, int pe) + + long long shmem_longlong_fetch(long long *target, int pe) + + double shmem_double_fetch(double *target, int pe) + + float shmem_float_fetch(float *target, int pe) + +Fortran: + +.. code-block:: fortran + + INCLUDE "mpp/shmem.fh" + + INTEGER pe + + INTEGER(KIND=4) SHMEM_INT4_FETCH, ires, target + ires = SHMEM_INT4_FETCH(target, pe) + + INTEGER(KIND=8) SHMEM_INT8_FETCH, ires, target + ires = SHMEM_INT8_FETCH(target, pe) + + + REAL(KIND=4) SHMEM_REAL4_FETCH, ires, target + ires = SHMEM_REAL4_FETCH(target, pe) + + REAL(KIND=8) SHMEM_REAL8_FETCH, ires, target + ires = SHMEM_REAL8_FETCH(target, pe) + + +DESCRIPTION +----------- + +The shmem_fetch functions perform an atomic fetch operation. They return +the contents of the **target** as an atomic operation. + +The arguments are as follows: + +target + The remotely accessible data object to be fetched from the remote PE. + +pe + An integer that indicates the PE number from which *target* is to be + fetched. If you are using Fortran, it must be a default integer + value. + + +RETURN VALUES +------------- + +The contents at the *target* address on the remote PE. The data type of +the return value is the same as the type of the remote data object. + + +.. seealso:: + *intro_shmem*\ (3) diff --git a/docs/man-openshmem/man3/shmem_int_finc.3.rst b/docs/man-openshmem/man3/shmem_int_finc.3.rst new file mode 100644 index 00000000000..c7dc75c03db --- /dev/null +++ b/docs/man-openshmem/man3/shmem_int_finc.3.rst @@ -0,0 +1,80 @@ +.. _shmem_int_finc: + + +shmem_int_finc +============== + +..
include_body + +shmem_int4_finc\ (3), shmem_int8_finc\ (3), :ref:`shmem_int_finc`\ (3), +:ref:`shmem_long_finc`\ (3), :ref:`shmem_longlong_finc`\ (3) - Performs an atomic +fetch-and-increment operation on a remote data object + + +SYNOPSIS +-------- + +C or C++: + +.. code-block:: c++ + + #include + + int shmem_int_finc(int *target, int pe) + + long shmem_long_finc(long *target, int pe) + + long long shmem_longlong_finc(long long *target, int pe) + +Fortran: + +.. code-block:: fortran + + INCLUDE "mpp/shmem.fh" + + INTEGER pe + INTEGER(KIND=4) SHMEM_INT4_FINC, target4 + INTEGER(KIND=8) SHMEM_INT8_FINC, target8 + + ires4 = SHMEM_INT4_FINC(target4, pe) + + ires8 = SHMEM_INT8_FINC(target8, pe) + + +DESCRIPTION +----------- + +The fetch and increment routines retrieve the value at address +**target** on PE **pe**, and update **target** with the result of +incrementing the retrieved value by one. The operation must be completed +without the possibility of another process updating **target** between +the time of the fetch and the update. + +The arguments are as follows: + +target + The remotely accessible integer data object to be updated on the + remote PE. The type of target should match that implied in the + SYNOPSIS section. + +pe + An integer that indicates the PE number upon which target is to be + updated. If you are using Fortran, it must be a default integer + value. + + +NOTES +----- + +The term remotely accessible is defined in *intro_shmem*\ (3). + + +RETURN VALUES +------------- + +The contents that had been at the target address on the remote PE prior +to the increment. + + +.. seealso:: + *intro_shmem*\ (3) diff --git a/docs/man-openshmem/man3/shmem_int_g.3.rst b/docs/man-openshmem/man3/shmem_int_g.3.rst new file mode 100644 index 00000000000..07ba1e88cb0 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_int_g.3.rst @@ -0,0 +1,9 @@ +.. _shmem_int_g: + +shmem_int_g +=========== + .. include_body + +.. include:: ../man3/shmem_char_g.3.rst + :start-after: .. 
include_body + diff --git a/docs/man-openshmem/man3/shmem_int_get.3.rst b/docs/man-openshmem/man3/shmem_int_get.3.rst new file mode 100644 index 00000000000..705283517b8 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_int_get.3.rst @@ -0,0 +1,9 @@ +.. _shmem_int_get: + +shmem_int_get +============= + .. include_body + +.. include:: ../man3/shmem_char_get.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_int_get_nbi.3.rst b/docs/man-openshmem/man3/shmem_int_get_nbi.3.rst new file mode 100644 index 00000000000..49877e7819f --- /dev/null +++ b/docs/man-openshmem/man3/shmem_int_get_nbi.3.rst @@ -0,0 +1,9 @@ +.. _shmem_int_get_nbi: + +shmem_int_get_nbi +================= + .. include_body + +.. include:: ../man3/shmem_getmem_nbi.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_int_iget.3.rst b/docs/man-openshmem/man3/shmem_int_iget.3.rst new file mode 100644 index 00000000000..d00a9418b94 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_int_iget.3.rst @@ -0,0 +1,9 @@ +.. _shmem_int_iget: + +shmem_int_iget +============== + .. include_body + +.. include:: ../man3/shmem_short_iget.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_int_inc.3.rst b/docs/man-openshmem/man3/shmem_int_inc.3.rst new file mode 100644 index 00000000000..08020e38ab7 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_int_inc.3.rst @@ -0,0 +1,78 @@ +.. _shmem_int_inc: + + +shmem_int_inc +============= + +.. include_body + +shmem_int4_inc\ (3), shmem_int8_inc\ (3), :ref:`shmem_int_inc`\ (3), +:ref:`shmem_long_inc`\ (3), :ref:`shmem_longlong_inc`\ (3) - These routines +perform an atomic increment operation on a remote data object. + + +SYNOPSIS +-------- + +C or C++: + +.. code-block:: c++ + + #include + + int shmem_int_inc(int *target, int pe) + + long shmem_long_inc(long *target, int pe) + + long long shmem_longlong_inc(long long *target, int pe) + +Fortran: + +.. 
code-block:: fortran + + INCLUDE "mpp/shmem.fh" + + INTEGER pe + INTEGER(KIND=4) SHMEM_INT4_INC, target4 + INTEGER(KIND=8) SHMEM_INT8_INC, target8 + + ires4 = SHMEM_INT4_INC(target4, pe) + + ires8 = SHMEM_INT8_INC(target8, pe) + + +DESCRIPTION +----------- + +The atomic increment routines replace the value of **target** with its +value incremented by one. The operation must be completed without the +possibility of another process updating **target** between the time of +the fetch and the update. + +The arguments are as follows: + +target + The remotely accessible integer data object to be updated on the + remote PE. The type of target should match that implied in the + SYNOPSIS section. + +pe + An integer that indicates the PE number upon which target is to be + updated. If you are using Fortran, it must be a default integer + value. + + +NOTES +----- + +The term remotely accessible is defined in *intro_shmem*\ (3). + + +RETURN VALUES +------------- + +None. + + +.. seealso:: + *intro_shmem*\ (3) diff --git a/docs/man-openshmem/man3/shmem_int_iput.3.rst b/docs/man-openshmem/man3/shmem_int_iput.3.rst new file mode 100644 index 00000000000..c8112843e0f --- /dev/null +++ b/docs/man-openshmem/man3/shmem_int_iput.3.rst @@ -0,0 +1,9 @@ +.. _shmem_int_iput: + +shmem_int_iput +============== + .. include_body + +.. include:: ../man3/shmem_short_iput.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_int_max_to_all.3.rst b/docs/man-openshmem/man3/shmem_int_max_to_all.3.rst new file mode 100644 index 00000000000..2264da23f40 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_int_max_to_all.3.rst @@ -0,0 +1,9 @@ +.. _shmem_int_max_to_all: + +shmem_int_max_to_all +==================== + .. include_body + +.. include:: ../man3/shmem_short_max_to_all.3.rst + :start-after: .. 
include_body + diff --git a/docs/man-openshmem/man3/shmem_int_min_to_all.3.rst b/docs/man-openshmem/man3/shmem_int_min_to_all.3.rst new file mode 100644 index 00000000000..5415ad4c5e8 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_int_min_to_all.3.rst @@ -0,0 +1,9 @@ +.. _shmem_int_min_to_all: + +shmem_int_min_to_all +==================== + .. include_body + +.. include:: ../man3/shmem_short_min_to_all.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_int_or_to_all.3.rst b/docs/man-openshmem/man3/shmem_int_or_to_all.3.rst new file mode 100644 index 00000000000..d26023a0c4c --- /dev/null +++ b/docs/man-openshmem/man3/shmem_int_or_to_all.3.rst @@ -0,0 +1,9 @@ +.. _shmem_int_or_to_all: + +shmem_int_or_to_all +=================== + .. include_body + +.. include:: ../man3/shmem_short_or_to_all.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_int_p.3.rst b/docs/man-openshmem/man3/shmem_int_p.3.rst new file mode 100644 index 00000000000..911b38cebee --- /dev/null +++ b/docs/man-openshmem/man3/shmem_int_p.3.rst @@ -0,0 +1,9 @@ +.. _shmem_int_p: + +shmem_int_p +=========== + .. include_body + +.. include:: ../man3/shmem_char_p.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_int_prod_to_all.3.rst b/docs/man-openshmem/man3/shmem_int_prod_to_all.3.rst new file mode 100644 index 00000000000..4f5e1100d97 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_int_prod_to_all.3.rst @@ -0,0 +1,9 @@ +.. _shmem_int_prod_to_all: + +shmem_int_prod_to_all +===================== + .. include_body + +.. include:: ../man3/shmem_short_prod_to_all.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_int_put.3.rst b/docs/man-openshmem/man3/shmem_int_put.3.rst new file mode 100644 index 00000000000..20bccc5634c --- /dev/null +++ b/docs/man-openshmem/man3/shmem_int_put.3.rst @@ -0,0 +1,9 @@ +.. _shmem_int_put: + +shmem_int_put +============= + .. include_body + +.. 
include:: ../man3/shmem_char_put.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_int_put_nbi.3.rst b/docs/man-openshmem/man3/shmem_int_put_nbi.3.rst new file mode 100644 index 00000000000..44fe9983f39 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_int_put_nbi.3.rst @@ -0,0 +1,9 @@ +.. _shmem_int_put_nbi: + +shmem_int_put_nbi +================= + .. include_body + +.. include:: ../man3/shmem_putmem_nbi.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_int_set.3.rst b/docs/man-openshmem/man3/shmem_int_set.3.rst new file mode 100644 index 00000000000..38fd1763f69 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_int_set.3.rst @@ -0,0 +1,75 @@ +.. _shmem_int_set: + + +shmem_int_set +============= + +.. include_body + +:ref:`shmem_double_set`\ (3), :ref:`shmem_float_set`\ (3), :ref:`shmem_int_set`\ (3), +:ref:`shmem_long_set`\ (3), :ref:`shmem_longlong_set`\ (3) shmem_int4_set\ (3), +shmem_int8_set\ (3), shmem_real4_set\ (3), shmem_real8_set\ (3), - +Atomically sets the value of a remote data object + + +SYNOPSIS +-------- + +C or C++: + +.. code-block:: c++ + + #include <mpp/shmem.h> + + void shmem_double_set(double *target, double value, int pe) + + void shmem_float_set(float *target, float value, int pe) + + void shmem_int_set(int *target, int value, int pe) + + void shmem_long_set(long *target, long value, int pe) + + void shmem_longlong_set(long long *target, long long value, int pe) + +Fortran: + +.. code-block:: fortran + + INCLUDE "mpp/shmem.fh" + + INTEGER pe + + CALL SHMEM_INT4_SET(target, value, pe) + CALL SHMEM_INT8_SET(target, value, pe) + CALL SHMEM_REAL4_SET(target, value, pe) + CALL SHMEM_REAL8_SET(target, value, pe) + + +DESCRIPTION +----------- + +The set routines write the **value** into the address **target** on +**pe** as an atomic operation. + +The arguments are as follows: + +target + The remotely accessible data object to be set on the remote PE.
+ +value + The value to be atomically written to the remote PE. + +pe + An integer that indicates the PE number upon which target is to be + updated. If you are using Fortran, it must be a default integer + value. + + +RETURN VALUES +------------- + +NONE + + +.. seealso:: + *intro_shmem*\ (3) diff --git a/docs/man-openshmem/man3/shmem_int_sum_to_all.3.rst b/docs/man-openshmem/man3/shmem_int_sum_to_all.3.rst new file mode 100644 index 00000000000..368fc51f94d --- /dev/null +++ b/docs/man-openshmem/man3/shmem_int_sum_to_all.3.rst @@ -0,0 +1,9 @@ +.. _shmem_int_sum_to_all: + +shmem_int_sum_to_all +==================== + .. include_body + +.. include:: ../man3/shmem_short_sum_to_all.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_int_swap.3.rst b/docs/man-openshmem/man3/shmem_int_swap.3.rst new file mode 100644 index 00000000000..2bde8da8e94 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_int_swap.3.rst @@ -0,0 +1,9 @@ +.. _shmem_int_swap: + +shmem_int_swap +============== + .. include_body + +.. include:: ../man3/shmem_swap.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_int_wait.3.rst b/docs/man-openshmem/man3/shmem_int_wait.3.rst new file mode 100644 index 00000000000..cda6f030fcc --- /dev/null +++ b/docs/man-openshmem/man3/shmem_int_wait.3.rst @@ -0,0 +1,9 @@ +.. _shmem_int_wait: + +shmem_int_wait +============== + .. include_body + +.. include:: ../man3/shmem_wait.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_int_wait_until.3.rst b/docs/man-openshmem/man3/shmem_int_wait_until.3.rst new file mode 100644 index 00000000000..8e771960400 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_int_wait_until.3.rst @@ -0,0 +1,9 @@ +.. _shmem_int_wait_until: + +shmem_int_wait_until +==================== + .. include_body + +.. include:: ../man3/shmem_wait.3.rst + :start-after: .. 
include_body + diff --git a/docs/man-openshmem/man3/shmem_int_xor_to_all.3.rst b/docs/man-openshmem/man3/shmem_int_xor_to_all.3.rst new file mode 100644 index 00000000000..aaf63973e01 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_int_xor_to_all.3.rst @@ -0,0 +1,9 @@ +.. _shmem_int_xor_to_all: + +shmem_int_xor_to_all +==================== + .. include_body + +.. include:: ../man3/shmem_short_xor_to_all.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_iput128.3.rst b/docs/man-openshmem/man3/shmem_iput128.3.rst new file mode 100644 index 00000000000..05ac5baa52b --- /dev/null +++ b/docs/man-openshmem/man3/shmem_iput128.3.rst @@ -0,0 +1,9 @@ +.. _shmem_iput128: + +shmem_iput128 +============= + .. include_body + +.. include:: ../man3/shmem_short_iput.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_iput32.3.rst b/docs/man-openshmem/man3/shmem_iput32.3.rst new file mode 100644 index 00000000000..2ebe811986b --- /dev/null +++ b/docs/man-openshmem/man3/shmem_iput32.3.rst @@ -0,0 +1,9 @@ +.. _shmem_iput32: + +shmem_iput32 +============ + .. include_body + +.. include:: ../man3/shmem_short_iput.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_iput64.3.rst b/docs/man-openshmem/man3/shmem_iput64.3.rst new file mode 100644 index 00000000000..cde38ff3d98 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_iput64.3.rst @@ -0,0 +1,9 @@ +.. _shmem_iput64: + +shmem_iput64 +============ + .. include_body + +.. include:: ../man3/shmem_short_iput.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_long_add.3.rst b/docs/man-openshmem/man3/shmem_long_add.3.rst new file mode 100644 index 00000000000..4501559a8c7 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_long_add.3.rst @@ -0,0 +1,9 @@ +.. _shmem_long_add: + +shmem_long_add +============== + .. include_body + +.. include:: ../man3/shmem_int_add.3.rst + :start-after: .. 
include_body + diff --git a/docs/man-openshmem/man3/shmem_long_and_to_all.3.rst b/docs/man-openshmem/man3/shmem_long_and_to_all.3.rst new file mode 100644 index 00000000000..90643ffed7b --- /dev/null +++ b/docs/man-openshmem/man3/shmem_long_and_to_all.3.rst @@ -0,0 +1,9 @@ +.. _shmem_long_and_to_all: + +shmem_long_and_to_all +===================== + .. include_body + +.. include:: ../man3/shmem_short_and_to_all.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_long_cswap.3.rst b/docs/man-openshmem/man3/shmem_long_cswap.3.rst new file mode 100644 index 00000000000..74d43645187 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_long_cswap.3.rst @@ -0,0 +1,9 @@ +.. _shmem_long_cswap: + +shmem_long_cswap +================ + .. include_body + +.. include:: ../man3/shmem_int_cswap.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_long_fadd.3.rst b/docs/man-openshmem/man3/shmem_long_fadd.3.rst new file mode 100644 index 00000000000..9cc10df9b19 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_long_fadd.3.rst @@ -0,0 +1,9 @@ +.. _shmem_long_fadd: + +shmem_long_fadd +=============== + .. include_body + +.. include:: ../man3/shmem_int_fadd.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_long_fetch.3.rst b/docs/man-openshmem/man3/shmem_long_fetch.3.rst new file mode 100644 index 00000000000..f17808db6d9 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_long_fetch.3.rst @@ -0,0 +1,9 @@ +.. _shmem_long_fetch: + +shmem_long_fetch +================ + .. include_body + +.. include:: ../man3/shmem_int_fetch.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_long_finc.3.rst b/docs/man-openshmem/man3/shmem_long_finc.3.rst new file mode 100644 index 00000000000..67d4ae3ac41 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_long_finc.3.rst @@ -0,0 +1,9 @@ +.. _shmem_long_finc: + +shmem_long_finc +=============== + .. include_body + +.. 
include:: ../man3/shmem_int_finc.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_long_g.3.rst b/docs/man-openshmem/man3/shmem_long_g.3.rst new file mode 100644 index 00000000000..0a1e1aa091a --- /dev/null +++ b/docs/man-openshmem/man3/shmem_long_g.3.rst @@ -0,0 +1,9 @@ +.. _shmem_long_g: + +shmem_long_g +============ + .. include_body + +.. include:: ../man3/shmem_char_g.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_long_get.3.rst b/docs/man-openshmem/man3/shmem_long_get.3.rst new file mode 100644 index 00000000000..ffce43ca574 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_long_get.3.rst @@ -0,0 +1,9 @@ +.. _shmem_long_get: + +shmem_long_get +============== + .. include_body + +.. include:: ../man3/shmem_char_get.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_long_get_nbi.3.rst b/docs/man-openshmem/man3/shmem_long_get_nbi.3.rst new file mode 100644 index 00000000000..9099b93a808 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_long_get_nbi.3.rst @@ -0,0 +1,9 @@ +.. _shmem_long_get_nbi: + +shmem_long_get_nbi +================== + .. include_body + +.. include:: ../man3/shmem_getmem_nbi.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_long_iget.3.rst b/docs/man-openshmem/man3/shmem_long_iget.3.rst new file mode 100644 index 00000000000..a3dc46d2083 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_long_iget.3.rst @@ -0,0 +1,9 @@ +.. _shmem_long_iget: + +shmem_long_iget +=============== + .. include_body + +.. include:: ../man3/shmem_short_iget.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_long_inc.3.rst b/docs/man-openshmem/man3/shmem_long_inc.3.rst new file mode 100644 index 00000000000..496080cdbcb --- /dev/null +++ b/docs/man-openshmem/man3/shmem_long_inc.3.rst @@ -0,0 +1,9 @@ +.. _shmem_long_inc: + +shmem_long_inc +============== + .. include_body + +.. 
include:: ../man3/shmem_int_inc.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_long_iput.3.rst b/docs/man-openshmem/man3/shmem_long_iput.3.rst new file mode 100644 index 00000000000..62a3bc3c651 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_long_iput.3.rst @@ -0,0 +1,9 @@ +.. _shmem_long_iput: + +shmem_long_iput +=============== + .. include_body + +.. include:: ../man3/shmem_short_iput.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_long_max_to_all.3.rst b/docs/man-openshmem/man3/shmem_long_max_to_all.3.rst new file mode 100644 index 00000000000..6b0a33b61d1 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_long_max_to_all.3.rst @@ -0,0 +1,9 @@ +.. _shmem_long_max_to_all: + +shmem_long_max_to_all +===================== + .. include_body + +.. include:: ../man3/shmem_short_max_to_all.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_long_min_to_all.3.rst b/docs/man-openshmem/man3/shmem_long_min_to_all.3.rst new file mode 100644 index 00000000000..230bca99de7 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_long_min_to_all.3.rst @@ -0,0 +1,9 @@ +.. _shmem_long_min_to_all: + +shmem_long_min_to_all +===================== + .. include_body + +.. include:: ../man3/shmem_short_min_to_all.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_long_or_to_all.3.rst b/docs/man-openshmem/man3/shmem_long_or_to_all.3.rst new file mode 100644 index 00000000000..fad1698750d --- /dev/null +++ b/docs/man-openshmem/man3/shmem_long_or_to_all.3.rst @@ -0,0 +1,9 @@ +.. _shmem_long_or_to_all: + +shmem_long_or_to_all +==================== + .. include_body + +.. include:: ../man3/shmem_short_or_to_all.3.rst + :start-after: .. 
include_body + diff --git a/docs/man-openshmem/man3/shmem_long_p.3.rst b/docs/man-openshmem/man3/shmem_long_p.3.rst new file mode 100644 index 00000000000..13c8b4be0c8 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_long_p.3.rst @@ -0,0 +1,9 @@ +.. _shmem_long_p: + +shmem_long_p +============ + .. include_body + +.. include:: ../man3/shmem_char_p.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_long_prod_to_all.3.rst b/docs/man-openshmem/man3/shmem_long_prod_to_all.3.rst new file mode 100644 index 00000000000..1c059a2c17e --- /dev/null +++ b/docs/man-openshmem/man3/shmem_long_prod_to_all.3.rst @@ -0,0 +1,9 @@ +.. _shmem_long_prod_to_all: + +shmem_long_prod_to_all +====================== + .. include_body + +.. include:: ../man3/shmem_short_prod_to_all.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_long_put.3.rst b/docs/man-openshmem/man3/shmem_long_put.3.rst new file mode 100644 index 00000000000..a909a66d8bf --- /dev/null +++ b/docs/man-openshmem/man3/shmem_long_put.3.rst @@ -0,0 +1,9 @@ +.. _shmem_long_put: + +shmem_long_put +============== + .. include_body + +.. include:: ../man3/shmem_char_put.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_long_put_nbi.3.rst b/docs/man-openshmem/man3/shmem_long_put_nbi.3.rst new file mode 100644 index 00000000000..07747786fbf --- /dev/null +++ b/docs/man-openshmem/man3/shmem_long_put_nbi.3.rst @@ -0,0 +1,9 @@ +.. _shmem_long_put_nbi: + +shmem_long_put_nbi +================== + .. include_body + +.. include:: ../man3/shmem_putmem_nbi.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_long_set.3.rst b/docs/man-openshmem/man3/shmem_long_set.3.rst new file mode 100644 index 00000000000..d90818c2366 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_long_set.3.rst @@ -0,0 +1,9 @@ +.. _shmem_long_set: + +shmem_long_set +============== + .. include_body + +.. 
include:: ../man3/shmem_int_set.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_long_sum_to_all.3.rst b/docs/man-openshmem/man3/shmem_long_sum_to_all.3.rst new file mode 100644 index 00000000000..2566e14c960 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_long_sum_to_all.3.rst @@ -0,0 +1,9 @@ +.. _shmem_long_sum_to_all: + +shmem_long_sum_to_all +===================== + .. include_body + +.. include:: ../man3/shmem_short_sum_to_all.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_long_swap.3.rst b/docs/man-openshmem/man3/shmem_long_swap.3.rst new file mode 100644 index 00000000000..cb8c2560fc7 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_long_swap.3.rst @@ -0,0 +1,9 @@ +.. _shmem_long_swap: + +shmem_long_swap +=============== + .. include_body + +.. include:: ../man3/shmem_swap.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_long_wait.3.rst b/docs/man-openshmem/man3/shmem_long_wait.3.rst new file mode 100644 index 00000000000..2a6cde1c734 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_long_wait.3.rst @@ -0,0 +1,9 @@ +.. _shmem_long_wait: + +shmem_long_wait +=============== + .. include_body + +.. include:: ../man3/shmem_wait.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_long_wait_until.3.rst b/docs/man-openshmem/man3/shmem_long_wait_until.3.rst new file mode 100644 index 00000000000..68d9bcf8c73 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_long_wait_until.3.rst @@ -0,0 +1,9 @@ +.. _shmem_long_wait_until: + +shmem_long_wait_until +===================== + .. include_body + +.. include:: ../man3/shmem_wait.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_long_xor_to_all.3.rst b/docs/man-openshmem/man3/shmem_long_xor_to_all.3.rst new file mode 100644 index 00000000000..b6a6cd447b6 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_long_xor_to_all.3.rst @@ -0,0 +1,9 @@ +.. 
_shmem_long_xor_to_all: + +shmem_long_xor_to_all +===================== + .. include_body + +.. include:: ../man3/shmem_short_xor_to_all.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_longdouble_g.3.rst b/docs/man-openshmem/man3/shmem_longdouble_g.3.rst new file mode 100644 index 00000000000..176f756c310 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_longdouble_g.3.rst @@ -0,0 +1,9 @@ +.. _shmem_longdouble_g: + +shmem_longdouble_g +================== + .. include_body + +.. include:: ../man3/shmem_char_g.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_longdouble_get.3.rst b/docs/man-openshmem/man3/shmem_longdouble_get.3.rst new file mode 100644 index 00000000000..e5354a185c2 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_longdouble_get.3.rst @@ -0,0 +1,9 @@ +.. _shmem_longdouble_get: + +shmem_longdouble_get +==================== + .. include_body + +.. include:: ../man3/shmem_char_get.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_longdouble_get_nbi.3.rst b/docs/man-openshmem/man3/shmem_longdouble_get_nbi.3.rst new file mode 100644 index 00000000000..bade1cec504 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_longdouble_get_nbi.3.rst @@ -0,0 +1,9 @@ +.. _shmem_longdouble_get_nbi: + +shmem_longdouble_get_nbi +======================== + .. include_body + +.. include:: ../man3/shmem_getmem_nbi.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_longdouble_iget.3.rst b/docs/man-openshmem/man3/shmem_longdouble_iget.3.rst new file mode 100644 index 00000000000..e874a69f364 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_longdouble_iget.3.rst @@ -0,0 +1,9 @@ +.. _shmem_longdouble_iget: + +shmem_longdouble_iget +===================== + .. include_body + +.. include:: ../man3/shmem_short_iget.3.rst + :start-after: .. 
include_body + diff --git a/docs/man-openshmem/man3/shmem_longdouble_iput.3.rst b/docs/man-openshmem/man3/shmem_longdouble_iput.3.rst new file mode 100644 index 00000000000..24809ce32fc --- /dev/null +++ b/docs/man-openshmem/man3/shmem_longdouble_iput.3.rst @@ -0,0 +1,9 @@ +.. _shmem_longdouble_iput: + +shmem_longdouble_iput +===================== + .. include_body + +.. include:: ../man3/shmem_short_iput.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_longdouble_max_to_all.3.rst b/docs/man-openshmem/man3/shmem_longdouble_max_to_all.3.rst new file mode 100644 index 00000000000..9273cf3f52a --- /dev/null +++ b/docs/man-openshmem/man3/shmem_longdouble_max_to_all.3.rst @@ -0,0 +1,9 @@ +.. _shmem_longdouble_max_to_all: + +shmem_longdouble_max_to_all +=========================== + .. include_body + +.. include:: ../man3/shmem_short_max_to_all.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_longdouble_min_to_all.3.rst b/docs/man-openshmem/man3/shmem_longdouble_min_to_all.3.rst new file mode 100644 index 00000000000..7a85cedc8e5 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_longdouble_min_to_all.3.rst @@ -0,0 +1,9 @@ +.. _shmem_longdouble_min_to_all: + +shmem_longdouble_min_to_all +=========================== + .. include_body + +.. include:: ../man3/shmem_short_min_to_all.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_longdouble_p.3.rst b/docs/man-openshmem/man3/shmem_longdouble_p.3.rst new file mode 100644 index 00000000000..946801c4142 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_longdouble_p.3.rst @@ -0,0 +1,9 @@ +.. _shmem_longdouble_p: + +shmem_longdouble_p +================== + .. include_body + +.. include:: ../man3/shmem_char_p.3.rst + :start-after: .. 
include_body + diff --git a/docs/man-openshmem/man3/shmem_longdouble_prod_to_all.3.rst b/docs/man-openshmem/man3/shmem_longdouble_prod_to_all.3.rst new file mode 100644 index 00000000000..2d29f021f92 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_longdouble_prod_to_all.3.rst @@ -0,0 +1,9 @@ +.. _shmem_longdouble_prod_to_all: + +shmem_longdouble_prod_to_all +============================ + .. include_body + +.. include:: ../man3/shmem_short_prod_to_all.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_longdouble_put.3.rst b/docs/man-openshmem/man3/shmem_longdouble_put.3.rst new file mode 100644 index 00000000000..d4a29e9fe58 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_longdouble_put.3.rst @@ -0,0 +1,9 @@ +.. _shmem_longdouble_put: + +shmem_longdouble_put +==================== + .. include_body + +.. include:: ../man3/shmem_char_put.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_longdouble_put_nbi.3.rst b/docs/man-openshmem/man3/shmem_longdouble_put_nbi.3.rst new file mode 100644 index 00000000000..1be9ec6058c --- /dev/null +++ b/docs/man-openshmem/man3/shmem_longdouble_put_nbi.3.rst @@ -0,0 +1,9 @@ +.. _shmem_longdouble_put_nbi: + +shmem_longdouble_put_nbi +======================== + .. include_body + +.. include:: ../man3/shmem_putmem_nbi.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_longdouble_sum_to_all.3.rst b/docs/man-openshmem/man3/shmem_longdouble_sum_to_all.3.rst new file mode 100644 index 00000000000..0339f3cbb67 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_longdouble_sum_to_all.3.rst @@ -0,0 +1,9 @@ +.. _shmem_longdouble_sum_to_all: + +shmem_longdouble_sum_to_all +=========================== + .. include_body + +.. include:: ../man3/shmem_short_sum_to_all.3.rst + :start-after: .. 
include_body + diff --git a/docs/man-openshmem/man3/shmem_longlong_add.3.rst b/docs/man-openshmem/man3/shmem_longlong_add.3.rst new file mode 100644 index 00000000000..869f84b157f --- /dev/null +++ b/docs/man-openshmem/man3/shmem_longlong_add.3.rst @@ -0,0 +1,9 @@ +.. _shmem_longlong_add: + +shmem_longlong_add +================== + .. include_body + +.. include:: ../man3/shmem_int_add.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_longlong_and_to_all.3.rst b/docs/man-openshmem/man3/shmem_longlong_and_to_all.3.rst new file mode 100644 index 00000000000..c678cf048a6 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_longlong_and_to_all.3.rst @@ -0,0 +1,9 @@ +.. _shmem_longlong_and_to_all: + +shmem_longlong_and_to_all +========================= + .. include_body + +.. include:: ../man3/shmem_short_and_to_all.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_longlong_cswap.3.rst b/docs/man-openshmem/man3/shmem_longlong_cswap.3.rst new file mode 100644 index 00000000000..926991f7fd1 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_longlong_cswap.3.rst @@ -0,0 +1,9 @@ +.. _shmem_longlong_cswap: + +shmem_longlong_cswap +==================== + .. include_body + +.. include:: ../man3/shmem_int_cswap.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_longlong_fadd.3.rst b/docs/man-openshmem/man3/shmem_longlong_fadd.3.rst new file mode 100644 index 00000000000..22687f47d18 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_longlong_fadd.3.rst @@ -0,0 +1,9 @@ +.. _shmem_longlong_fadd: + +shmem_longlong_fadd +=================== + .. include_body + +.. include:: ../man3/shmem_int_fadd.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_longlong_fetch.3.rst b/docs/man-openshmem/man3/shmem_longlong_fetch.3.rst new file mode 100644 index 00000000000..8270a6acd32 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_longlong_fetch.3.rst @@ -0,0 +1,9 @@ +.. 
_shmem_longlong_fetch: + +shmem_longlong_fetch +==================== + .. include_body + +.. include:: ../man3/shmem_int_fetch.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_longlong_finc.3.rst b/docs/man-openshmem/man3/shmem_longlong_finc.3.rst new file mode 100644 index 00000000000..6fab5df9620 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_longlong_finc.3.rst @@ -0,0 +1,9 @@ +.. _shmem_longlong_finc: + +shmem_longlong_finc +=================== + .. include_body + +.. include:: ../man3/shmem_int_finc.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_longlong_g.3.rst b/docs/man-openshmem/man3/shmem_longlong_g.3.rst new file mode 100644 index 00000000000..d1648134735 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_longlong_g.3.rst @@ -0,0 +1,9 @@ +.. _shmem_longlong_g: + +shmem_longlong_g +================ + .. include_body + +.. include:: ../man3/shmem_char_g.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_longlong_get.3.rst b/docs/man-openshmem/man3/shmem_longlong_get.3.rst new file mode 100644 index 00000000000..e05de39053e --- /dev/null +++ b/docs/man-openshmem/man3/shmem_longlong_get.3.rst @@ -0,0 +1,9 @@ +.. _shmem_longlong_get: + +shmem_longlong_get +================== + .. include_body + +.. include:: ../man3/shmem_char_get.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_longlong_get_nbi.3.rst b/docs/man-openshmem/man3/shmem_longlong_get_nbi.3.rst new file mode 100644 index 00000000000..638e453fac0 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_longlong_get_nbi.3.rst @@ -0,0 +1,9 @@ +.. _shmem_longlong_get_nbi: + +shmem_longlong_get_nbi +====================== + .. include_body + +.. include:: ../man3/shmem_getmem_nbi.3.rst + :start-after: .. 
include_body + diff --git a/docs/man-openshmem/man3/shmem_longlong_iget.3.rst b/docs/man-openshmem/man3/shmem_longlong_iget.3.rst new file mode 100644 index 00000000000..4798a6c6a3f --- /dev/null +++ b/docs/man-openshmem/man3/shmem_longlong_iget.3.rst @@ -0,0 +1,9 @@ +.. _shmem_longlong_iget: + +shmem_longlong_iget +=================== + .. include_body + +.. include:: ../man3/shmem_short_iget.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_longlong_inc.3.rst b/docs/man-openshmem/man3/shmem_longlong_inc.3.rst new file mode 100644 index 00000000000..b1c918b1a3d --- /dev/null +++ b/docs/man-openshmem/man3/shmem_longlong_inc.3.rst @@ -0,0 +1,9 @@ +.. _shmem_longlong_inc: + +shmem_longlong_inc +================== + .. include_body + +.. include:: ../man3/shmem_int_inc.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_longlong_iput.3.rst b/docs/man-openshmem/man3/shmem_longlong_iput.3.rst new file mode 100644 index 00000000000..99253c95b48 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_longlong_iput.3.rst @@ -0,0 +1,9 @@ +.. _shmem_longlong_iput: + +shmem_longlong_iput +=================== + .. include_body + +.. include:: ../man3/shmem_short_iput.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_longlong_max_to_all.3.rst b/docs/man-openshmem/man3/shmem_longlong_max_to_all.3.rst new file mode 100644 index 00000000000..c68fbba7fb2 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_longlong_max_to_all.3.rst @@ -0,0 +1,9 @@ +.. _shmem_longlong_max_to_all: + +shmem_longlong_max_to_all +========================= + .. include_body + +.. include:: ../man3/shmem_short_max_to_all.3.rst + :start-after: .. 
include_body + diff --git a/docs/man-openshmem/man3/shmem_longlong_min_to_all.3.rst b/docs/man-openshmem/man3/shmem_longlong_min_to_all.3.rst new file mode 100644 index 00000000000..5ee6e711b24 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_longlong_min_to_all.3.rst @@ -0,0 +1,9 @@ +.. _shmem_longlong_min_to_all: + +shmem_longlong_min_to_all +========================= + .. include_body + +.. include:: ../man3/shmem_short_min_to_all.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_longlong_or_to_all.3.rst b/docs/man-openshmem/man3/shmem_longlong_or_to_all.3.rst new file mode 100644 index 00000000000..860fdf81d46 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_longlong_or_to_all.3.rst @@ -0,0 +1,9 @@ +.. _shmem_longlong_or_to_all: + +shmem_longlong_or_to_all +======================== + .. include_body + +.. include:: ../man3/shmem_short_or_to_all.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_longlong_p.3.rst b/docs/man-openshmem/man3/shmem_longlong_p.3.rst new file mode 100644 index 00000000000..d79553416ee --- /dev/null +++ b/docs/man-openshmem/man3/shmem_longlong_p.3.rst @@ -0,0 +1,9 @@ +.. _shmem_longlong_p: + +shmem_longlong_p +================ + .. include_body + +.. include:: ../man3/shmem_char_p.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_longlong_prod_to_all.3.rst b/docs/man-openshmem/man3/shmem_longlong_prod_to_all.3.rst new file mode 100644 index 00000000000..f4222efa1bf --- /dev/null +++ b/docs/man-openshmem/man3/shmem_longlong_prod_to_all.3.rst @@ -0,0 +1,9 @@ +.. _shmem_longlong_prod_to_all: + +shmem_longlong_prod_to_all +========================== + .. include_body + +.. include:: ../man3/shmem_short_prod_to_all.3.rst + :start-after: .. 
include_body + diff --git a/docs/man-openshmem/man3/shmem_longlong_put.3.rst b/docs/man-openshmem/man3/shmem_longlong_put.3.rst new file mode 100644 index 00000000000..afdc1e56dbb --- /dev/null +++ b/docs/man-openshmem/man3/shmem_longlong_put.3.rst @@ -0,0 +1,9 @@ +.. _shmem_longlong_put: + +shmem_longlong_put +================== + .. include_body + +.. include:: ../man3/shmem_char_put.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_longlong_put_nbi.3.rst b/docs/man-openshmem/man3/shmem_longlong_put_nbi.3.rst new file mode 100644 index 00000000000..2c85cb581ad --- /dev/null +++ b/docs/man-openshmem/man3/shmem_longlong_put_nbi.3.rst @@ -0,0 +1,9 @@ +.. _shmem_longlong_put_nbi: + +shmem_longlong_put_nbi +====================== + .. include_body + +.. include:: ../man3/shmem_putmem_nbi.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_longlong_set.3.rst b/docs/man-openshmem/man3/shmem_longlong_set.3.rst new file mode 100644 index 00000000000..11ddf1d4587 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_longlong_set.3.rst @@ -0,0 +1,9 @@ +.. _shmem_longlong_set: + +shmem_longlong_set +================== + .. include_body + +.. include:: ../man3/shmem_int_set.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_longlong_sum_to_all.3.rst b/docs/man-openshmem/man3/shmem_longlong_sum_to_all.3.rst new file mode 100644 index 00000000000..403078606b2 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_longlong_sum_to_all.3.rst @@ -0,0 +1,9 @@ +.. _shmem_longlong_sum_to_all: + +shmem_longlong_sum_to_all +========================= + .. include_body + +.. include:: ../man3/shmem_short_sum_to_all.3.rst + :start-after: .. 
include_body + diff --git a/docs/man-openshmem/man3/shmem_longlong_swap.3.rst b/docs/man-openshmem/man3/shmem_longlong_swap.3.rst new file mode 100644 index 00000000000..1d58e71529a --- /dev/null +++ b/docs/man-openshmem/man3/shmem_longlong_swap.3.rst @@ -0,0 +1,9 @@ +.. _shmem_longlong_swap: + +shmem_longlong_swap +=================== + .. include_body + +.. include:: ../man3/shmem_swap.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_longlong_wait.3.rst b/docs/man-openshmem/man3/shmem_longlong_wait.3.rst new file mode 100644 index 00000000000..e45eec0ae07 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_longlong_wait.3.rst @@ -0,0 +1,9 @@ +.. _shmem_longlong_wait: + +shmem_longlong_wait +=================== + .. include_body + +.. include:: ../man3/shmem_wait.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_longlong_wait_until.3.rst b/docs/man-openshmem/man3/shmem_longlong_wait_until.3.rst new file mode 100644 index 00000000000..7ac706b130f --- /dev/null +++ b/docs/man-openshmem/man3/shmem_longlong_wait_until.3.rst @@ -0,0 +1,9 @@ +.. _shmem_longlong_wait_until: + +shmem_longlong_wait_until +========================= + .. include_body + +.. include:: ../man3/shmem_wait.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_longlong_xor_to_all.3.rst b/docs/man-openshmem/man3/shmem_longlong_xor_to_all.3.rst new file mode 100644 index 00000000000..54c26abd9cb --- /dev/null +++ b/docs/man-openshmem/man3/shmem_longlong_xor_to_all.3.rst @@ -0,0 +1,9 @@ +.. _shmem_longlong_xor_to_all: + +shmem_longlong_xor_to_all +========================= + .. include_body + +.. include:: ../man3/shmem_short_xor_to_all.3.rst + :start-after: .. 
include_body + diff --git a/docs/man-openshmem/man3/shmem_malloc.3.rst b/docs/man-openshmem/man3/shmem_malloc.3.rst new file mode 100644 index 00000000000..1569bd73c69 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_malloc.3.rst @@ -0,0 +1,106 @@ +.. _shmem_malloc: + + +shmem_malloc +============ + +.. include_body + +:ref:`shmem_malloc`\ (3), :ref:`shmem_free`\ (3), :ref:`shmem_align`\ (3), +:ref:`shmem_realloc`\ (3) *shmalloc*\ (3), *shfree*\ (3), *shmemalign*\ (3), +*shrealloc*\ (3) - Symmetric heap memory management functions. + + +SYNOPSIS +-------- + +C or C++: + +.. code-block:: c++ + + #include <mpp/shmem.h> + + void *shmem_malloc(size_t size) + void *shmalloc(size_t size) + + void shmem_free(void *ptr) + void shfree(void *ptr) + + void *shmem_realloc(void *ptr, size_t size) + void *shrealloc(void *ptr, size_t size) + + void *shmem_align(size_t alignment, size_t size) + void *shmemalign(size_t alignment, size_t size) + + extern long malloc_error + + +DESCRIPTION +----------- + +The :ref:`shmem_malloc` function returns a pointer to a block of at least +size bytes suitably aligned for any use. This space is allocated from +the symmetric heap (in contrast to *malloc*\ (3), which allocates from +the private heap). + +The :ref:`shmem_align` function allocates a block in the symmetric heap +that has a byte alignment specified by the alignment argument. + +The :ref:`shmem_free` function causes the block to which ptr points to, to +be deallocated, that is, made available for further allocation. If ptr +is a null pointer, no action occurs; otherwise, if the argument does not +match a pointer earlier returned by a symmetric heap function, or if the +space has already been deallocated, malloc_error is set to indicate the +error, and shfree returns. + +The :ref:`shmem_realloc` function changes the size of the block to which +ptr points to, to the size (in bytes) specified by size. + +The contents of the block are unchanged up to the lesser of the new and +old sizes.
If the new size is larger, the value of the newly allocated +portion of the block is indeterminate. If ptr is a null pointer, the +shrealloc function behaves like the shmalloc function for the specified +size. If size is 0 and ptr is not a null pointer, the block to which it +points to is freed. Otherwise, if ptr does not match a pointer earlier +returned by a symmetric heap function, or if the space has already been +deallocated, the malloc_error variable is set to indicate the error, and +shrealloc returns a null pointer. If the space cannot be allocated, the +block to which ptr points to is unchanged. + +The :ref:`shmem_malloc`, :ref:`shmem_free`, and :ref:`shmem_realloc` functions are provided +so that multiple PEs in an application can allocate symmetric, remotely +accessible memory blocks. These memory blocks can then be used with +(shmem) communication routines. Each of these functions calls the +:ref:`shmem_barrier_all`\ (3) function before returning; this ensures that +all PEs participate in the memory allocation, and that the memory on +other PEs can be used as soon as the local PE returns. + +The user is responsible for calling these functions with identical +argument(s) on all PEs; if differing size arguments are used, subsequent +calls may not return the same symmetric heap address on all PEs. + + +NOTES +----- + +The total size of the symmetric heap is determined at job startup. One +can adjust the size of the heap using the SHMEM_SYMMETRIC_HEAP_SIZE +environment variable. See the *intro_shmem*\ (3) man page for further +details. The :ref:`shmem_malloc`, :ref:`shmem_free`, and :ref:`shmem_realloc` functions +differ from the private heap allocation functions in that all PEs in an +application must call them (a barrier is used to ensure this). + + +RETURN VALUES +------------- + +The :ref:`shmem_malloc` function returns a pointer to the allocated space +(which should be identical on all PEs); otherwise, it returns a null +pointer (with malloc_error set). 
The :ref:`shmem_free` function returns no +value. The :ref:`shmem_realloc` function returns a pointer to the allocated +space (which may have moved); otherwise, it returns a null pointer (with +malloc_error set). + + +.. seealso:: + *intro_shmem*\ (3) *shmem_my_pe*\ (3) *shmem_init*\ (3) diff --git a/docs/man-openshmem/man3/shmem_my_pe.3.rst b/docs/man-openshmem/man3/shmem_my_pe.3.rst new file mode 100644 index 00000000000..4d8178aa8bc --- /dev/null +++ b/docs/man-openshmem/man3/shmem_my_pe.3.rst @@ -0,0 +1,44 @@ +.. _shmem_my_pe: + + +shmem_my_pe +=========== + +.. include_body + +:ref:`shmem_my_pe`, my_pe, \_my_pe - Returns the virtual PE number of the +calling PE. + + +SYNOPSIS +-------- + +C or C++: + +.. code-block:: c++ + + #include + + int shmem_my_pe (void) + int my_pe (void) + +Fortran: + +.. code-block:: fortran + + include 'mpp/shmem.fh' + I = SHMEM_MY_PE () + I = MY_PE () + + +DESCRIPTION +----------- + +my_pe() or shmem_my_pe() return the processing element (PE) number of +the calling PE. It accepts no arguments. The result is an integer +between 0 and npes - 1, where npes is the total number of PEs executing +the current program. + + +.. seealso:: + *intro_shmem*\ (3) *shmem_n_pes*\ (3) *shmem_init*\ (3) diff --git a/docs/man-openshmem/man3/shmem_n_pes.3.rst b/docs/man-openshmem/man3/shmem_n_pes.3.rst new file mode 100644 index 00000000000..68d6e1bb207 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_n_pes.3.rst @@ -0,0 +1,42 @@ +.. _shmem_n_pes: + + +shmem_n_pes +=========== + +.. include_body + +:ref:`num_pes`, \_num_pes :ref:`shmem_n_pes` - Returns the number of processing +elements (PEs) used to run the application. + + +SYNOPSIS +-------- + +C or C++: + +.. code-block:: c++ + + #include + + int _num_pes (void) + int shmem_n_pes (void) + +Fortran: + +.. 
code-block:: fortran + + include 'mpp/shmem.fh' + I = NUM_PES () + I = SHMEM_N_PES () + + +DESCRIPTION +----------- + +num_pes() or shmem_n_pes() return the total number of PEs running in an +application. + + +.. seealso:: + *intro_shmem*\ (3) *shmem_my_pe*\ (3) *shmem_init*\ (3) diff --git a/docs/man-openshmem/man3/shmem_pe_accessible.3.rst b/docs/man-openshmem/man3/shmem_pe_accessible.3.rst new file mode 100644 index 00000000000..275ea4c2fb2 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_pe_accessible.3.rst @@ -0,0 +1,57 @@ +.. _shmem_pe_accessible: + + +shmem_pe_accessible +=================== + +.. include_body + +:ref:`shmem_pe_accessible` - Determines whether a processing element (PE) is +accessible via SHMEM data transfer operations. + + +SYNOPSIS +-------- + +C: + +.. code-block:: c + + #include + + int shmem_pe_accessible(int pe) + +Fortran: + +.. code-block:: fortran + + INCLUDE "mpp/shmem.fh" + + LOGICAL LOG, SHMEM_PE_ACCESSIBLE + INTEGER pe + + LOG = SHMEM_PE_ACCESSIBLE(pe) + + +DESCRIPTION +----------- + +:ref:`shmem_pe_accessible` returns a value that indicates whether the calling +PE is able to perform OpenSHMEM communication operations with the remote +PE. + + +RETURN VALUES +------------- + +C/C++ + The return value is 1 if the specified PE is a valid remote PE for + SHMEM functions; otherwise, it is 0. + +Fortran + The return value is .TRUE. if the specified PE is a valid remote PE + for SHMEM functions; otherwise, it is .FALSE.. + + +.. seealso:: + *intro_shmem*\ (3) *shmem_addr_accessible*\ (3) diff --git a/docs/man-openshmem/man3/shmem_ptr.3.rst b/docs/man-openshmem/man3/shmem_ptr.3.rst new file mode 100644 index 00000000000..a86ebe137a6 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_ptr.3.rst @@ -0,0 +1,137 @@ +.. _shmem_ptr: + + +shmem_ptr +========= + +.. include_body + +:ref:`shmem_ptr`\ (3) - Returns a pointer to a data object on a specified +processing element (PE). + + +SYNOPSIS +-------- + +C or C++: + +.. 
code-block:: c++ + + #include + + void *shmem_ptr(const void *target, int pe) + +Fortran: + +.. code-block:: fortran + + INCLUDE "mpp/shmem.fh" + + POINTER (PTR, POINTEE) + INTEGER pe + + PTR = SHMEM_PTR(target, pe) + + +DESCRIPTION +----------- + +The :ref:`shmem_ptr` routine returns an address that can be used to directly +reference **target** on the remote PE **pe**. With this address we can +perform ordinary loads and stores to the remote address. + +When a sequence of loads (gets) and stores (puts) to a data object on a +remote PE does not match the access pattern provided in a SHMEM data +transfer routine like :ref:`shmem_put32`\ (3) or shmem_real_iget\ (3), the +:ref:`shmem_ptr` function can provide an efficient means to accomplish the +communication. + +The arguments are as follows: + +target + The symmetric data object to be referenced. + +pe + An integer that indicates the PE number on which target is to be + accessed. If you are using Fortran, it must be a default integer + value. + + +EXAMPLES +-------- + +This Fortran program calls :ref:`shmem_ptr` and then PE 0 writes to the BIGD +array on PE 1: + +:: + + PROGRAM REMOTEWRITE + INCLUDE 'mpp/shmem.fh' + + INTEGER BIGD(100) + SAVE BIGD + INTEGER POINTEE(*) + + POINTER (PTR,POINTEE) + CALL START_PES(0) + IF (MY_PE() .EQ. 0) THEN + ! initialize PE 1's BIGD array + PTR = SHMEM_PTR(BIGD, 1) ! get address of PE 1's BIGD + ! array + DO I=1,100 + POINTEE(I) = I + ENDDO + ENDIF + CALL SHMEM_BARRIER_ALL + IF (MY_PE() .EQ. 1) THEN + PRINT *, 'BIGD on PE 1 is: ' + PRINT *, BIGD + ENDIF + END + +This is the equivalent program written in C: + +.. 
code-block:: c + + #include + + main() + { + static int bigd[100]; + int *ptr; + int i; + + shmem_init(); + if (shmem_my_pe() == 0) { + /* initialize PE 1's bigd array */ + ptr = shmem_ptr(bigd, 1); + for (i=0; i<100; i++) + *ptr++ = i+1; + } + shmem_barrier_all(); + if (shmem_my_pe() == 1) { + printf("bigd on PE 1 is:\n"); + for (i=0; i<100; i++) + printf(" %d\n",bigd[i]); + printf("\n"); + } + } + + +NOTES +----- + +The :ref:`shmem_ptr` function is available only on systems where ordinary +memory loads and stores are used to implement SHMEM put and get +operations. + + +RETURN VALUES +------------- + +:ref:`shmem_ptr` returns a pointer to the data object on the specified remote +PE. If target is not remotely accessible, a NULL pointer is returned. + + +.. seealso:: + *intro_shmem*\ (3) *shmem_put*\ (3) *shmem_get*\ (3) diff --git a/docs/man-openshmem/man3/shmem_put128.3.rst b/docs/man-openshmem/man3/shmem_put128.3.rst new file mode 100644 index 00000000000..6c28ea7ef36 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_put128.3.rst @@ -0,0 +1,9 @@ +.. _shmem_put128: + +shmem_put128 +============ + .. include_body + +.. include:: ../man3/shmem_char_put.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_put128_nbi.3.rst b/docs/man-openshmem/man3/shmem_put128_nbi.3.rst new file mode 100644 index 00000000000..97ac54f4e22 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_put128_nbi.3.rst @@ -0,0 +1,9 @@ +.. _shmem_put128_nbi: + +shmem_put128_nbi +================ + .. include_body + +.. include:: ../man3/shmem_putmem_nbi.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_put16_nbi.3.rst b/docs/man-openshmem/man3/shmem_put16_nbi.3.rst new file mode 100644 index 00000000000..b68ccde86e4 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_put16_nbi.3.rst @@ -0,0 +1,9 @@ +.. _shmem_put16_nbi: + +shmem_put16_nbi +=============== + .. include_body + +.. include:: ../man3/shmem_putmem_nbi.3.rst + :start-after: .. 
include_body + diff --git a/docs/man-openshmem/man3/shmem_put32.3.rst b/docs/man-openshmem/man3/shmem_put32.3.rst new file mode 100644 index 00000000000..ee8fe5b4dd8 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_put32.3.rst @@ -0,0 +1,9 @@ +.. _shmem_put32: + +shmem_put32 +=========== + .. include_body + +.. include:: ../man3/shmem_char_put.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_put32_nbi.3.rst b/docs/man-openshmem/man3/shmem_put32_nbi.3.rst new file mode 100644 index 00000000000..1383b28e721 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_put32_nbi.3.rst @@ -0,0 +1,9 @@ +.. _shmem_put32_nbi: + +shmem_put32_nbi +=============== + .. include_body + +.. include:: ../man3/shmem_putmem_nbi.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_put64.3.rst b/docs/man-openshmem/man3/shmem_put64.3.rst new file mode 100644 index 00000000000..8d0685d0243 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_put64.3.rst @@ -0,0 +1,9 @@ +.. _shmem_put64: + +shmem_put64 +=========== + .. include_body + +.. include:: ../man3/shmem_char_put.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_put64_nbi.3.rst b/docs/man-openshmem/man3/shmem_put64_nbi.3.rst new file mode 100644 index 00000000000..b6e1b706865 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_put64_nbi.3.rst @@ -0,0 +1,9 @@ +.. _shmem_put64_nbi: + +shmem_put64_nbi +=============== + .. include_body + +.. include:: ../man3/shmem_putmem_nbi.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_put8_nbi.3.rst b/docs/man-openshmem/man3/shmem_put8_nbi.3.rst new file mode 100644 index 00000000000..3352b918386 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_put8_nbi.3.rst @@ -0,0 +1,9 @@ +.. _shmem_put8_nbi: + +shmem_put8_nbi +============== + .. include_body + +.. include:: ../man3/shmem_putmem_nbi.3.rst + :start-after: .. 
include_body + diff --git a/docs/man-openshmem/man3/shmem_putmem.3.rst b/docs/man-openshmem/man3/shmem_putmem.3.rst new file mode 100644 index 00000000000..2f03ce9775b --- /dev/null +++ b/docs/man-openshmem/man3/shmem_putmem.3.rst @@ -0,0 +1,9 @@ +.. _shmem_putmem: + +shmem_putmem +============ + .. include_body + +.. include:: ../man3/shmem_char_put.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_putmem_nbi.3.rst b/docs/man-openshmem/man3/shmem_putmem_nbi.3.rst new file mode 100644 index 00000000000..70913f1e60a --- /dev/null +++ b/docs/man-openshmem/man3/shmem_putmem_nbi.3.rst @@ -0,0 +1,174 @@ +.. _shmem_putmem_nbi: + + +shmem_putmem_nbi +================ + +.. include_body + +:ref:`shmem_putmem_nbi`\ (3), :ref:`shmem_char_put_nbi`\ (3), +:ref:`shmem_short_put_nbi`\ (3), :ref:`shmem_int_put_nbi`\ (3), +:ref:`shmem_long_put_nbi`\ (3), :ref:`shmem_longlong_put_nbi`\ (3), +:ref:`shmem_float_put_nbi`\ (3), :ref:`shmem_double_put_nbi`\ (3), +:ref:`shmem_longdouble_put_nbi`\ (3), :ref:`shmem_put8_nbi`\ (3), +:ref:`shmem_put16_nbi`\ (3), :ref:`shmem_put32_nbi`\ (3), :ref:`shmem_put64_nbi`\ (3), +:ref:`shmem_put128_nbi`\ (3), - The nonblocking put routines provide a method +for copying data from a contiguous local data object to a data object on +a specified PE. + + +SYNOPSIS +-------- + +C or C++: + +.. 
code-block:: c++ + + #include + + void shmem_putmem_nbi(void *dest, const void *source, + size_t nelems, int pe) + + void shmem_char_put_nbi(char *dest, const char *source, + size_t nelems, int pe) + + void shmem_short_put_nbi(short *dest, const short *source, + size_t nelems, int pe) + + void shmem_int_put_nbi(int *dest, const int *source, + size_t nelems, int pe) + + void shmem_long_put_nbi(long *dest, const long *source, + size_t nelems, int pe) + + void shmem_longlong_put_nbi(long long *dest, const long long *source, + size_t nelems, int pe) + + void shmem_float_put_nbi(float *dest, const float *source, + size_t nelems, int pe) + + void shmem_double_put_nbi(double *dest, const double *source, + size_t nelems, int pe) + + void shmem_longdouble_put_nbi(long double *dest, const long double *source, + size_t nelems, int pe) + + void shmem_put8_nbi(void *dest, const void *source, + size_t nelems, int pe) + + void shmem_put16_nbi(void *dest, const void *source, + size_t nelems, int pe) + + void shmem_put32_nbi(void *dest, const void *source, + size_t nelems, int pe) + + void shmem_put64_nbi(void *dest, const void *source, + size_t nelems, int pe) + + void shmem_put128_nbi(void *dest, const void *source, + size_t nelems, int pe) + +Fortran: + +.. 
code-block:: fortran + + INCLUDE "mpp/shmem.fh" + + INTEGER nelems, pe + + CALL SHMEM_PUTMEM_NBI(dest, source, nelems, pe) + + CALL SHMEM_CHARACTER_PUT_NBI(dest, source, nelems, pe) + + CALL SHMEM_COMPLEX_PUT_NBI(dest, source, nelems, pe) + + CALL SHMEM_DOUBLE_PUT_NBI(dest, source, nelems, pe) + + CALL SHMEM_INTEGER_PUT_NBI(dest, source, nelems, pe) + + CALL SHMEM_LOGICAL_PUT_NBI(dest, source, nelems, pe) + + CALL SHMEM_REAL_PUT_NBI(dest, source, nelems, pe) + + CALL SHMEM_PUT4_NBI(dest, source, nelems, pe) + + CALL SHMEM_PUT8_NBI(dest, source, nelems, pe) + + CALL SHMEM_PUT32_NBI(dest, source, nelems, pe) + + CALL SHMEM_PUT64_NBI(dest, source, nelems, pe) + + CALL SHMEM_PUT128_NBI(dest, source, nelems, pe) + + +DESCRIPTION +----------- + +The routines return after posting the operation. The operation is +considered complete after a subsequent call to :ref:`shmem_quiet`. At the +completion of :ref:`shmem_quiet`, the data has been copied into the dest array +on the destination PE. The delivery of data words into the data object +on the destination PE may occur in any order. Furthermore, two +successive put routines may deliver data out of order unless a call to +:ref:`shmem_fence` is introduced between the two calls. + +The arguments are as follows: + +dest + Data object to be updated on the remote PE. This data object must be + remotely accessible. + +source + Data object containing the data to be copied. + +nelems + Number of elements in the dest and source arrays. nelems must be of + type size_t for C. If you are using Fortran, it must be a constant, + variable, or array element of default integer type. + +pe + PE number of the remote PE. pe must be of type integer. If you are + using Fortran, it must be a constant, variable, or array element of + default integer type. + +If you are using Fortran, data types must be of default size. For +example, a real variable must be declared as REAL, REAL*4, or +REAL(KIND=4). 
+ + +NOTES +----- + +See *intro_shmem*\ (3) for a definition of the term remotely accessible. + + +EXAMPLES +-------- + +Consider this simple example for C. + +.. code-block:: c + + #include + #include + + main() + { + long source[10] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 }; + static long target[10]; + shmem_init(); + + if (shmem_my_pe() == 0) { + /* put 10 words into target on PE 1 */ + shmem_long_put_nbi(target, source, 10, 1); + shmem_quiet(); + } + shmem_barrier_all(); /* sync sender and receiver */ + if (shmem_my_pe() == 1) + shmem_udcflush(); /* not required on Altix systems */ + printf("target[0] on PE %d is %d\n", shmem_my_pe(), target[0]); + } + + +.. seealso:: + *intro_shmem*\ (3) *shmem_quiet*\ (3) diff --git a/docs/man-openshmem/man3/shmem_quiet.3.rst b/docs/man-openshmem/man3/shmem_quiet.3.rst new file mode 100644 index 00000000000..6bbfa4e6cb0 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_quiet.3.rst @@ -0,0 +1,86 @@ +.. _shmem_quiet: + + +shmem_quiet +=========== + +.. include_body + +:ref:`shmem_quiet`\ (3) - Waits for completion of all outstanding remote +writes issued by a processing element (PE). + + +SYNOPSIS +-------- + +C or C++: + +.. code-block:: c++ + + #include + + void shmem_quiet(void) + +Fortran: + +.. code-block:: fortran + + CALL SHMEM_QUIET + + +DESCRIPTION +----------- + +:ref:`shmem_quiet` ensures ordering of put (remote write) operations. All put +operations issued to any processing element (PE) prior to the call to +:ref:`shmem_quiet` are guaranteed to be visible to all other PEs no later than +any subsequent memory load or store, remote put or get, or +synchronization operations that follow the call to :ref:`shmem_quiet`. + + +NOTES +----- + +| :ref:`shmem_quiet` is most useful as a way of ensuring ordering of delivery + of several put operations. For example, you might use :ref:`shmem_quiet` to + await delivery of a block of data before issuing another put, which + sets a completion flag on another PE. 
+| :ref:`shmem_quiet` is not usually needed if :ref:`shmem_barrier_all`\ (3) or + :ref:`shmem_barrier`\ (3) are called. The barrier routines all wait for the + completion of outstanding remote writes (puts). + + +EXAMPLES +-------- + +:: + + PROGRAM COMPFLAG + INCLUDE "mpp/shmem.fh" + + INTEGER FLAG_VAR, ARRAY(100), RECEIVER, SENDER + COMMON/FLAG/FLAG_VAR + COMMON/DATA/ARRAY + INTRINSIC MY_PE + + FLAG_VAR = 0 + CALL SHMEM_BARRIER_ALL ! wait for FLAG_VAR to be initialized + SENDER = 0 ! PE 0 sends the data + RECEIVER = 1 ! PE 1 receives the data + + IF (MY_PE() .EQ. 0) THEN + ARRAY = 33 + CALL SHMEM_PUT(ARRAY, ARRAY, 100, RECEIVER) ! start sending data + CALL SHMEM_QUIET ! wait for delivery + CALL SHMEM_PUT(FLAG_VAR, 1, 1, RECEIVER) ! send completion flag + ELSE IF (MY_PE() .EQ. RECEIVER) THEN + CALL SHMEM_UDCFLUSH + CALL SHMEM_WAIT(FLAG_VAR, 0) + PRINT *,ARRAY ! ARRAY has been delivered + ENDIF + END + + +.. seealso:: + *intro_shmem*\ (3) :ref:`shmem_barrier`\ (3) :ref:`shmem_barrier_all`\ (3) + *shmem_fence*\ (3) *shmem_put*\ (3) *shmem_wait*\ (3) diff --git a/docs/man-openshmem/man3/shmem_realloc.3.rst b/docs/man-openshmem/man3/shmem_realloc.3.rst new file mode 100644 index 00000000000..cba6e6ec2db --- /dev/null +++ b/docs/man-openshmem/man3/shmem_realloc.3.rst @@ -0,0 +1,9 @@ +.. _shmem_realloc: + +shmem_realloc +============= + .. include_body + +.. include:: ../man3/shmem_malloc.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_set_cache_inv.3.rst b/docs/man-openshmem/man3/shmem_set_cache_inv.3.rst new file mode 100644 index 00000000000..a2a90f6c3cc --- /dev/null +++ b/docs/man-openshmem/man3/shmem_set_cache_inv.3.rst @@ -0,0 +1,9 @@ +.. _shmem_set_cache_inv: + +shmem_set_cache_inv +=================== + .. include_body + +.. include:: ../man3/shmem_udcflush.3.rst + :start-after: .. 
include_body + diff --git a/docs/man-openshmem/man3/shmem_set_cache_line_inv.3.rst b/docs/man-openshmem/man3/shmem_set_cache_line_inv.3.rst new file mode 100644 index 00000000000..8012164b57a --- /dev/null +++ b/docs/man-openshmem/man3/shmem_set_cache_line_inv.3.rst @@ -0,0 +1,9 @@ +.. _shmem_set_cache_line_inv: + +shmem_set_cache_line_inv +======================== + .. include_body + +.. include:: ../man3/shmem_udcflush.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_set_lock.3.rst b/docs/man-openshmem/man3/shmem_set_lock.3.rst new file mode 100644 index 00000000000..4105cb321be --- /dev/null +++ b/docs/man-openshmem/man3/shmem_set_lock.3.rst @@ -0,0 +1,85 @@ +.. _shmem_set_lock: + + +shmem_set_lock +============== + +.. include_body + +:ref:`shmem_set_lock`\ (3), :ref:`shmem_clear_lock`\ (3), :ref:`shmem_test_lock`\ (3) - +Releases, locks, and tests a mutual exclusion memory lock. + + +SYNOPSIS +-------- + +C or C++: + +.. code-block:: c++ + + #include + + void shmem_clear_lock(volatile long *lock) + + void shmem_set_lock(volatile long *lock) + + int shmem_test_lock(volatile long *lock) + +Fortran: + +.. code-block:: fortran + + INCLUDE "mpp/shmem.fh" + + INTEGER lock, SHMEM_TEST_LOCK + + CALL SHMEM_CLEAR_LOCK(lock) + + CALL SHMEM_SET_LOCK(lock) + + I = SHMEM_TEST_LOCK(lock) + + +DESCRIPTION +----------- + +The :ref:`shmem_set_lock` routine sets a mutual exclusion lock after waiting +for the lock to be freed by any other PE currently holding the lock. +Waiting PEs are assured of getting the lock in a first-come, +first-served manner. + +The :ref:`shmem_clear_lock` routine releases a lock previously set by +:ref:`shmem_set_lock` after ensuring that all local and remote stores initiated +in the critical region are complete. + +The :ref:`shmem_test_lock` function sets a mutual exclusion lock only if it is +currently cleared. By using this function, a PE can avoid blocking on a +set lock. 
If the lock is currently set, the function returns without +waiting. These routines are appropriate for protecting a critical region +from simultaneous update by multiple PEs. They accept the following +arguments: + +lock + A symmetric data object that is a scalar variable or an array of + length 1. This data object must be set to 0 on all processing + elements (PEs) prior to the first use. lock must be of type integer. + If you are using Fortran, it must be of default kind. + + +NOTES +----- + +The term symmetric data object is defined on *intro_shmem*\ (3). + + +RETURN VALUES +------------- + +The :ref:`shmem_test_lock` function returns 0 if the lock was originally +cleared and this call was able to set the lock. A value of 1 is returned +if the lock had been set and the call returned without waiting to set +the lock. + + +.. seealso:: + *intro_shmem*\ (3) diff --git a/docs/man-openshmem/man3/shmem_short_and_to_all.3.rst b/docs/man-openshmem/man3/shmem_short_and_to_all.3.rst new file mode 100644 index 00000000000..7f4b598b659 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_short_and_to_all.3.rst @@ -0,0 +1,219 @@ +.. _shmem_short_and_to_all: + + +shmem_short_and_to_all +====================== + +.. include_body + +:ref:`shmem_int_and_to_all`\ (3), shmem_int4_and_to_all\ (3), +shmem_int8_and_to_all\ (3), :ref:`shmem_long_and_to_all`\ (3), +:ref:`shmem_longlong_and_to_all`\ (3), :ref:`shmem_short_and_to_all`\ (3) - +Performs a bitwise AND operation on symmetric arrays over the active set +of PEs. + + +SYNOPSIS +-------- + +C or C++: + +.. 
code-block:: c++ + + #include + + void shmem_int_and_to_all(int *target, const int *source, + int nreduce, int PE_start, int logPE_stride, int PE_size, + int *pWrk, long *pSync) + + void shmem_long_and_to_all(long *target, const long *source, + int nreduce, int PE_start, int logPE_stride, int PE_size, + long *pWrk, long *pSync) + + void shmem_longlong_and_to_all(long long *target, + const long long *source, int nreduce, int PE_start, int logPE_stride, + int PE_size, long long *pWrk, long *pSync) + + void shmem_short_and_to_all(short *target, const short *source, + int nreduce, int PE_start, int logPE_stride, int PE_size, + short *pWrk, long *pSync) + +Fortran: + +.. code-block:: fortran + + INCLUDE "mpp/shmem.fh" + + INTEGER pSync(SHMEM_REDUCE_SYNC_SIZE) + INTEGER nreduce, PE_start, logPE_stride, PE_size + + CALL SHMEM_INT4_AND_TO_ALL(target, source, nreduce, + & PE_start, logPE_stride, PE_size, pWrk, pSync) + + CALL SHMEM_INT8_AND_TO_ALL(target, source, nreduce, + & PE_start, logPE_stride, PE_size, pWrk, pSync) + + +DESCRIPTION +----------- + +The shared memory (SHMEM) reduction routines compute one or more +reductions across symmetric arrays on multiple virtual PEs. A reduction +performs an associative binary operation across a set of values. For a +list of other SHMEM reduction routines, see *intro_shmem*\ (3). + +The nreduce argument determines the number of separate reductions to +perform. The source array on all PEs in the active set provides one +element for each reduction. The results of the reductions are placed in +the target array on all PEs in the active set. The active set is defined +by the PE_start, logPE_stride, PE_size triplet. + +The source and target arrays may be the same array, but they may not be +overlapping arrays. As with all SHMEM collective routines, each of these +routines assumes that only PEs in the active set call the routine. If a +PE not in the active set calls a SHMEM collective routine, undefined +behavior results. 
+ +The arguments are as follows: + +target + A symmetric array, of length nreduce elements, to receive the result + of the reduction operations. The data type of target varies with the + version of the reduction routine being called. When calling from + C/C++, refer to the SYNOPSIS section for data type information. When + calling from Fortran, the target data types are as follows: + + shmem_int8_and_to_all: Integer, with an element size of 8 bytes + + shmem_int4_and_to_all: Integer, with an element size of 4 bytes + +source + A symmetric array, of length nreduce elements, that contains one + element for each separate reduction operation. The source argument + must have the same data type as target. + +nreduce + The number of elements in the target and source arrays. nreduce must + be of type integer. If you are using Fortran, it must be a default + integer value. + +PE_start + The lowest virtual PE number of the active set of PEs. PE_start must + be of type integer. If you are using Fortran, it must be a default + integer value. + +logPE_stride + The log (base 2) of the stride between consecutive virtual PE numbers + in the active set. logPE_stride must be of type integer. If you are + using Fortran, it must be a default integer value. + +PE_size + The number of PEs in the active set. PE_size must be of type integer. + If you are using Fortran, it must be a default integer value. + +pWrk + A symmetric work array. The pWrk argument must have the same data + type as target. In C/C++, this contains max(nreduce/2 + 1, + \_SHMEM_REDUCE_MIN_WRKDATA_SIZE) elements. In Fortran, this contains + max(nreduce/2 + 1, SHMEM_REDUCE_MIN_WRKDATA_SIZE) elements. + +pSync + A symmetric work array. In C/C++, pSync must be of type long and size + \_SHMEM_REDUCE_SYNC_SIZE. In Fortran, pSync must be of type integer + and size SHMEM_REDUCE_SYNC_SIZE. If you are using Fortran, it must be + a default integer value. 
Every element of this array must be + initialized with the value \_SHMEM_SYNC_VALUE (in C/C++) or + SHMEM_SYNC_VALUE (in Fortran) before any of the PEs in the active set + enter the reduction routine. + +The values of arguments nreduce, PE_start, logPE_stride, and PE_size +must be equal on all PEs in the active set. The same target and source +arrays, and the same pWrk and pSync work arrays, must be passed to all +PEs in the active set. + +Before any PE calls a reduction routine, you must ensure that the +following conditions exist (synchronization via a barrier or some other +method is often needed to ensure this): The pWrk and pSync arrays on all +PEs in the active set are not still in use from a prior call to a +collective SHMEM routine. The target array on all PEs in the active set +is ready to accept the results of the reduction. + +Upon return from a reduction routine, the following are true for the +local PE: The target array is updated. The values in the pSync array are +restored to the original values. + + +NOTES +----- + +The terms collective, symmetric, and cache aligned are defined in +*intro_shmem*\ (3). All SHMEM reduction routines reset the values in +pSync before they return, so a particular pSync buffer need only be +initialized the first time it is used. + +You must ensure that the pSync array is not being updated on any PE in +the active set while any of the PEs participate in processing of a SHMEM +reduction routine. Be careful to avoid the following situations: If the +pSync array is initialized at run time, some type of synchronization is +needed to ensure that all PEs in the working set have initialized pSync +before any of them enter a SHMEM routine called with the pSync +synchronization array. A pSync or pWrk array can be reused in a +subsequent reduction routine call only if none of the PEs in the active +set are still processing a prior reduction routine call that used the +same pSync or pWrk arrays. 
In general, this can be assured only by doing +some type of synchronization. However, in the special case of reduction +routines being called with the same active set, you can allocate two +pSync and pWrk arrays and alternate between them on successive calls. + + +EXAMPLES +-------- + +**Example 1**: This Fortran example statically initializes the pSync +array and finds the logical AND of the integer variable FOO across all +even PEs. + +:: + + INCLUDE "mpp/shmem.fh" + + INTEGER PSYNC(SHMEM_REDUCE_SYNC_SIZE) + DATA PSYNC /SHMEM_REDUCE_SYNC_SIZE*SHMEM_SYNC_VALUE/ + PARAMETER (NR=1) + REAL PWRK(MAX(NR/2+1, SHMEM_REDUCE_MIN_WRKDATA_SIZE)) + INTEGER FOO, FOOAND + COMMON /COM/ FOO, FOOAND, PWRK + INTRINSIC MY_PE + + IF ( MOD(MY_PE(),2) .EQ. 0) THEN + CALL SHMEM_INT8_AND_TO_ALL(FOOAND, FOO, NR, 0, 1, N$PES/2, + & PWRK, PSYNC) + PRINT *, 'Result on PE ', MY_PE(), ' is ', FOOAND + ENDIF + +**Example 2**: Consider the following C call: + +.. code-block:: c + + shmem_int_and_to_all( target, source, 3, 0, 0, 8, pwrk, psync ); + +The preceding call is more efficient, but semantically equivalent to, +the combination of the following calls: + +:: + + shmem_int_and_to_all(&(target[0]), &(source[0]), 1, 0, 0, 8, + pwrk1, psync1); + + shmem_int_and_to_all(&(target[1]), &(source[1]), 1, 0, 0, 8, + pwrk2, psync2); + + shmem_int_and_to_all(&(target[2]), &(source[2]), 1, 0, 0, 8, + pwrk1, psync1); + +Note that two sets of pWrk and pSync arrays are used alternately because +no synchronization is done between calls. + + +.. seealso:: + *f90*\ (1) *intro_shmem*\ (3) diff --git a/docs/man-openshmem/man3/shmem_short_g.3.rst b/docs/man-openshmem/man3/shmem_short_g.3.rst new file mode 100644 index 00000000000..bd34bdc17f9 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_short_g.3.rst @@ -0,0 +1,9 @@ +.. _shmem_short_g: + +shmem_short_g +============= + .. include_body + +.. include:: ../man3/shmem_char_g.3.rst + :start-after: .. 
include_body + diff --git a/docs/man-openshmem/man3/shmem_short_get.3.rst b/docs/man-openshmem/man3/shmem_short_get.3.rst new file mode 100644 index 00000000000..c291ca5e075 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_short_get.3.rst @@ -0,0 +1,9 @@ +.. _shmem_short_get: + +shmem_short_get +=============== + .. include_body + +.. include:: ../man3/shmem_char_get.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_short_get_nbi.3.rst b/docs/man-openshmem/man3/shmem_short_get_nbi.3.rst new file mode 100644 index 00000000000..47fd781597f --- /dev/null +++ b/docs/man-openshmem/man3/shmem_short_get_nbi.3.rst @@ -0,0 +1,9 @@ +.. _shmem_short_get_nbi: + +shmem_short_get_nbi +=================== + .. include_body + +.. include:: ../man3/shmem_getmem_nbi.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_short_iget.3.rst b/docs/man-openshmem/man3/shmem_short_iget.3.rst new file mode 100644 index 00000000000..e77dfd5cb88 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_short_iget.3.rst @@ -0,0 +1,213 @@ +.. _shmem_short_iget: + + +shmem_short_iget +================ + +.. include_body + +shmem_complex_iget\ (3), :ref:`shmem_double_iget`\ (3), +:ref:`shmem_float_iget`\ (3), shmem_iget4\ (3), shmem_iget8\ (3), +:ref:`shmem_iget32`\ (3), :ref:`shmem_iget64`\ (3), :ref:`shmem_iget128`\ (3), +:ref:`shmem_int_iget`\ (3), shmem_integer_iget\ (3), +shmem_logical_iget\ (3), :ref:`shmem_long_iget`\ (3), +:ref:`shmem_longdouble_iget`\ (3), :ref:`shmem_longlong_iget`\ (3), +shmem_real_iget\ (3), :ref:`shmem_short_iget`\ (3) - Transfers strided data +from a specified processing element (PE) + + +SYNOPSIS +-------- + +C or C++: + +.. 
code-block:: c++ + + #include + + void shmem_iget32(void *target, const void *source, + ptrdiff_t tst, ptrdiff_t sst, size_t len, int pe) + + void shmem_iget64(void *target, const void *source, + ptrdiff_t tst, ptrdiff_t sst, size_t len, int pe) + + void shmem_iget128(void *target, const void *source, + ptrdiff_t tst, ptrdiff_t sst, size_t len, int pe) + + void shmem_int_iget(int *target, const int *source, + ptrdiff_t tst, ptrdiff_t sst, size_t len, int pe) + + void shmem_double_iget(double *target, const double *source, + ptrdiff_t tst, ptrdiff_t sst, size_t len, int pe) + + void shmem_float_iget(float *target, const float *source, + ptrdiff_t tst, ptrdiff_t sst, size_t len, int pe) + + void shmem_long_iget(long *target, const long *source, + ptrdiff_t tst, ptrdiff_t sst, size_t len, int pe) + + void shmem_longdouble_iget(long double *target, + const long double *source, ptrdiff_t tst, ptrdiff_t sst,size_t len, int pe) + + void shmem_longlong_iget(long long *target, + const long long *source, ptrdiff_t tst, ptrdiff_t sst, size_t len, int pe) + + void shmem_short_iget(short *target, + const short *source, ptrdiff_t tst, ptrdiff_t sst, size_t len, int pe) + +Fortran: + +.. 
code-block:: fortran + + INCLUDE "mpp/shmem.fh" + + INTEGER tst, sst, len, pe + + CALL SHMEM_COMPLEX_IGET(target, source, tst, sst, len, + & pe) + + CALL SHMEM_DOUBLE_IGET(target, source, tst, sst, len, + & pe) + + CALL SHMEM_IGET4(target, source, tst, sst, len, pe) + + CALL SHMEM_IGET8(target, source, tst, sst, len, pe) + + CALL SHMEM_IGET32(target, source, tst, sst, len, pe) + + CALL SHMEM_IGET64(target, source, tst, sst, len, pe) + + CALL SHMEM_IGET128(target, source, tst, sst, len, pe) + + CALL SHMEM_INTEGER_IGET(target, source, tst, sst, len, + & pe) + + CALL SHMEM_LOGICAL_IGET(target, source, tst, sst, len, + & pe) + + CALL SHMEM_REAL_IGET(target, source, tst, sst, len, pe) + + +DESCRIPTION +----------- + +The strided get routines retrieve array data available at address source +on remote PE (pe). The elements of the **source** array are separated by +a stride **sst**. Once the data is received, it is stored at the local +memory address **target**, separated by stride **tst**. The routines +return when the data has been copied into the local **target** array. + +The arguments are as follows: + +target + Array to be updated on the local PE. + +source + Array containing the data to be copied on the remote PE. + +tst + The stride between consecutive elements of the target array. The + stride is scaled by the element size of the target array. A value of + 1 indicates contiguous data. tst must be of type integer. If you are + calling from Fortran, it must be a default integer value. + +sst + The stride between consecutive elements of the source array. The + stride is scaled by the element size of the source array. A value of + 1 indicates contiguous data. sst must be of type integer. If you are + calling from Fortran, it must be a default integer value. + +len + Number of elements in the target and source arrays. len must be of + type integer. If you are using Fortran, it must be a constant, + variable, or array element of default integer type. 
+ +pe + PE number of the remote PE. pe must be of type integer. If you are + using Fortran, it must be a constant, variable, or array element of + default integer type. + +The target and source data objects must conform to typing constraints, +which are as follows: + +:ref:`shmem_iget32`, shmem_iget4: Any noncharacter type that has a storage size + equal to 32 bits. + +:ref:`shmem_iget64`, shmem_iget8: Any noncharacter type that has a storage size + equal to 64 bits. + +:ref:`shmem_iget128`: Any noncharacter type that has a storage size equal to + 128 bits. + +:ref:`shmem_short_iget`: Elements of type short. + +:ref:`shmem_int_iget`: Elements of type int. + +:ref:`shmem_long_iget`: Elements of type long. + +:ref:`shmem_longlong_iget`: Elements of type long long. + +:ref:`shmem_float_iget`: Elements of type float. + +:ref:`shmem_double_iget`: Elements of type double. + +:ref:`shmem_longdouble_iget`: Elements of type long double. + +**SHMEM_COMPLEX_IGET**: Elements of type complex of default size. + +**SHMEM_DOUBLE_IGET**: (Fortran) Elements of type double precision. + +**SHMEM_INTEGER_IGET**: Elements of type integer. + +**SHMEM_LOGICAL_IGET**: Elements of type logical. + +**SHMEM_REAL_IGET**: Elements of type real. + +If you are using Fortran, data types must be of default size. For +example, a real variable must be declared as REAL, REAL*4, or +REAL(KIND=4). + + +NOTES +----- + +See *intro_shmem*\ (3) for a definition of the term remotely accessible. + + +EXAMPLES +-------- + +The following simple example uses shmem_logical_iget in a Fortran +program. Compile this example with the -lsma compiler option. 
+ +:: + + PROGRAM STRIDELOGICAL + LOGICAL SOURCE(10), TARGET(5) + SAVE SOURCE ! SAVE MAKES IT REMOTELY ACCESSIBLE + DATA SOURCE /.T.,.F.,.T.,.F.,.T.,.F.,.T.,.F.,.T.,.F./ + DATA TARGET / 5*.F. / + + CALL START_PES(2) + IF (MY_PE() .EQ. 0) THEN + CALL SHMEM_LOGICAL_IGET(TARGET, SOURCE, 1, 2, 5, 1) + PRINT*,'TARGET AFTER SHMEM_LOGICAL_IGET:',TARGET + ENDIF + CALL SHMEM_BARRIER_ALL + END + + +.. seealso:: + *intro_shmem*\ (3) *shmem_get*\ (3) *shmem_quiet*\ (3) diff --git a/docs/man-openshmem/man3/shmem_short_iput.3.rst b/docs/man-openshmem/man3/shmem_short_iput.3.rst new file mode 100644 index 00000000000..c52ef01608a --- /dev/null +++ b/docs/man-openshmem/man3/shmem_short_iput.3.rst @@ -0,0 +1,216 @@ +.. _shmem_short_iput: + + +shmem_short_iput +================ + +.. include_body + +shmem_complex_iput\ (3), :ref:`shmem_double_iput`\ (3), +:ref:`shmem_float_iput`\ (3), :ref:`shmem_int_iput`\ (3), +shmem_integer_iput\ (3), shmem_iput4\ (3), shmem_iput8\ (3), +:ref:`shmem_iput32`\ (3), :ref:`shmem_iput64`\ (3), :ref:`shmem_iput128`\ (3), +shmem_logical_iput\ (3), :ref:`shmem_long_iput`\ (3), +:ref:`shmem_longdouble_iput`\ (3), :ref:`shmem_longlong_iput`\ (3), +shmem_real_iput\ (3), :ref:`shmem_short_iput`\ (3) - Transfer strided data +to a specified processing element (PE). + + +SYNOPSIS +-------- + +C or C++: + +.. 
code-block:: c++ + + #include + + void shmem_double_iput(double *target, const double *source, + ptrdiff_t tst, ptrdiff_t sst, size_t len, int pe) + + void shmem_float_iput(float *target, const float *source, + ptrdiff_t tst, ptrdiff_t sst, size_t len, int pe) + + void shmem_int_iput(int *target, const int *source, + ptrdiff_t tst, ptrdiff_t sst, size_t len, int pe) + + void shmem_iput32(void *target, const void *source, + ptrdiff_t tst, ptrdiff_t sst, size_t len, int pe) + + void shmem_iput64(void *target, const void *source, + ptrdiff_t tst, ptrdiff_t sst, size_t len, int pe) + + void shmem_iput128(void *target, const void *source, + ptrdiff_t tst, ptrdiff_t sst, size_t len, int pe) + + void shmem_long_iput(long *target, const long *source, + ptrdiff_t tst, ptrdiff_t sst, size_t len, int pe) + + void shmem_longdouble_iput(long double *target, + const long double *source, ptrdiff_t tst, ptrdiff_t sst, + size_t len, int pe) + + void shmem_longlong_iput(long long *target, + const long long *source, ptrdiff_t tst, ptrdiff_t sst, + size_t len, int pe) + + void shmem_short_iput(short *target, const short *source, + ptrdiff_t tst, ptrdiff_t sst, size_t len, int pe) + +Fortran: + +.. 
code-block:: fortran + + INCLUDE "mpp/shmem.fh" + + INTEGER tst, sst, len, pe + + CALL SHMEM_COMPLEX_IPUT(target, source, tst, sst, len, + & pe) + + CALL SHMEM_DOUBLE_IPUT(target, source, tst, sst, len, + & pe) + + CALL SHMEM_INTEGER_IPUT(target, source, tst, sst, len, + & pe) + + CALL SHMEM_IPUT4(target, source, tst, sst, len, pe) + + CALL SHMEM_IPUT8(target, source, tst, sst, len, pe) + + CALL SHMEM_IPUT32(target, source, tst, sst, len, pe) + + CALL SHMEM_IPUT64(target, source, tst, sst, len, pe) + + CALL SHMEM_IPUT128(target, source, tst, sst, len, pe) + + CALL SHMEM_LOGICAL_IPUT(target, source, tst, sst, len, + & pe) + + CALL SHMEM_REAL_IPUT(target, source, tst, sst, len, pe) + + +DESCRIPTION +----------- + +The shmem_iput routines read the elements of a local array (**source**) +and write them to a remote array (**target**) on the PE indicated by +**pe**. These routines return when the data has been copied out of the +source array on the local PE but not necessarily before the data has +been delivered to the remote data object. + +The arguments are as follows: + +target + Array to be updated on the remote PE. This data object must be + remotely accessible. + +source + Array containing the data to be copied. + +tst + The stride between consecutive elements of the target array. The + stride is scaled by the element size of the target array. A value of + 1 indicates contiguous data. tst must be of type integer. If you are + using Fortran, it must be a default integer value. + +sst + The stride between consecutive elements of the source array. The + stride is scaled by the element size of the source array. A value of + 1 indicates contiguous data. sst must be of type integer. If you are + using Fortran, it must be a default integer value. + +len + Number of elements in the target and source arrays. len must be of + type integer. If you are using Fortran, it must be a constant, + variable, or array element of default integer type. 
+ +pe + PE number of the remote PE. pe must be of type integer. If you are + using Fortran, it must be a constant, variable, or array element of + default integer type. + +The target and source data objects must conform to typing constraints, +which are as follows: + +:ref:`shmem_iput32`, shmem_iput4: Any noncharacter type that has a storage size equal + to 32 bits. + +:ref:`shmem_iput64`, shmem_iput8: Any noncharacter type that has a storage size equal + to 64 bits. + +:ref:`shmem_iput128`: Any noncharacter type that has a storage size equal to 128 bits. + +:ref:`shmem_short_iput`: Elements of type short. + +:ref:`shmem_int_iput`: Elements of type int. + +:ref:`shmem_long_iput`: Elements of type long. + +:ref:`shmem_longlong_iput`: Elements of type long long. + +:ref:`shmem_float_iput`: Elements of type float. + +:ref:`shmem_double_iput`: Elements of type double. + +:ref:`shmem_longdouble_iput`: Elements of type long double. + +**SHMEM_COMPLEX_IPUT**: Elements of type complex of default size. + +**SHMEM_DOUBLE_IPUT**: (Fortran) Elements of type double precision. + +**SHMEM_INTEGER_IPUT**: Elements of type integer. + +**SHMEM_LOGICAL_IPUT**: Elements of type logical. + +**SHMEM_REAL_IPUT**: Elements of type real. + +If you are using Fortran, data types must be of default size. For +example, a real variable must be declared as REAL, REAL*4 or +REAL(KIND=4). + + +NOTES +----- + +See *intro_shmem*\ (3) for a definition of the term remotely accessible. + + +EXAMPLES +-------- + +Consider the following simple :ref:`shmem_short_iput` example for C/C++ +programs. 
+ +:: + + #include <mpp/shmem.h> + + main() + { + short source[10] = { 1, 2, 3, 4, 5, + 6, 7, 8, 9, 10 }; + static short target[10]; + + shmem_init(); + if (shmem_my_pe() == 0) { + /* put 10 words into target on PE 1 */ + shmem_short_iput(target, source, 1, 2, 5, 1); + } + shmem_barrier_all(); /* sync sender and receiver */ + if (shmem_my_pe() == 1) { + shmem_udcflush(); /* not required on IRIX systems */ + printf("target on PE %d is %d %d %d %d %d\n", shmem_my_pe(), + (int)target[0], (int)target[1], (int)target[2], + (int)target[3], (int)target[4] ); + } + shmem_barrier_all(); /* sync before exiting */ + } + + +.. seealso:: + *intro_shmem*\ (3) *shmem_iget*\ (3) *shmem_put*\ (3) *shmem_quiet*\ (3) diff --git a/docs/man-openshmem/man3/shmem_short_max_to_all.3.rst b/docs/man-openshmem/man3/shmem_short_max_to_all.3.rst new file mode 100644 index 00000000000..b4c72779e42 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_short_max_to_all.3.rst @@ -0,0 +1,253 @@ +.. _shmem_short_max_to_all: + + +shmem_short_max_to_all +====================== + +.. include_body + +:ref:`shmem_double_max_to_all`\ (3), :ref:`shmem_float_max_to_all`\ (3), +:ref:`shmem_int_max_to_all`\ (3), shmem_int4_max_to_all\ (3), +shmem_int8_max_to_all\ (3), :ref:`shmem_long_max_to_all`\ (3), +:ref:`shmem_longdouble_max_to_all`\ (3), :ref:`shmem_longlong_max_to_all`\ (3), +shmem_real4_max_to_all\ (3), shmem_real8_max_to_all\ (3), +shmem_real16_max_to_all\ (3), :ref:`shmem_short_max_to_all`\ (3) - Performs +a maximum function reduction across a set of processing elements (PEs). + + +SYNOPSIS +-------- + +C or C++: + +.. 
code-block:: c++ + + #include + + void shmem_double_max_to_all(double *target, const double *source, + int nreduce, int PE_start, int logPE_stride, int PE_size, + double *pWrk, long *pSync) + + void shmem_float_max_to_all(float *target, const float *source, + int nreduce, int PE_start, int logPE_stride, int PE_size, + float *pWrk, long *pSync) + + void shmem_int_max_to_all(int *target, const int *source, + int nreduce, int PE_start, int logPE_stride, int PE_size, + int *pWrk, long *pSync) + + void shmem_long_max_to_all(long *target, const long *source, + int nreduce, int PE_start, int logPE_stride, int PE_size, + long *pWrk, long *pSync) + + void shmem_longdouble_max_to_all(long double *target, + const long double *source, int nreduce, int PE_start, + int logPE_stride, int PE_size, long double *pWrk, long *pSync) + + void shmem_longlong_max_to_all(long long *target, + const long long *source, int nreduce, int PE_start, + int logPE_stride, int PE_size, long long *pWrk, long *pSync) + + void shmem_short_max_to_all(short *target, const short *source, + int nreduce, int PE_start, int logPE_stride, int PE_size, + short *pWrk, long *pSync) + +Fortran: + +.. 
code-block:: fortran + + INCLUDE "mpp/shmem.fh" + + INTEGER pSync(SHMEM_REDUCE_SYNC_SIZE) + + INTEGER nreduce, PE_start, logPE_stride, PE_size + + CALL SHMEM_INT4_MAX_TO_ALL(target, source, nreduce, + & PE_start, logPE_stride, PE_size, pWrk, pSync) + + CALL SHMEM_INT8_MAX_TO_ALL(target, source, nreduce, + & PE_start, logPE_stride, PE_size, pWrk, pSync) + + CALL SHMEM_REAL4_MAX_TO_ALL(target, source, nreduce, + & PE_start, logPE_stride, PE_size, pWrk, pSync) + + CALL SHMEM_REAL8_MAX_TO_ALL(target, source, nreduce, + & PE_start, logPE_stride, PE_size, pWrk, pSync) + + CALL SHMEM_REAL16_MAX_TO_ALL(target, source, nreduce, + & PE_start, logPE_stride, PE_size, pWrk, pSync) + + +DESCRIPTION +----------- + +The shared memory (SHMEM) reduction routines compute one or more +reductions across symmetric arrays on multiple virtual PEs. A reduction +performs an associative binary operation across a set of values. For a +list of other SHMEM reduction routines, see *intro_shmem*\ (3). + +As with all SHMEM collective routines, each of these routines assumes +that only PEs in the active set call the routine. If a PE not in the +active set calls a SHMEM collective routine, undefined behavior results. + +The nreduce argument determines the number of separate reductions to +perform. The source array on all PEs in the active set provides one +element for each reduction. The results of the reductions are placed in +the target array on all PEs in the active set. The active set is defined +by the PE_start, logPE_stride, PE_size triplet. + +The source and target arrays may be the same array, but they may not be +overlapping arrays. + +The arguments are as follows: + +target + A symmetric array of length nreduce elements to receive the results + of the reduction operations. The data type of target varies with the + version of the reduction routine being called. When calling from C, + refer to the SYNOPSIS section for data type information. 
+ +When calling from Fortran, the target data types are as follows: + + shmem_comp8_max_to_all: Complex, with an element size equal to two + 8-byte real values. + + shmem_int4_max_to_all: Integer, with an element size of 4 bytes. + + shmem_int8_max_to_all: Integer, with an element size of 8 bytes. + + shmem_real4_max_to_all: Real, with an element size of 4 bytes. + + shmem_real16_max_to_all: Real, with an element size of 16 bytes. + +.. + +source + A symmetric array of length nreduce elements that contains one + element for each separate reduction operation. The source argument + must have the same data type as target. + +nreduce + The number of elements in the target and source arrays. nreduce must + be of type integer. If you are using Fortran, it must be a default + integer value. + +PE_start + The lowest virtual PE number of the active set of PEs. PE_start must + be of type integer. If you are using Fortran, it must be a default + integer value. + +logPE_stride + The log (base 2) of the stride between consecutive virtual PE numbers + in the active set. logPE_stride must be of type integer. If you are + using Fortran, it must be a default integer value. + +PE_size + The number of PEs in the active set. PE_size must be of type integer. + If you are using Fortran, it must be a default integer value. + +pWrk + A symmetric work array. The pWrk argument must have the same data + type as target. In C/C++, this contains max(nreduce/2 + 1, + \_SHMEM_REDUCE_MIN_WRKDATA_SIZE) elements. In Fortran, this contains + max(nreduce/2 + 1, SHMEM_REDUCE_MIN_WRKDATA_SIZE) elements. + +pSync + A symmetric work array. In C/C++, pSync is of type long and size + \_SHMEM_REDUCE_SYNC_SIZE. In Fortran, pSync is of type integer and + size SHMEM_REDUCE_SYNC_SIZE. If you are using Fortran, it must be a + default integer value. 
Every element of this array must be + initialized with the value \_SHMEM_SYNC_VALUE (in C/C++) or + SHMEM_SYNC_VALUE (in Fortran) before any of the PEs in the active set + enter the reduction routine. + +The values of arguments nreduce, PE_start, logPE_stride, and PE_size +must be equal on all PEs in the active set. The same target and source +arrays, and the same pWrk and pSync work arrays, must be passed to all +PEs in the active set. + +Before any PE calls a reduction routine, you must ensure that the +following conditions exist (synchronization via a barrier or some other +method is often needed to ensure this): The pWrk and pSync arrays on all +PEs in the active set are not still in use from a prior call to a +collective SHMEM routine. The target array on all PEs in the active set +is ready to accept the results of the reduction. + +Upon return from a reduction routine, the following are true for the +local PE: The target array is updated. The values in the pSync array are +restored to the original values. + + +NOTES +----- + +The terms collective, symmetric, and cache aligned are defined in +*intro_shmem*\ (3). All SHMEM reduction routines reset the values in +pSync before they return, so a particular pSync buffer need only be +initialized the first time it is used. + +You must ensure that the pSync array is not being updated on any PE in +the active set while any of the PEs participate in processing of a SHMEM +reduction routine. Be careful of the following situations: If the pSync +array is initialized at run time, some type of synchronization is needed +to ensure that all PEs in the working set have initialized pSync before +any of them enter a SHMEM routine called with the pSync synchronization +array. A pSync or pWrk array can be reused in a subsequent reduction +routine call only if none of the PEs in the active set are still +processing a prior reduction routine call that used the same pSync or +pWrk arrays. 
+ +In general, this can be assured only by doing some type of +synchronization. However, in the special case of reduction routines +being called with the same active set, you can allocate two pSync and +pWrk arrays and alternate between them on successive calls. + + +EXAMPLES +-------- + +**Example 1:** This Fortran example statically initializes the pSync +array and finds the maximum value of real variable FOO across all even +PEs. + +:: + + INCLUDE "mpp/shmem.fh" + + INTEGER PSYNC(SHMEM_REDUCE_SYNC_SIZE) + DATA PSYNC /SHMEM_REDUCE_SYNC_SIZE*SHMEM_SYNC_VALUE/ + PARAMETER (NR=1) + REAL FOO, FOOMAX, PWRK(MAX(NR/2+1,SHMEM_REDUCE_MIN_WRKDATA_SIZE)) + COMMON /COM/ FOO, FOOMAX, PWRK + INTRINSIC MY_PE + + IF ( MOD(MY_PE(),2) .EQ. 0) THEN + CALL SHMEM_REAL8_MAX_TO_ALL(FOOMAX, FOO, NR, 0, 1, N$PES/2, + & PWRK, PSYNC) + PRINT *, 'Result on PE ', MY_PE(), ' is ', FOOMAX + ENDIF + +**Example 2:** Consider the following C/C++ call: + +.. code-block:: c++ + + shmem_int_max_to_all( target, source, 3, 0, 0, 8, pwrk, psync ); + +The preceding call is more efficient, but semantically equivalent to, +the combination of the following calls: + +:: + + shmem_int_max_to_all(&(target[0]), &(source[0]), 1, 0, 0, 8, + pwrk1, psync1); + shmem_int_max_to_all(&(target[1]), &(source[1]), 1, 0, 0, 8, + pwrk2, psync2); + shmem_int_max_to_all(&(target[2]), &(source[2]), 1, 0, 0, 8, + pwrk1, psync1); + +Note that two sets of pWrk and pSync arrays are used alternately because +no synchronization is done between calls. + + +.. seealso:: + *intro_shmem*\ (3) diff --git a/docs/man-openshmem/man3/shmem_short_min_to_all.3.rst b/docs/man-openshmem/man3/shmem_short_min_to_all.3.rst new file mode 100644 index 00000000000..1ce89284494 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_short_min_to_all.3.rst @@ -0,0 +1,246 @@ +.. _shmem_short_min_to_all: + + +shmem_short_min_to_all +====================== + +.. 
include_body + +:ref:`shmem_double_min_to_all`\ (3), :ref:`shmem_float_min_to_all`\ (3), +:ref:`shmem_int_min_to_all`\ (3), shmem_int4_min_to_all\ (3), +shmem_int8_min_to_all\ (3), :ref:`shmem_long_min_to_all`\ (3), +:ref:`shmem_longdouble_min_to_all`\ (3), :ref:`shmem_longlong_min_to_all`\ (3), +shmem_real4_min_to_all\ (3), shmem_real8_min_to_all\ (3), +shmem_real16_min_to_all\ (3), :ref:`shmem_short_min_to_all`\ (3) - Performs +a minimum function reduction across a set of processing elements (PEs) + + +SYNOPSIS +-------- + +C or C++: + +.. code-block:: c++ + + #include + + void shmem_double_min_to_all(double *target, const double *source, + int nreduce, int PE_start, int logPE_stride, int PE_size, + double *pWrk, long *pSync) + + void shmem_float_min_to_all(float *target, const float *source, + int nreduce, int PE_start, int logPE_stride, int PE_size, + float *pWrk, long *pSync) + + void shmem_int_min_to_all(int *target, const int *source, + int nreduce, int PE_start, int logPE_stride, int PE_size, + int *pWrk, long *pSync) + + void shmem_long_min_to_all(long *target, const long *source, + int nreduce, int PE_start, int logPE_stride, int PE_size, + long *pWrk, long *pSync) + + void shmem_longdouble_min_to_all(long double *target, + const long double *source, int nreduce, int PE_start, + int logPE_stride, int PE_size, long double *pWrk, + long *pSync) + + void shmem_longlong_min_to_all(long long *target, + const long long *source, int nreduce, int PE_start, int logPE_stride, + int PE_size, long long *pWrk, long *pSync) + + void shmem_short_min_to_all(short *target, const short *source, + int nreduce, int PE_start, int logPE_stride, int PE_size, + short *pWrk, long *pSync) + +Fortran: + +.. 
code-block:: fortran + + INCLUDE "mpp/shmem.fh" + + INTEGER pSync(SHMEM_REDUCE_SYNC_SIZE) + INTEGER nreduce, PE_start, logPE_stride, PE_size + + CALL SHMEM_INT4_MIN_TO_ALL(target, source, nreduce, PE_start, + & logPE_stride, PE_size, pWrk, pSync) + + CALL SHMEM_INT8_MIN_TO_ALL(target, source, nreduce, PE_start, + & logPE_stride, PE_size, pWrk, pSync) + + CALL SHMEM_REAL4_MIN_TO_ALL(target, source, nreduce, PE_start, + & logPE_stride, PE_size, pWrk, pSync) + + CALL SHMEM_REAL8_MIN_TO_ALL(target, source, nreduce, PE_start, + & logPE_stride, PE_size, pWrk, pSync) + + CALL SHMEM_REAL16_MIN_TO_ALL(target, source, nreduce, PE_start, + & logPE_stride, PE_size, pWrk, pSync) + + +DESCRIPTION +----------- + +The shared memory (SHMEM) reduction routines compute one or more +reductions across symmetric arrays on multiple virtual PEs. A reduction +performs an associative binary operation across a set of values. For a +list of other SHMEM reduction routines, see *intro_shmem*\ (3). + +As with all SHMEM collective routines, each of these routines assumes +that only PEs in the active set call the routine. If a PE not in the +active set calls a SHMEM collective routine, undefined behavior results. + +The nreduce argument determines the number of separate reductions to +perform. The source array on all PEs in the active set provides one +element for each reduction. The results of the reductions are placed in +the target array on all PEs in the active set. The active set is defined +by the PE_start, logPE_stride, PE_size triplet. + +The source and target arrays may be the same array, but they may not be +overlapping arrays. + +The arguments are as follows: + +target + A symmetric array of length nreduce elements to receive the results + of the reduction operations. The data type of target varies with the + version of the reduction routine being called. When calling from + C/C++, refer to the SYNOPSIS section for data type information. 
When + calling from Fortran, the target data types are as follows: + + shmem_int4_min_to_all: Integer, with an element size of 4 bytes + + shmem_int8_min_to_all: Integer, with an element size of 8 bytes + + shmem_real4_min_to_all: Real, with an element size of 4 bytes + + shmem_real8_min_to_all: Real, with an element size of 8 bytes + + shmem_real16_min_to_all: Real, with an element size of 16 bytes + +source + A symmetric array, of length nreduce elements, that contains one + element for each separate reduction operation. The source argument + must have the same data type as target. + +nreduce + The number of elements in the target and source arrays. nreduce must + be of type integer. If you are using Fortran, it must be a default + integer value. + +PE_start + The lowest virtual PE number of the active set of PEs. PE_start must + be of type integer. If you are using Fortran, it must be a default + integer value. + +logPE_stride + The log (base 2) of the stride between consecutive virtual PE numbers + in the active set. logPE_stride must be of type integer. If you are + using Fortran, it must be a default integer value. + +PE_size + The number of PEs in the active set. PE_size must be of type integer. + If you are using Fortran, it must be a default integer value. + +pWrk + A symmetric work array. The pWrk argument must have the same data + type as target. In C/C++, this contains max(nreduce/2 + 1, + \_SHMEM_REDUCE_MIN_WRKDATA_SIZE) elements. In Fortran, this contains + max(nreduce/2 + 1, SHMEM_REDUCE_MIN_WRKDATA_SIZE) elements. + +pSync + A symmetric work array. In C/C++, pSync is of type long and size + \_SHMEM_REDUCE_SYNC_SIZE. In Fortran, pSync is of type integer and + size SHMEM_REDUCE_SYNC_SIZE. If you are using Fortran, it must be a + default integer value. Every element of this array must be + initialized with the value \_SHMEM_SYNC_VALUE (in C/C++) or + SHMEM_SYNC_VALUE (in Fortran) before any of the PEs in the active set + enter the reduction routine. 
+ +The values of arguments nreduce, PE_start, logPE_stride, and PE_size +must be equal on all PEs in the active set. The same target and source +arrays, and the same pWrk and pSync work arrays, must be passed to all +PEs in the active set. + +Before any PE calls a reduction routine, you must ensure that the +following conditions exist (synchronization via a barrier or some other +method is often needed to ensure this): The pWrk and pSync arrays on all +PEs in the active set are not still in use from a prior call to a +collective SHMEM routine. The target array on all PEs in the active set +is ready to accept the results of the reduction. + +Upon return from a reduction routine, the following are true for the +local PE: The target array is updated. The values in the pSync array are +restored to the original values. + + +NOTES +----- + +The terms collective, symmetric, and cache aligned are defined in +*intro_shmem*\ (3). All SHMEM reduction routines reset the values in +pSync before they return, so a particular pSync buffer need only be +initialized the first time it is used. + +You must ensure that the pSync array is not being updated on any PE in +the active set while any of the PEs participate in processing of a SHMEM +reduction routine. Be careful of the following situations: If the pSync +array is initialized at run time, some type of synchronization is needed +to ensure that all PEs in the working set have initialized pSync before +any of them enter a SHMEM routine called with the pSync synchronization +array. A pSync or pWrk array can be reused in a subsequent reduction +routine call only if none of the PEs in the active set are still +processing a prior reduction routine call that used the same pSync or +pWrk arrays. In general, this can be assured only by doing some type of +synchronization. 
However, in the special case of reduction routines +being called with the same active set, you can allocate two pSync and +pWrk arrays and alternate between them on successive calls. + + +EXAMPLES +-------- + +**Example 1:** This Fortran example statically initializes the pSync +array and finds the minimum value of real variable FOO across all the +even PEs. + +:: + + INCLUDE "mpp/shmem.fh" + + INTEGER PSYNC(SHMEM_REDUCE_SYNC_SIZE) + DATA PSYNC /SHMEM_REDUCE_SYNC_SIZE*SHMEM_SYNC_VALUE/ + PARAMETER (NR=1) + REAL FOO, FOOMIN, PWRK(MAX(NR/2+1,SHMEM_REDUCE_MIN_WRKDATA_SIZE)) + COMMON /COM/ FOO, FOOMIN, PWRK + INTRINSIC MY_PE + + IF ( MOD(MY_PE(),2) .EQ. 0) THEN + CALL SHMEM_REAL8_MIN_TO_ALL(FOOMIN, FOO, NR, 0, 1, N$PES/2, + & PWRK, PSYNC) + PRINT *, 'Result on PE ', MY_PE(), ' is ', FOOMIN + ENDIF + +**Example 2:** Consider the following C/C++ call: + +.. code-block:: c++ + + shmem_int_min_to_all( target, source, 3, 0, 0, 8, pwrk, psync ); + +The preceding call is more efficient, but semantically equivalent to, +the combination of the following calls: + +:: + + shmem_int_min_to_all(&(target[0]), &(source[0]), 1, 0, 0, 8, + pwrk1, psync1); + shmem_int_min_to_all(&(target[1]), &(source[1]), 1, 0, 0, 8, + pwrk2, psync2); + shmem_int_min_to_all(&(target[2]), &(source[2]), 1, 0, 0, 8, + pwrk1, psync1); + +Note that two sets of pWrk and pSync arrays are used alternately because +no synchronization is done between calls. + + +.. seealso:: + *intro_shmem*\ (3) diff --git a/docs/man-openshmem/man3/shmem_short_or_to_all.3.rst b/docs/man-openshmem/man3/shmem_short_or_to_all.3.rst new file mode 100644 index 00000000000..912cf399d54 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_short_or_to_all.3.rst @@ -0,0 +1,218 @@ +.. _shmem_short_or_to_all: + + +shmem_short_or_to_all +===================== + +.. 
include_body + +:ref:`shmem_int_or_to_all`\ (3), shmem_int4_or_to_all\ (3), +shmem_int8_or_to_all\ (3), :ref:`shmem_long_or_to_all`\ (3), +:ref:`shmem_longlong_or_to_all`\ (3), :ref:`shmem_short_or_to_all`\ (3) - Performs +a bitwise OR function reduction across a set of processing elements +(PEs) + + +SYNOPSIS +-------- + +C or C++: + +.. code-block:: c++ + + #include + + void shmem_int_or_to_all(int *target, const int *source, + int nreduce, int PE_start, int logPE_stride, int PE_size, + int *pWrk, long *pSync) + + void shmem_long_or_to_all(long *target, const long *source, + int nreduce, int PE_start, int logPE_stride, int PE_size, + long *pWrk, long *pSync) + + void shmem_longlong_or_to_all(long long *target, + const long long *source, int nreduce, int PE_start, int logPE_stride, + int PE_size, long long *pWrk, long *pSync) + + void shmem_short_or_to_all(short *target, const short *source, + int nreduce, int PE_start, int logPE_stride, int PE_size, + short *pWrk, long *pSync) + +Fortran: + +.. code-block:: fortran + + INCLUDE "mpp/shmem.fh" + + INTEGER pSync(SHMEM_REDUCE_SYNC_SIZE) + INTEGER nreduce, PE_start, logPE_stride, PE_size + + CALL SHMEM_INT4_OR_TO_ALL(target, source, nreduce, PE_start, + & logPE_stride, PE_size, pWrk, pSync) + + CALL SHMEM_INT8_OR_TO_ALL(target, source, nreduce, PE_start, + & logPE_stride, PE_size, pWrk, pSync) + + +DESCRIPTION +----------- + +The shared memory (SHMEM) reduction routines compute one or more +reductions across symmetric arrays on multiple virtual PEs. A reduction +performs an associative binary operation across a set of values. For a +list of other SHMEM reduction routines, see intro_shmem(3). + +As with all SHMEM collective routines, each of these routines assumes +that only PEs in the active set call the routine. If a PE not in the +active set calls a SHMEM collective routine, undefined behavior results. + +The nreduce argument determines the number of separate reductions to +perform. 
The source array on all PEs in the active set provides one +element for each reduction. The results of the reductions are placed in +the target array on all PEs in the active set. The active set is defined +by the PE_start, logPE_stride, PE_size triplet. + +The source and target arrays may be the same array, but they may not be +overlapping arrays. + +The arguments are as follows: + +target + A symmetric array of length nreduce elements to receive the results + of the reduction operations. The data type of target varies with the + version of the reduction routine being called. When calling from + C/C++, refer to the SYNOPSIS section for data type information. When + calling from Fortran, the target data types are as follows: + + shmem_int8_or_to_all Integer, with an element size of 8 bytes. + + shmem_int4_or_to_all Integer, with an element size of 4 bytes. + +source + A symmetric array, of length nreduce elements, that contains one + element for each separate reduction operation. The source argument + must have the same data type as target. + +nreduce + The number of elements in the target and source arrays. nreduce must + be of type integer. If you are using Fortran, it must be a default + integer value. + +PE_start + The lowest virtual PE number of the active set of PEs. PE_start must + be of type integer. If you are using Fortran, it must be a default + integer value. + +logPE_stride + The log (base 2) of the stride between consecutive virtual PE numbers + in the active set. logPE_stride must be of type integer. If you are + using Fortran, it must be a default integer value. + +PE_size + The number of PEs in the active set. PE_size must be of type integer. + If you are using Fortran, it must be a default integer value. + +pWrk + A symmetric work array. The pWrk argument must have the same data + type as target. In C/C++, this contains max(nreduce/2 + 1, + \_SHMEM_REDUCE_MIN_WRKDATA_SIZE) elements. 
In Fortran, this contains + max(nreduce/2 + 1, SHMEM_REDUCE_MIN_WRKDATA_SIZE) elements. + +pSync + A symmetric work array. In C/C++, pSync is of type long and size + \_SHMEM_REDUCE_SYNC_SIZE. In Fortran, pSync is of type integer and + size SHMEM_REDUCE_SYNC_SIZE. If you are using Fortran, it must be a + default integer value. Every element of this array must be + initialized with the value \_SHMEM_SYNC_VALUE (in C/C++) or + SHMEM_SYNC_VALUE (in Fortran) before any of the PEs in the active set + enter the reduction routine. + +The values of arguments nreduce, PE_start, logPE_stride, and PE_size +must be equal on all PEs in the active set. The same target and source +arrays, and the same pWrk and pSync work arrays, must be passed to all +PEs in the active set. + +Before any PE calls a reduction routine, you must ensure that the +following conditions exist (synchronization via a barrier or some other +method is often needed to ensure this): The pWrk and pSync arrays on all +PEs in the active set are not still in use from a prior call to a +collective SHMEM routine. The target array on all PEs in the active set +is ready to accept the results of the reduction. + +Upon return from a reduction routine, the following are true: The target +array is updated. The values in the pSync array are restored to the +original values. + + +NOTES +----- + +The terms collective, symmetric, and cache aligned are defined in +*intro_shmem*\ (3). All SHMEM reduction routines reset the values in +pSync before they return, so a particular pSync buffer need only be +initialized the first time it is used. + +You must ensure that the pSync array is not being updated on any PE in +the active set while any of the PEs participate in processing of a SHMEM +reduction routine. 
Be careful to avoid these situations: If the pSync +array is initialized at run time, some type of synchronization is needed +to ensure that all PEs in the working set have initialized pSync before +any of them enter a SHMEM routine called with the pSync synchronization +array. A pSync or pWrk array can be reused in a subsequent reduction +routine call only if none of the PEs in the active set are still +processing a prior reduction routine call that used the same pSync or +pWrk arrays. In general, this can be assured only by doing some type of +synchronization. However, in the special case of reduction routines +being called with the same active set, you can allocate two pSync and +pWrk arrays and alternate between them on successive calls. + + +EXAMPLES +-------- + +**Example 1:** This Fortran example statically initializes the pSync +array and finds the logical OR of the integer variable FOO across all +even PEs. + +:: + + INCLUDE "mpp/shmem.fh" + + INTEGER PSYNC(SHMEM_REDUCE_SYNC_SIZE) + DATA PSYNC /SHMEM_REDUCE_SYNC_SIZE*SHMEM_SYNC_VALUE/ + PARAMETER (NR=1) + REAL PWRK(MAX(NR/2+1,SHMEM_REDUCE_MIN_WRKDATA_SIZE)) + INTEGER FOO, FOOOR + COMMON /COM/ FOO, FOOOR, PWRK + INTRINSIC MY_PE + + IF ( MOD(MY_PE(),2) .EQ. 0) THEN + CALL SHMEM_INT8_OR_TO_ALL(FOOOR, FOO, NR, 0, 1, N$PES/2, + & PWRK, PSYNC) + PRINT *,'Result on PE ',MY_PE(),' is ',FOOOR + ENDIF + +**Example 2:** Consider the following C/C++ call: + +.. code-block:: c++ + + shmem_int_or_to_all( target, source, 3, 0, 0, 8, pwrk, psync ); + +The preceding call is more efficient, but semantically equivalent to, +the combination of the following calls: + +:: + + shmem_int_or_to_all(&(target[0]), &(source[0]), 1, 0, 0, 8, + pwrk1, psync1); + shmem_int_or_to_all(&(target[1]), &(source[1]), 1, 0, 0, 8, + pwrk2, psync2); + shmem_int_or_to_all(&(target[2]), &(source[2]), 1, 0, 0, 8, + pwrk1, psync1); + +Note that two sets of pWrk and pSync arrays are used alternately because +no synchronization is done between calls. 
+ + +.. seealso:: + *intro_shmem*\ (3) diff --git a/docs/man-openshmem/man3/shmem_short_p.3.rst b/docs/man-openshmem/man3/shmem_short_p.3.rst new file mode 100644 index 00000000000..76f9b8aedc3 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_short_p.3.rst @@ -0,0 +1,9 @@ +.. _shmem_short_p: + +shmem_short_p +============= + .. include_body + +.. include:: ../man3/shmem_char_p.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_short_prod_to_all.3.rst b/docs/man-openshmem/man3/shmem_short_prod_to_all.3.rst new file mode 100644 index 00000000000..cf79160d0ff --- /dev/null +++ b/docs/man-openshmem/man3/shmem_short_prod_to_all.3.rst @@ -0,0 +1,271 @@ +.. _shmem_short_prod_to_all: + + +shmem_short_prod_to_all +======================= + +.. include_body + +shmem_comp4_prod_to_all\ (3), shmem_comp8_prod_to_all\ (3), +:ref:`shmem_complexd_prod_to_all`\ (3), :ref:`shmem_complexf_prod_to_all`\ (3), +:ref:`shmem_double_prod_to_all`\ (3), :ref:`shmem_float_prod_to_all`\ (3), +:ref:`shmem_int_prod_to_all`\ (3), shmem_int4_prod_to_all\ (3), +shmem_int8_prod_to_all\ (3), :ref:`shmem_long_prod_to_all`\ (3), +:ref:`shmem_longdouble_prod_to_all`\ (3), :ref:`shmem_longlong_prod_to_all`\ (3), +shmem_real8_prod_to_all\ (3), shmem_real16_prod_to_all\ (3), +shmem_real4_prod_to_all\ (3), :ref:`shmem_short_prod_to_all`\ (3) - +Performs a product reduction across a set of processing elements (PEs) + + +SYNOPSIS +-------- + +C or C++: + +.. 
code-block:: c++ + + #include <mpp/shmem.h> + + void shmem_complexd_prod_to_all(double complex *target, + const double complex *source, int nreduce, int PE_start, + int logPE_stride, int PE_size, double complex *pWrk, + long *pSync) + + void shmem_complexf_prod_to_all(float complex *target, + const float complex *source, int nreduce, int PE_start, + int logPE_stride, int PE_size, float complex *pWrk, + long *pSync) + + void shmem_double_prod_to_all(double *target, const double *source, + int nreduce, int PE_start, int logPE_stride, int PE_size, + double *pWrk, long *pSync) + + void shmem_float_prod_to_all(float *target, const float *source, + int nreduce, int PE_start, int logPE_stride, int PE_size, + float *pWrk, long *pSync) + + void shmem_int_prod_to_all(int *target, const int *source, + int nreduce, int PE_start, int logPE_stride, int PE_size, + int *pWrk, long *pSync) + + void shmem_long_prod_to_all(long *target, const long *source, + int nreduce, int PE_start, int logPE_stride, int PE_size, + long *pWrk, long *pSync) + + void shmem_longdouble_prod_to_all(long double *target, + const long double *source, int nreduce, int PE_start, + int logPE_stride, int PE_size, long double *pWrk, + long *pSync) + + void shmem_longlong_prod_to_all(long long *target, + const long long *source, int nreduce, int PE_start, + int logPE_stride, int PE_size, long long *pWrk, + long *pSync) + + void shmem_short_prod_to_all(short *target, const short *source, + int nreduce, int PE_start, int logPE_stride, int PE_size, + short *pWrk, long *pSync) + +Fortran: + +.. 
code-block:: fortran + + INCLUDE "mpp/shmem.fh" + + INTEGER pSync(SHMEM_REDUCE_SYNC_SIZE) + INTEGER nreduce, PE_start, logPE_stride, PE_size + + CALL SHMEM_COMP4_PROD_TO_ALL(target, source, nreduce, PE_start, + & logPE_stride, PE_size, pWrk, pSync) + + CALL SHMEM_COMP8_PROD_TO_ALL(target, source, nreduce, PE_start, + & logPE_stride, PE_size, pWrk, pSync) + + CALL SHMEM_INT4_PROD_TO_ALL(target, source, nreduce, PE_start, + & logPE_stride, PE_size, pWrk, pSync) + + CALL SHMEM_INT8_PROD_TO_ALL(target, source, nreduce, PE_start, + & logPE_stride, PE_size, pWrk, pSync) + + CALL SHMEM_REAL4_PROD_TO_ALL(target, source, nreduce, PE_start, + & logPE_stride, PE_size, pWrk, pSync) + + CALL SHMEM_REAL8_PROD_TO_ALL(target, source, nreduce, PE_start, + & logPE_stride, PE_size, pWrk, pSync) + + CALL SHMEM_REAL16_PROD_TO_ALL(target, source, nreduce, PE_start, + & logPE_stride, PE_size, pWrk, pSync) + + +DESCRIPTION +----------- + +The shared memory (SHMEM) reduction routines compute one or more +reductions across symmetric arrays on multiple virtual PEs. A reduction +performs an associative binary operation across a set of values. For a +list of other SHMEM reduction routines, see *intro_shmem*\ (3). + +As with all SHMEM collective routines, each of these routines assumes +that only PEs in the active set call the routine. If a PE not in the +active set calls a SHMEM collective routine, undefined behavior results. + +The nreduce argument determines the number of separate reductions to +perform. The source array on all PEs in the active set provides one +element for each reduction. The results of the reductions are placed in +the target array on all PEs in the active set. The active set is defined +by the PE_start, logPE_stride, PE_size triplet. + +The source and target arrays may be the same array, but they may not be +overlapping arrays. + +The arguments are as follows: + +target + A symmetric array of length nreduce elements to receive the results + of the reduction operations. 
The data type of target varies with the + version of the reduction routine being called and the language used. + When calling from C/C++, refer to the SYNOPSIS section for data type + information. When calling from Fortran, the target data types are as + follows: + + shmem_comp4_prod_to_all: Complex, with an element size equal to two + 4-byte real values. + + shmem_comp8_prod_to_all: Complex, with an element size equal to two + 8-byte real values. + + shmem_int4_prod_to_all: Integer, with an element size of 4 bytes + + shmem_int8_prod_to_all: Integer, with an element size of 8 bytes + + shmem_real4_prod_to_all: Real, with an element size of 4 bytes + + shmem_real8_prod_to_all: Real, with an element size of 8 bytes + + shmem_real16_prod_to_all: Real, with an element size of 16 bytes + +source + A symmetric array, of length nreduce elements, that contains one + element for each separate reduction operation. The source argument + must have the same data type as target. + +nreduce + The number of elements in the target and source arrays. nreduce must + be of type integer. If you are using Fortran, it must be a default + integer value. + +PE_start + The lowest virtual PE number of the active set of PEs. PE_start must + be of type integer. If you are using Fortran, it must be a default + integer value. + +logPE_stride + The log (base 2) of the stride between consecutive virtual PE numbers + in the active set. logPE_stride must be of type integer. If you are + using Fortran, it must be a default integer value. + +PE_size + The number of PEs in the active set. PE_size must be of type integer. + If you are using Fortran, it must be a default integer value. + +pWrk + A symmetric work array. The pWrk argument must have the same data + type as target. In C/C++, this contains max(nreduce/2 + 1, + \_SHMEM_REDUCE_MIN_WRKDATA_SIZE) elements. In Fortran, this contains + max(nreduce/2 + 1, SHMEM_REDUCE_MIN_WRKDATA_SIZE) elements. + +pSync + A symmetric work array. 
In C/C++, pSync is of type long and size + \_SHMEM_REDUCE_SYNC_SIZE. In Fortran, pSync is of type integer and + size SHMEM_REDUCE_SYNC_SIZE. If you are using Fortran, it must be a + default integer value. Before any of the PEs in the active set enter + the reduction routine, every element of this array must be + initialized with the value \_SHMEM_SYNC_VALUE (in C/C++) or + SHMEM_SYNC_VALUE (in Fortran). + +The values of arguments nreduce, PE_start, logPE_stride, and PE_size +must be equal on all PEs in the active set. The same target and source +arrays, and the same pWrk and pSync work arrays, must be passed to all +PEs in the active set. Before any PE calls a reduction routine, you must +ensure that the following conditions exist (synchronization via a +barrier or some other method is often needed to ensure this): The pWrk +and pSync arrays on all PEs in the active set are not still in use from +a prior call to a collective SHMEM routine. The target array on all PEs +in the active set is ready to accept the results of the reduction. + +Upon return from a reduction routine, the following are true for the +local PE: The target array is updated. The values in the pSync array are +restored to the original values. + + +NOTES +----- + +The terms collective, symmetric, and cache aligned are defined in +*intro_shmem*\ (3). All SHMEM reduction routines reset the values in +pSync before they return, so a particular pSync buffer need only be +initialized the first time it is used. + +You must ensure that the pSync array is not being updated on any PE in +the active set while any of the PEs participate in processing of a SHMEM +reduction routine. Be careful of the following situations: If the pSync +array is initialized at run time, some type of synchronization is needed +to ensure that all PEs in the working set have initialized pSync before +any of them enter a SHMEM routine called with the pSync synchronization +array. 
A pSync or pWrk array can be reused in a subsequent reduction +routine call only if none of the PEs in the active set are still +processing a prior reduction routine call that used the same pSync or +pWrk arrays. In general, this can be assured only by doing some type of +synchronization. However, in the special case of reduction routines +being called with the same active set, you can allocate two pSync and +pWrk arrays and alternate between them on successive calls. + + +EXAMPLES +-------- + +**Example 1:** This Fortran example statically initializes the pSync +array and finds the product of the real variable FOO across all the even +PEs. + +:: + + INCLUDE "mpp/shmem.fh" + + INTEGER PSYNC(SHMEM_REDUCE_SYNC_SIZE) + DATA PSYNC /SHMEM_REDUCE_SYNC_SIZE*SHMEM_SYNC_VALUE/ + PARAMETER (NR=1) + REAL FOO, FOOPROD, PWRK(MAX(NR/2+1,SHMEM_REDUCE_MIN_WRKDATA_SIZE)) + COMMON /COM/ FOO, FOOPROD, PWRK + INTRINSIC MY_PE + + IF ( MOD(MY_PE(),2) .EQ. 0) THEN + CALL SHMEM_COMP8_PROD_TO_ALL(FOOPROD, FOO, NR, 0, 1, N$PES/2, + & PWRK, PSYNC) + PRINT *, 'Result on PE ', MY_PE(), ' is ', FOOPROD + ENDIF + +**Example 2:** Consider the following C/C++ call: + +.. code-block:: c++ + + shmem_short_prod_to_all(target, source, 3, 0, 0, 8, pwrk, psync); + +The preceding call is more efficient, but semantically equivalent to, +the combination of the following calls: + +:: + + shmem_short_prod_to_all(&(target[0]), &(source[0]), 1, 0, 0, 8, + pwrk1, psync1); + shmem_short_prod_to_all(&(target[1]), &(source[1]), 1, 0, 0, 8, + pwrk2, psync2); + shmem_short_prod_to_all(&(target[2]), &(source[2]), 1, 0, 0, 8, + pwrk1, psync1); + +Note that two sets of pWrk and pSync arrays are used alternately because +no synchronization is done between calls. + + +.. 
seealso:: + *intro_shmem*\ (3) diff --git a/docs/man-openshmem/man3/shmem_short_put.3.rst b/docs/man-openshmem/man3/shmem_short_put.3.rst new file mode 100644 index 00000000000..b1484355389 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_short_put.3.rst @@ -0,0 +1,9 @@ +.. _shmem_short_put: + +shmem_short_put +=============== + .. include_body + +.. include:: ../man3/shmem_char_put.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_short_put_nbi.3.rst b/docs/man-openshmem/man3/shmem_short_put_nbi.3.rst new file mode 100644 index 00000000000..e12b0cadb0c --- /dev/null +++ b/docs/man-openshmem/man3/shmem_short_put_nbi.3.rst @@ -0,0 +1,9 @@ +.. _shmem_short_put_nbi: + +shmem_short_put_nbi +=================== + .. include_body + +.. include:: ../man3/shmem_putmem_nbi.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_short_sum_to_all.3.rst b/docs/man-openshmem/man3/shmem_short_sum_to_all.3.rst new file mode 100644 index 00000000000..03f43ac59ac --- /dev/null +++ b/docs/man-openshmem/man3/shmem_short_sum_to_all.3.rst @@ -0,0 +1,292 @@ +.. _shmem_short_sum_to_all: + + +shmem_short_sum_to_all +====================== + +.. include_body + +shmem_comp4_sum_to_all\ (3), shmem_comp8_sum_to_all\ (3), +:ref:`shmem_complexd_sum_to_all`\ (3), :ref:`shmem_complexf_sum_to_all`\ (3), +:ref:`shmem_double_sum_to_all`\ (3), :ref:`shmem_float_sum_to_all`\ (3), +:ref:`shmem_int_sum_to_all`\ (3), shmem_int4_sum_to_all\ (3), +shmem_int8_sum_to_all\ (3), :ref:`shmem_long_sum_to_all`\ (3), +:ref:`shmem_longdouble_sum_to_all`\ (3), :ref:`shmem_longlong_sum_to_all`\ (3), +shmem_real4_sum_to_all\ (3), shmem_real8_sum_to_all\ (3), +shmem_real16_sum_to_all\ (3), :ref:`shmem_short_sum_to_all`\ (3) - Performs +a sum reduction across a set of processing elements (PEs) + + +SYNOPSIS +-------- + +C or C++: + +.. 
code-block:: c++ + + #include <mpp/shmem.h> + + void shmem_complexd_sum_to_all(double complex *target, + const double complex *source, int nreduce, int PE_start, + int logPE_stride, int PE_size, double complex *pWrk, + long *pSync) + + void shmem_complexf_sum_to_all(float complex *target, + const float complex *source, int nreduce, int PE_start, + int logPE_stride, int PE_size, float complex *pWrk, + long *pSync) + + void shmem_double_sum_to_all(double *target, + const double *source, int nreduce, int PE_start, int logPE_stride, + int PE_size, double *pWrk, long *pSync) + + void shmem_float_sum_to_all(float *target, const float *source, + int nreduce, int PE_start, int logPE_stride, int PE_size, + float *pWrk, long *pSync) + + void shmem_int_sum_to_all(int *target, const int *source, + int nreduce, int PE_start, int logPE_stride, int PE_size, + int *pWrk, long *pSync) + + void shmem_long_sum_to_all(long *target, const long *source, + int nreduce, int PE_start, int logPE_stride, int PE_size, + long *pWrk, long *pSync) + + void shmem_longdouble_sum_to_all(long double *target, + const long double *source, int nreduce, int PE_start, int + logPE_stride, int PE_size, long double *pWrk, long *pSync) + + void shmem_longlong_sum_to_all(long long *target, + const long long *source, int nreduce, int PE_start, + int logPE_stride, int PE_size, long long *pWrk, + long *pSync) + + void shmem_short_sum_to_all(short *target, const short *source, + int nreduce, int PE_start, int logPE_stride, int PE_size, + short *pWrk, long *pSync) + +Fortran: + +.. 
code-block:: fortran + + INCLUDE "mpp/shmem.fh" + + INTEGER pSync(SHMEM_REDUCE_SYNC_SIZE) + INTEGER nreduce, PE_start, logPE_stride, PE_size + + CALL SHMEM_COMP4_SUM_TO_ALL(target, source, nreduce, + & PE_start, logPE_stride, PE_size, pWrk, pSync) + + CALL SHMEM_COMP8_SUM_TO_ALL(target, source, nreduce, + & PE_start, logPE_stride, PE_size, pWrk, pSync) + + CALL SHMEM_INT4_SUM_TO_ALL(target, source, nreduce, + & PE_start, logPE_stride, PE_size, pWrk, pSync) + + CALL SHMEM_INT8_SUM_TO_ALL(target, source, nreduce, + & PE_start, logPE_stride, PE_size, pWrk, pSync) + + CALL SHMEM_REAL4_SUM_TO_ALL(target, source, nreduce, + & PE_start, logPE_stride, PE_size, pWrk, pSync) + + CALL SHMEM_REAL8_SUM_TO_ALL(target, source, nreduce, + & PE_start, logPE_stride, PE_size, pWrk, pSync) + + CALL SHMEM_REAL16_SUM_TO_ALL(target, source, nreduce, + & PE_start, logPE_stride, PE_size, pWrk, pSync) + + +DESCRIPTION +----------- + +The shared memory (SHMEM) reduction routines compute one or more +reductions across symmetric arrays on multiple virtual PEs. A reduction +performs an associative binary operation across a set of values. For a +list of other SHMEM reduction routines, see *intro_shmem*\ (3). + +As with all SHMEM collective routines, each of these routines assumes +that only PEs in the active set call the routine. If a PE not in the +active set calls a SHMEM collective routine, undefined behavior results. + +The nreduce argument determines the number of separate reductions to +perform. The source array on all PEs in the active set provides one +element for each reduction. The results of the reductions are placed in +the target array on all PEs in the active set. The active set is defined +by the PE_start, logPE_stride, PE_size triplet. + +The source and target arrays may be the same array, but they may not be +overlapping arrays. + +The arguments are as follows: + +target + A symmetric array of length nreduce elements to receive the results + of the reduction operations. The data type of target varies with the + version of the reduction routine being called and the language used. + When calling from C/C++, refer to the SYNOPSIS section for data type + information. When calling from Fortran, the target data types are as + follows: + + shmem_comp4_sum_to_all: COMPLEX(KIND=4). + + shmem_comp8_sum_to_all: Complex. If you are using Fortran, it must be + a default complex value. + + shmem_int4_sum_to_all: INTEGER(KIND=4). + + shmem_int8_sum_to_all: Integer. If you are using Fortran, it must be a + default integer value. + + shmem_real4_sum_to_all: REAL(KIND=4). + + shmem_real8_sum_to_all: Real. If you are using Fortran, it must be a + default real value. + + shmem_real16_sum_to_all: Real. If you are using Fortran, it must be a + default real value. + +source + A symmetric array, of length nreduce elements, that contains one + element for each separate reduction operation. The source argument + must have the same data type as target. + +nreduce + The number of elements in the target and source arrays. nreduce must + be of type integer. If you are using Fortran, it must be a default + integer value. + +PE_start + The lowest virtual PE number of the active set of PEs. 
PE_start must + be of type integer. If you are using Fortran, it must be a default + integer value. + +logPE_stride + The log (base 2) of the stride between consecutive virtual PE numbers + in the active set. logPE_stride must be of type integer. If you are + using Fortran, it must be a default integer value. + +PE_size + The number of PEs in the active set. PE_size must be of type integer. + If you are using Fortran, it must be a default integer value. + +pWrk + A symmetric work array. The pWrk argument must have the same data + type as target. In C/C++, this contains max(nreduce/2 + 1, + \_SHMEM_REDUCE_MIN_WRKDATA_SIZE) elements. In Fortran, this contains + max(nreduce/2 + 1, SHMEM_REDUCE_MIN_WRKDATA_SIZE) elements. + +pSync + A symmetric work array. In C/C++, pSync is of type long and size + \_SHMEM_REDUCE_SYNC_SIZE. In Fortran, pSync is of type integer and + size SHMEM_REDUCE_SYNC_SIZE. It must be a default integer value. + Every element of this array must be initialized with the value + \_SHMEM_SYNC_VALUE (in C/C++) or SHMEM_SYNC_VALUE (in Fortran) before + any of the PEs in the active set enter the reduction routine. + +The values of arguments nreduce, PE_start, logPE_stride, and PE_size +must be equal on all PEs in the active set. The same target and source +arrays, and the same pWrk and pSync work arrays, must be passed to all +PEs in the active set. + +Before any PE calls a reduction routine, you must ensure that the +following conditions exist (synchronization via a barrier or some other +method is often needed to ensure this): The pWrk and pSync arrays on all +PEs in the active set are not still in use from a prior call to a +collective SHMEM routine. The target array on all PEs in the active set +is ready to accept the results of the reduction. + +Upon return from a reduction routine, the following are true for the +local PE: The target array is updated. The values in the pSync array are +restored to the original values. 
+ + +NOTES +----- + +The terms collective, symmetric, and cache aligned are defined in +*intro_shmem*\ (3). + +All SHMEM reduction routines reset the values in pSync before they +return, so a particular pSync buffer need only be initialized the first +time it is used. + +You must ensure that the pSync array is not being updated on any PE in +the active set while any of the PEs participate in processing of a SHMEM +reduction routine. Be careful of the following situations: If the pSync +array is initialized at run time, some type of synchronization is needed +to ensure that all PEs in the working set have initialized pSync before +any of them enter a SHMEM routine called with the pSync synchronization +array. A pSync or pWrk array can be reused in a subsequent reduction +routine call only if none of the PEs in the active set are still +processing a prior reduction routine call that used the same pSync or +pWrk arrays. In general, this can be assured only by doing some type of +synchronization. However, in the special case of reduction routines +being called with the same active set, you can allocate two pSync and +pWrk arrays and alternate between them on successive calls. + + +EXAMPLES +-------- + +**Example 1:** This Fortran example statically initializes the pSync +array and finds the sum of the real variable FOO across all even PEs. + +:: + + INCLUDE "mpp/shmem.fh" + + INTEGER PSYNC(SHMEM_REDUCE_SYNC_SIZE) + DATA PSYNC /SHMEM_REDUCE_SYNC_SIZE*SHMEM_SYNC_VALUE/ + PARAMETER (NR=1) + REAL FOO, FOOSUM, PWRK(MAX(NR/2+1,SHMEM_REDUCE_MIN_WRKDATA_SIZE)) + COMMON /COM/ FOO, FOOSUM, PWRK + INTRINSIC MY_PE + + IF ( MOD(MY_PE(),2) .EQ. 0) THEN + CALL SHMEM_INT4_SUM_TO_ALL(FOOSUM, FOO, NR, 0, 1, N$PES/2, + & PWRK, PSYNC) + PRINT *, 'Result on PE ', MY_PE(), ' is ', FOOSUM + ENDIF + +**Example 2:** Consider the following C/C++ call: + +.. 
code-block:: c++ + + shmem_int_sum_to_all( target, source, 3, 0, 0, 8, pwrk, psync ); + +The preceding call is more efficient, but semantically equivalent to, +the combination of the following calls: + +:: + + shmem_int_sum_to_all(&(target[0]), &(source[0]), 1, 0, 0, 8, + pwrk1, psync1); + shmem_int_sum_to_all(&(target[1]), &(source[1]), 1, 0, 0, 8, + pwrk2, psync2); + shmem_int_sum_to_all(&(target[2]), &(source[2]), 1, 0, 0, 8, + pwrk1, psync1); + +Note that two sets of pWrk and pSync arrays are used alternately because +no synchronization is done between calls. + + +.. seealso:: + *intro_shmem*\ (3) diff --git a/docs/man-openshmem/man3/shmem_short_wait.3.rst b/docs/man-openshmem/man3/shmem_short_wait.3.rst new file mode 100644 index 00000000000..3235556f0f4 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_short_wait.3.rst @@ -0,0 +1,9 @@ +.. _shmem_short_wait: + +shmem_short_wait +================ + .. include_body + +.. include:: ../man3/shmem_wait.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_short_wait_until.3.rst b/docs/man-openshmem/man3/shmem_short_wait_until.3.rst new file mode 100644 index 00000000000..cf1654cfa75 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_short_wait_until.3.rst @@ -0,0 +1,9 @@ +.. _shmem_short_wait_until: + +shmem_short_wait_until +====================== + .. include_body + +.. include:: ../man3/shmem_wait.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_short_xor_to_all.3.rst b/docs/man-openshmem/man3/shmem_short_xor_to_all.3.rst new file mode 100644 index 00000000000..8b4c47b6ccd --- /dev/null +++ b/docs/man-openshmem/man3/shmem_short_xor_to_all.3.rst @@ -0,0 +1,230 @@ +.. _shmem_short_xor_to_all: + + +shmem_short_xor_to_all +====================== + +.. 
include_body + +shmem_comp4_xor_to_all\ (3), :ref:`shmem_int_xor_to_all`\ (3), +shmem_int4_xor_to_all\ (3), shmem_int8_xor_to_all\ (3), +:ref:`shmem_long_xor_to_all`\ (3), :ref:`shmem_longlong_xor_to_all`\ (3), +:ref:`shmem_short_xor_to_all`\ (3) - Performs a bitwise XOR operation on +symmetric arrays over the active set of PEs. + + +SYNOPSIS +-------- + +C or C++: + +.. code-block:: c++ + + #include <mpp/shmem.h> + + void shmem_int_xor_to_all(int *target, const int *source, + int nreduce, int PE_start, int logPE_stride, int PE_size, + int *pWrk, long *pSync) + + void shmem_long_xor_to_all(long *target, const long *source, + int nreduce, int PE_start, int logPE_stride, int PE_size, + long *pWrk, long *pSync) + + void shmem_longlong_xor_to_all(long long *target, + const long long *source, int nreduce, int PE_start, int logPE_stride, + int PE_size, long long *pWrk, long *pSync) + + void shmem_short_xor_to_all(short *target, const short *source, + int nreduce, int PE_start, int logPE_stride, int PE_size, + short *pWrk, long *pSync) + +Fortran: + +.. code-block:: fortran + + INCLUDE "mpp/shmem.fh" + + INTEGER pSync(SHMEM_REDUCE_SYNC_SIZE) + INTEGER nreduce, PE_start, logPE_stride, PE_size + + CALL SHMEM_COMP4_XOR_TO_ALL(target, source, nreduce, + & PE_start, logPE_stride, PE_size, pWrk, pSync) + + CALL SHMEM_INT4_XOR_TO_ALL(target, source, nreduce, + & PE_start, logPE_stride, PE_size, pWrk, pSync) + + CALL SHMEM_INT8_XOR_TO_ALL(target, source, nreduce, + & PE_start, logPE_stride, PE_size, pWrk, pSync) + + +DESCRIPTION +----------- + +The shared memory (SHMEM) reduction routines compute one or more +reductions across symmetric arrays on multiple virtual PEs. A reduction +performs an associative binary operation across a set of values. For a +list of other SHMEM reduction routines, see *intro_shmem*\ (3). + +As with all SHMEM collective routines, each of these routines assumes +that only PEs in the active set call the routine. 
If a PE not in the +active set calls a SHMEM collective routine, undefined behavior results. + +The nreduce argument determines the number of separate reductions to +perform. The source array on all PEs in the active set provides one +element for each reduction. The results of the reductions are placed in +the target array on all PEs in the active set. The active set is defined +by the PE_start, logPE_stride, PE_size triplet. + +The source and target arrays may be the same array, but they may not be +overlapping arrays. + +The arguments are as follows: + +target + A symmetric array of length nreduce elements to receive the results + of the reduction operations. The data type of target varies with the + version of the reduction routine being called and the language used. + When calling from C/C++, refer to the SYNOPSIS section for data type + information. When calling from Fortran, the target data types are as + follows: + + shmem_comp8_xor_to_all: Complex, with an element size equal to two 8- + byte real values + + shmem_comp4_xor_to_all: Complex, with an element size equal to two 4- + byte real values + + shmem_int8_xor_to_all: Integer, with an element size of 8 bytes + + shmem_int4_xor_to_all: Integer, with an element size of 4 bytes + + shmem_real8_xor_to_all: Real, with an element size of 8 bytes + + shmem_real4_xor_to_all: Real, with an element size of 4 bytes + +source + A symmetric array, of length nreduce elements, that contains one + element for each separate reduction operation. The source argument + must have the same data type as target. + +nreduce + The number of elements in the target and source arrays. nreduce must + be of type integer. If you are using Fortran, it must be a default + integer value. + +PE_start + The lowest virtual PE number of the active set of PEs. PE_start must + be of type integer. If you are using Fortran, it must be a default + integer value. 
+ +logPE_stride + The log (base 2) of the stride between consecutive virtual PE numbers + in the active set. logPE_stride must be of type integer. If you are + using Fortran, it must be a default integer value. + +PE_size + The number of PEs in the active set. PE_size must be of type integer. + If you are using Fortran, it must be a default integer value. + +pWrk + A symmetric work array. The pWrk argument must have the same data + type as target. In C/C++, this contains max(nreduce/2 + 1, + \_SHMEM_REDUCE_MIN_WRKDATA_SIZE) elements. In Fortran, this contains + max(nreduce/2 + 1, SHMEM_REDUCE_MIN_WRKDATA_SIZE) elements. + +pSync + A symmetric work array. In C/C++, pSync is of type long and size + \_SHMEM_REDUCE_SYNC_SIZE. In Fortran, pSync is of type integer and + size SHMEM_REDUCE_SYNC_SIZE. If you are using Fortran, it must be a + default integer value. Every element of this array must be + initialized with the value \_SHMEM_SYNC_VALUE (in C/C++) or + SHMEM_SYNC_VALUE (in Fortran) before any of the PEs in the active set + enter the reduction routine. + +The values of arguments nreduce, PE_start, logPE_stride, and PE_size +must be equal on all PEs in the active set. The same target and source +arrays, and the same pWrk and pSync work arrays, must be passed to all +PEs in the active set. + +Before any PE calls a reduction routine, you must ensure that the +following conditions exist (synchronization via a barrier or some other +method is often needed to ensure this): The pWrk and pSync arrays on all +PEs in the active set are not still in use from a prior call to a +collective SHMEM routine. The target array on all PEs in the active set +is ready to accept the results of the reduction. + +Upon return from a reduction routine, the following are true for the +local PE: The target array is updated. The values in the pSync array are +restored to the original values. 
+ + +NOTES +----- + +The terms collective, symmetric, and cache aligned are defined in +*intro_shmem*\ (3). All SHMEM reduction routines reset the values in +pSync before they return, so a particular pSync buffer need only be +initialized the first time it is used. + +You must ensure that the pSync array is not being updated on any PE in +the active set while any of the PEs participate in processing of a SHMEM +reduction routine. Be careful of the following situations: If the pSync +array is initialized at run time, some type of synchronization is needed +to ensure that all PEs in the working set have initialized pSync before +any of them enter a SHMEM routine called with the pSync synchronization +array. A pSync or pWrk array can be reused in a subsequent reduction +routine call only if none of the PEs in the active set are still +processing a prior reduction routine call that used the same pSync or +pWrk arrays. In general, this can be assured only by doing some type of +synchronization. However, in the special case of reduction routines +being called with the same active set, you can allocate two pSync and +pWrk arrays and alternate between them on successive calls. + + +EXAMPLES +-------- + +**Example 1:** This Fortran example statically initializes the pSync +array and computes the exclusive OR of variable FOO across all even PEs. + +:: + + INCLUDE "mpp/shmem.fh" + + INTEGER PSYNC(SHMEM_REDUCE_SYNC_SIZE) + DATA PSYNC /SHMEM_REDUCE_SYNC_SIZE*SHMEM_SYNC_VALUE/ + PARAMETER (NR=1) + REAL FOO, FOOXOR, PWRK(MAX(NR/2+1,SHMEM_REDUCE_MIN_WRKDATA_SIZE)) + COMMON /COM/ FOO, FOOXOR, PWRK + INTRINSIC MY_PE + + IF ( MOD(MY_PE(),2) .EQ. 0) THEN + CALL SHMEM_REAL8_XOR_TO_ALL(FOOXOR, FOO, NR, 0, 1, N$PES/2, + & PWRK, PSYNC) + PRINT *, 'Result on PE ', MY_PE(), ' is ', FOOXOR + ENDIF + +**Example 2:** Consider the following C/C++ call: + +.. 
code-block:: c++ + + shmem_short_xor_to_all( target, source, 3, 0, 0, 8, pwrk, psync ); + +The preceding call is more efficient, but semantically equivalent to, +the combination of the following calls: + +:: + + shmem_short_xor_to_all(&(target[0]), &(source[0]), 1, 0, 0, 8, + pwrk1, psync1); + shmem_short_xor_to_all(&(target[1]), &(source[1]), 1, 0, 0, 8, + pwrk2, psync2); + shmem_short_xor_to_all(&(target[2]), &(source[2]), 1, 0, 0, 8, + pwrk1, psync1); + +Note that two sets of pWrk and pSync arrays are used alternately because +no synchronization is done between calls. + + +.. seealso:: + *intro_shmem*\ (3) diff --git a/docs/man-openshmem/man3/shmem_swap.3.rst b/docs/man-openshmem/man3/shmem_swap.3.rst new file mode 100644 index 00000000000..d273eae4964 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_swap.3.rst @@ -0,0 +1,111 @@ +.. _shmem_swap: + + +shmem_swap +========== + +.. include_body + +:ref:`shmem_double_swap`\ (3), :ref:`shmem_float_swap`\ (3), +:ref:`shmem_int_swap`\ (3), :ref:`shmem_long_swap`\ (3), :ref:`shmem_swap`\ (3), +shmem_int4_swap\ (3), shmem_int8_swap\ (3), shmem_real4_swap\ (3), +shmem_real8_swap\ (3), :ref:`shmem_longlong_swap`\ (3) - Performs an atomic +swap to a remote data object + + +SYNOPSIS +-------- + +C or C++: + +.. code-block:: c++ + + #include + + double shmem_double_swap(double *target, double value, + int pe) + + float shmem_float_swap(float *target, float value, int pe) + + int shmem_int_swap(int *target, int value, int pe) + + long shmem_long_swap(long *target, long value, int pe) + + long long shmem_longlong_swap(long long *target, + long long value, int pe) + + long shmem_swap(long *target, long value, int pe) + +Fortran: + +.. 
code-block:: fortran + + INCLUDE "mpp/shmem.fh" + + INTEGER pe + + INTEGER SHMEM_SWAP + ires = SHMEM_SWAP(target, value, pe) + + INTEGER(KIND=4) SHMEM_INT4_SWAP + ires = SHMEM_INT4_SWAP(target, value, pe) + + INTEGER(KIND=8) SHMEM_INT8_SWAP + ires = SHMEM_INT8_SWAP(target, value, pe) + + REAL(KIND=4) SHMEM_REAL4_SWAP + res = SHMEM_REAL4_SWAP(target, value, pe) + + REAL(KIND=8) SHMEM_REAL8_SWAP + res = SHMEM_REAL8_SWAP(target, value, pe) + + +DESCRIPTION +----------- + +The atomic swap routines write **value** to address target on PE **pe**, +and return the previous contents of **target** in one atomic operation. + +The arguments are as follows: + +target + The remotely accessible integer data object to be updated on the + remote PE. If you are using C/C++, the type of target should match + that implied in the SYNOPSIS section. If you are using Fortran, it + must be of the following type: + + **SHMEM_SWAP:** Integer of default kind + + **SHMEM_INT4_SWAP:** 4-byte integer + + **SHMEM_INT8_SWAP:** 8-byte integer + + **SHMEM_REAL4_SWAP:** 4-byte real + + **SHMEM_REAL8_SWAP:** 8-byte real + +value + Value to be atomically written to the remote PE. value is the same + type as target. + +pe + An integer that indicates the PE number on which target is to be + updated. If you are using Fortran, it must be a default integer + value. + + +NOTES +----- + +The term remotely accessible is defined in *intro_shmem*\ (3). + + +RETURN VALUES +------------- + +The contents that had been at the target address on the remote PE prior +to the swap is returned. + + +.. seealso:: + *intro_shmem*\ (3) diff --git a/docs/man-openshmem/man3/shmem_test_lock.3.rst b/docs/man-openshmem/man3/shmem_test_lock.3.rst new file mode 100644 index 00000000000..1c4dd8a6a98 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_test_lock.3.rst @@ -0,0 +1,9 @@ +.. _shmem_test_lock: + +shmem_test_lock +=============== + .. include_body + +.. include:: ../man3/shmem_set_lock.3.rst + :start-after: .. 
include_body + diff --git a/docs/man-openshmem/man3/shmem_udcflush.3.rst b/docs/man-openshmem/man3/shmem_udcflush.3.rst new file mode 100644 index 00000000000..5d79b8dc873 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_udcflush.3.rst @@ -0,0 +1,84 @@ +.. _shmem_udcflush: + + +shmem_udcflush +============== + +.. include_body + +:ref:`shmem_clear_cache_inv`\ (3), :ref:`shmem_set_cache_inv`\ (3), +:ref:`shmem_set_cache_line_inv`\ (3), :ref:`shmem_udcflush`\ (3), +:ref:`shmem_udcflush_line`\ (3) - Controls data cache utilities + + +SYNOPSIS +-------- + +C or C++: + +.. code-block:: c++ + + #include + + void shmem_clear_cache_inv(void) + void shmem_clear_cache_line_inv(void *target) + void shmem_set_cache_inv(void) + void shmem_set_cache_line_inv(void *target) + void shmem_udcflush(void) + void shmem_udcflush_line(void *target) + +Fortran: + +.. code-block:: fortran + + INCLUDE "mpp/shmem.fh" + + CALL SHMEM_CLEAR_CACHE_INV + CALL SHMEM_CLEAR_CACHE_LINE_INV(target) + CALL SHMEM_SET_CACHE_INV + CALL SHMEM_SET_CACHE_LINE_INV(target) + + CALL SHMEM_UDCFLUSH + CALL SHMEM_UDCFLUSH_LINE(target) + + +DESCRIPTION +----------- + +The following argument is passed to the cache line control routines: + +target + A data object that is local to the processing element (PE). target + can be of any noncharacter type. If you are using Fortran, it can be + of any kind. + +:ref:`shmem_clear_cache_inv` disables automatic cache coherency mode +previously enabled by :ref:`shmem_set_cache_inv` or :ref:`shmem_set_cache_line_inv`. + +:ref:`shmem_clear_cache_line_inv` disables automatic cache coherency mode +for the cache line associated with the address of **target** only. + +:ref:`shmem_set_cache_inv` enables the OpenSHMEM API to automatically +decide the best strategy for cache coherency. + +:ref:`shmem_set_cache_line_inv` enables automatic cache coherency mode for +the cache line associated with the address of **target** only. 
+ +:ref:`shmem_clear_cache_inv` disables automatic cache coherency mode +previously enabled by :ref:`shmem_set_cache_inv` or :ref:`shmem_set_cache_line_inv`. + +:ref:`shmem_udcflush` makes the entire user data cache coherent. + +:ref:`shmem_udcflush_line` makes coherent the cache line that corresponds +with the address specified by target. + + +NOTES +----- + +These routines have been retained for improved backward compatibility +with legacy architectures. + + +.. seealso:: + *intro_shmem*\ (3) *shmem_put*\ (3) *shmem_swap*\ (3) diff --git a/docs/man-openshmem/man3/shmem_udcflush_line.3.rst b/docs/man-openshmem/man3/shmem_udcflush_line.3.rst new file mode 100644 index 00000000000..60d60775092 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_udcflush_line.3.rst @@ -0,0 +1,9 @@ +.. _shmem_udcflush_line: + +shmem_udcflush_line +=================== + .. include_body + +.. include:: ../man3/shmem_udcflush.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmem_wait.3.rst b/docs/man-openshmem/man3/shmem_wait.3.rst new file mode 100644 index 00000000000..5b5fd861e65 --- /dev/null +++ b/docs/man-openshmem/man3/shmem_wait.3.rst @@ -0,0 +1,201 @@ +.. _shmem_wait: + + +shmem_wait +========== + +.. include_body + +:ref:`shmem_int_wait`\ (3), :ref:`shmem_int_wait`\ (3)_until, +shmem_int4_wait\ (3), shmem_int4_wait\ (3)_until, +shmem_int8_wait\ (3), shmem_int8_wait\ (3)_until, +:ref:`shmem_long_wait`\ (3), :ref:`shmem_long_wait`\ (3)_until, +:ref:`shmem_longlong_wait`\ (3), :ref:`shmem_longlong_wait`\ (3)_until, +:ref:`shmem_short_wait`\ (3), :ref:`shmem_short_wait`\ (3)_until, +:ref:`shmem_wait`\ (3), :ref:`shmem_wait`\ (3)_until - Waits for a variable on the +local processing element (PE) to change + + +SYNOPSIS +-------- + +C or C++: + +.. 
code-block:: c++ + + #include + + void shmem_int_wait(volatile int *var, int value) + + void shmem_int_wait_until(volatile int *var, int cond, int value) + + void shmem_long_wait(volatile long *var, long value) + + void shmem_long_wait_until(volatile long *var, int cond, long value) + + void shmem_longlong_wait(volatile long long *var, long long value) + + void shmem_longlong_wait_until(volatile long long *var, int cond, + long long value) + + void shmem_short_wait(volatile short *var, short value) + + void shmem_short_wait_until(volatile short *var, int cond, + short value) + + void shmem_wait(volatile long *ivar, long cmp_value) + + void shmem_wait_until(volatile long *ivar, int cmp, long value) + +Fortran: + +.. code-block:: fortran + + INCLUDE "mpp/shmem.fh" + + CALL SHMEM_INT4_WAIT(ivar, cmp_value) + + CALL SHMEM_INT4_WAIT_UNTIL(ivar, cmp, cmp_value) + + CALL SHMEM_INT8_WAIT(ivar, cmp_value) + + CALL SHMEM_INT8_WAIT_UNTIL(ivar, cmp, cmp_value) + + CALL SHMEM_WAIT(ivar, cmp_value) + + CALL SHMEM_WAIT_UNTIL(ivar, cmp, cmp_value) + + +DESCRIPTION +----------- + +:ref:`shmem_wait` and :ref:`shmem_wait_until` wait for **ivar** to be changed by a +remote write or atomic swap issued by a different processor. These +routines can be used for point-to- point directed synchronization. A +call to :ref:`shmem_wait` does not return until some other processor writes a +value, not equal to cmp_value, into **ivar** on the waiting processor. A +call to :ref:`shmem_wait_until` does not return until some other processor +changes **ivar** to satisfy the condition implied by cmp and cmp_value. +This mechanism is useful when a processor needs to tell another +processor that it has completed some action. + +The arguments are as follows: + +target + The remotely accessible integer data object to be updated on the + remote PE. If you are using C/C++, the type of target should match + that implied in the SYNOPSIS section. 
If you are using the Fortran + compiler, it must be of type integer with an element size of 4 bytes + for SHMEM_INT4_ADD and 8 bytes for SHMEM_INT8_ADD. + +value + The value to be atomically added to target. If you are using C/C++, + the type of value should match that implied in the SYNOPSIS section. + If you are using Fortran, it must be of type integer with an element + size of target. + +pe + An integer that indicates the PE number upon which target is to be + updated. If you are using Fortran, it must be a default integer + value. + +ivar + A remotely accessible integer variable that is being updated by + another PE. If you are using C/C++, the type of ivar should match + that implied in the SYNOPSIS section. If you are using Fortran, ivar + must be a specific sized integer type according to the function being + called, as follows: + + :ref:`shmem_wait`, :ref:`shmem_wait_until`: default INTEGER + + **shmem_int4_wait, shmem_int4_wait_until:** INTEGER*4 + + **shmem_int8_wait, shmem_int8_wait_until:** INTEGER*8 + +cmp + The compare operator that compares ivar with cmp_value. cmp must be + of type integer. If you are using Fortran, it must be of default + kind. If you are using C/C++, the type of cmp should match that + implied in the SYNOPSIS section. The following cmp values are + supported: + + SHMEM_CMP_EQ + Equal + + SHMEM_CMP_NE + Not equal + + SHMEM_CMP_GT + Greater than + + SHMEM_CMP_LE + Less than or equal to + + SHMEM_CMP_LT + Less than + + SHMEM_CMP_GE + Greater than or equal to + +cmp_value + cmp_value must be of type integer. If you are using C/C++, the type + of cmp_value should match that implied in the SYNOPSIS section. If you + are using Fortran, cmp_value must be an integer of the same size and + kind as ivar. The :ref:`shmem_wait` routines return when ivar is no longer + equal to cmp_value. The :ref:`shmem_wait_until` routines return when the + compare condition is true. 
The compare condition is defined by the + ivar argument compared with the cmp_value using the comparison + operator, cmp. + + +EXAMPLES +-------- + +**Example 1:** The following call returns when variable ivar is not +equal to 100: + +:: + + INTEGER*8 IVAR + + CALL SHMEM_INT8_WAIT(IVAR, INT8(100)) + +**Example 2:** The following call to SHMEM_INT8_WAIT_UNTIL is equivalent +to the call to SHMEM_INT8_WAIT in example 1: + +:: + + INTEGER*8 IVAR + + CALL SHMEM_INT8_WAIT_UNTIL(IVAR, SHMEM_CMP_NE, INT8(100)) + +**Example 3:** The following C/C++ call waits until the sign bit in ivar +is set by a transfer from a remote PE: + +:: + + int ivar; + + shmem_int_wait_until(&ivar, SHMEM_CMP_LT, 0); + +**Example 4:** The following Fortran example is in the context of a +subroutine: + +:: + + SUBROUTINE EXAMPLE() + INTEGER FLAG_VAR + COMMON/FLAG/FLAG_VAR + . . . + FLAG_VAR = FLAG_VALUE ! initialize the event variable + . . . + IF (FLAG_VAR .EQ. FLAG_VALUE) THEN + CALL SHMEM_WAIT(FLAG_VAR, FLAG_VALUE) + ENDIF + FLAG_VAR = FLAG_VALUE ! reset the event variable for next time + . . . + END + + +.. seealso:: + *intro_shmem*\ (3) *shmem_put*\ (3) diff --git a/docs/man-openshmem/man3/shmem_wait_until.3.rst b/docs/man-openshmem/man3/shmem_wait_until.3.rst new file mode 100644 index 00000000000..8cb089fa32a --- /dev/null +++ b/docs/man-openshmem/man3/shmem_wait_until.3.rst @@ -0,0 +1,9 @@ +.. _shmem_wait_until: + +shmem_wait_until +================ + .. include_body + +.. include:: ../man3/shmem_wait.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/shmemalign.3.rst b/docs/man-openshmem/man3/shmemalign.3.rst new file mode 100644 index 00000000000..3d38c6bbc34 --- /dev/null +++ b/docs/man-openshmem/man3/shmemalign.3.rst @@ -0,0 +1,9 @@ +.. _shmemalign: + +shmemalign +========== + .. include_body + +.. include:: ../man3/shmem_malloc.3.rst + :start-after: .. 
include_body + diff --git a/docs/man-openshmem/man3/shrealloc.3.rst b/docs/man-openshmem/man3/shrealloc.3.rst new file mode 100644 index 00000000000..c95c9de3fde --- /dev/null +++ b/docs/man-openshmem/man3/shrealloc.3.rst @@ -0,0 +1,9 @@ +.. _shrealloc: + +shrealloc +========= + .. include_body + +.. include:: ../man3/shmem_malloc.3.rst + :start-after: .. include_body + diff --git a/docs/man-openshmem/man3/start_pes.3.rst b/docs/man-openshmem/man3/start_pes.3.rst new file mode 100644 index 00000000000..b1624e28a96 --- /dev/null +++ b/docs/man-openshmem/man3/start_pes.3.rst @@ -0,0 +1,9 @@ +.. _start_pes: + +start_pes +========= + .. include_body + +.. include:: ../man3/shmem_init.3.rst + :start-after: .. include_body + diff --git a/docs/networking/cuda.rst b/docs/networking/cuda.rst new file mode 100644 index 00000000000..4b4869f47e9 --- /dev/null +++ b/docs/networking/cuda.rst @@ -0,0 +1,690 @@ +CUDA +==== + +How do I build Open MPI with CUDA-aware support? +------------------------------------------------ + +CUDA-aware support means that the MPI library can send and receive GPU +buffers directly. CUDA support is being continuously updated so +different levels of support exist in different versions. We recommend +you use the latest version of Open MPI for best support. + +Open MPI offers two flavors of CUDA support: + +#. Via `UCX `_. + + This is the preferred mechanism. Since UCX will be providing the + CUDA support, it is important to ensure that UCX itself is built + with CUDA support. + + To see if your ucx was built with CUDA support run the following + command: + + .. 
code-block:: sh + + # Check if ucx was built with CUDA support + shell$ ucx_info -v + + # configured with: --build=powerpc64le-redhat-linux-gnu --host=powerpc64le-redhat-linux-gnu --program-prefix= --disable-dependency-tracking --prefix=/usr --exec-prefix=/usr --bindir=/usr/bin --sbindir=/usr/sbin --sysconfdir=/etc --datadir=/usr/share --includedir=/usr/include --libdir=/usr/lib64 --libexecdir=/usr/libexec --localstatedir=/var --sharedstatedir=/var/lib --mandir=/usr/share/man --infodir=/usr/share/info --disable-optimizations --disable-logging --disable-debug --disable-assertions --enable-mt --disable-params-check --enable-cma --without-cuda --without-gdrcopy --with-verbs --with-cm --with-knem --with-rdmacm --without-rocm --without-xpmem --without-ugni --without-java + + If you need to build ucx yourself to include CUDA support, please + see the UCX documentation for `building ucx with Open MPI: `_ + + It should look something like: + + .. code-block:: sh + + # Configure UCX this way + shell$ ./configure --prefix=/path/to/ucx-cuda-install --with-cuda=/usr/local/cuda --with-gdrcopy=/usr + + # Configure Open MPI this way + shell$ ./configure --with-cuda=/usr/local/cuda --with-ucx=/path/to/ucx-cuda-install + +#. Via internal Open MPI CUDA support + +Regardless of which flavor of CUDA support (or both) you plan to use, +Open MPI should be configured using the ``--with-cuda=`` +configure option to build CUDA support into Open MPI. + +This affects the smcuda shared memory btl, as well as the uct btl. + +///////////////////////////////////////////////////////////////////////// + +How do I verify that Open MPI has been built with CUDA support? +--------------------------------------------------------------- + +Verify that Open MPI has been built with cuda using ``ompi_info`` + +.. 
code-block:: sh + + # Use ompi_info to verify cuda support in Open MPI + shell$ ./ompi_info |grep "MPI extensions" + MPI extensions: affinity, cuda, pcollreq + +///////////////////////////////////////////////////////////////////////// + +How do I run Open MPI with applications that pass CUDA buffers to MPI? +---------------------------------------------------------------------- + +Open MPI will detect and enable CUDA enabled components at runtime with +no additional mpirun parameters. + +///////////////////////////////////////////////////////////////////////// + +How do I build Open MPI with CUDA-aware support using PGI? +---------------------------------------------------------- + +With CUDA 6.5, you can build all versions of CUDA-aware Open MPI +without doing anything special. However, with CUDA 7.0 and CUDA 7.5, +you need to pass in some specific compiler flags for things to work +correctly. Add the following to your configure line. + +.. code-block:: sh + + # For PGI 15.9 and later (Also called NVCC): + shell$ ./configure --with-wrapper-cflags=-ta:tesla + + # For earlier versions of PGI: + shell$ ./configure CFLAGS=-D__LP64__ --with-wrapper-cflags="-D__LP64__ -ta:tesla" + +///////////////////////////////////////////////////////////////////////// + +What kind of CUDA support exists in Open MPI? +--------------------------------------------- + +CUDA-aware support is defined as Open MPI automatically detecting that +the argument pointer being passed to an MPI routine is a CUDA device +memory pointer. + +See :ref:`this FAQ entry ` +for more details on which APIs are CUDA-aware. + + +.. error:: CUDA 4.0 is SUPER OLD! End users dont care about the + differences between cuda-aware, cuda-ipc, gpu-direct, and gpu-direct-rdma + +Open MPI depends on various features of CUDA 4.0, so one needs to have +at least the CUDA 4.0 driver and toolkit. The new features of +interest are the Unified Virtual Addressing (UVA) so that all pointers +within a program have unique addresses. 
In addition, there is a new +API that allows one to determine if a pointer is a CUDA device pointer +or host memory pointer. This API is used by the library to decide +what needs to be done with each buffer. In addition, CUDA 4.1 also +provides the ability to register host memory with the CUDA driver, +which can improve performance. CUDA 4.1 also added CUDA IPC support +for fast communication between GPUs on the same node. + +Note that derived datatypes |mdash| both contiguous and non-contiguous +|mdash| are supported. However, the non-contiguous datatypes +currently have high overhead because of the many calls to the CUDA +function ``cuMemcpy()`` to copy all the pieces of the buffer into the +intermediate buffer. + +CUDA-aware support is available in: + +* The UCX (``ucx``) PML +* The PSM2 (``psm2``) MTL with the CM (``cm``) PML. +* Both CUDA-ized shared memory (``smcuda``) and TCP (``tcp``) BTLs + with the OB1 (``ob1``) PML. +* The HCOLL (``hcoll``) COLL + +///////////////////////////////////////////////////////////////////////// + +PSM2 support for CUDA +--------------------- + +CUDA-aware support is present in PSM2 MTL. When running CUDA-aware +Open MPI on Intel Omni-path, the PSM2 MTL will automatically set +``PSM2_CUDA`` environment variable which enables PSM2 to handle GPU +buffers. If the user wants to use host buffers with a CUDA-aware Open +MPI, it is recommended to set ``PSM2_CUDA`` to ``0`` in the execution +environment. PSM2 also has support for the NVIDIA GPUDirect support +feature. To enable this, users will need to set ``PSM2_GPUDIRECT`` +to ``1`` in the execution environment. + +Note: The PSM2 library and ``hfi1`` driver with CUDA support are +requirements to use GPUDirect support on Intel Omni-Path. The minimum +PSM2 build version required is `PSM2 10.2.175 +`_. + +For more information refer to the `Intel Omni-Path documentation +`_. 
+ +///////////////////////////////////////////////////////////////////////// + +How can I tell if Open MPI was built with CUDA support? +------------------------------------------------------- + +Use the ``ompi_info`` command: + +.. code-block:: + + shell$ ompi_info --parsable --all | grep mpi_built_with_cuda_support:value + mca:mpi:base:param:mpi_built_with_cuda_support:value:true + +///////////////////////////////////////////////////////////////////////// + +Can I get additional CUDA debug-level information at run-time? +-------------------------------------------------------------- + +Yes, by enabling some verbosity flags. + +* The ``opal_cuda_verbose`` parameter has only one level of verbosity: + + .. code-block:: + + shell$ mpirun --mca opal_cuda_verbose 10 ... + + +* The ``mpi_common_cuda_verbose`` parameter provides additional + information about CUDA-aware related activities. This can be set to + a variety of different values. There is really no need to use these + unless you have strange problems: + + .. code-block:: sh + + # A bunch of CUDA debug information + shell$ mpirun --mca mpi_common_cuda_verbose 10 ... + # Even more CUDA debug information + shell$ mpirun --mca mpi_common_cuda_verbose 20 ... + # Yet more CUDA debug information + shell$ mpirun --mca mpi_common_cuda_verbose 100 ... + +* The ``smcuda`` BTL has three MCA parameters related to the use of + CUDA IPC. By default, CUDA IPC is used where possible. But the + user can now turn it off if they prefer. + + .. code-block:: sh + + shell$ mpirun --mca btl_smcuda_use_cuda_ipc 0 ... + + In addition, it is assumed that CUDA IPC is possible when running on + the same GPU, and this is typically true. However, there is the + ability to turn it off. + + .. code-block:: sh + + shell$ mpirun --mca btl_smcuda_use_cuda_ipc_same_gpu 0 ... + + Last, to get some insight into whether CUDA IPC is being used, you + can turn on some verbosity that shows whether CUDA IPC gets enabled + between two GPUs. + + .. 
code-block:: sh + + shell$ mpirun --mca btl_smcuda_cuda_ipc_verbose 100 ... + +///////////////////////////////////////////////////////////////////////// + +.. _faq-cuda-mpi-cuda-numa-issues-label: + +NUMA Node Issues +---------------- + +When running on a node that has multiple GPUs, you may want to select +the GPU that is closest to the NUMA node on which your process is +running. One way to do this is to make use of the ``hwloc`` library. +The following is a C code snippet that can be used in your application +to select a GPU that is close. It will determine on which CPU it is +running and then look for the closest GPU. There could be multiple +GPUs that are the same distance away. This is dependent on having +``hwloc`` somewhere on your system. + +.. code-block:: c + + /** + * Test program to show the use of hwloc to select the GPU closest to the CPU + * that the MPI program is running on. Note that this works even without + * any libpciacces or libpci support as it keys off the NVIDIA vendor ID. + * There may be other ways to implement this but this is one way. + * January 10, 2014 + */ + #include + #include + #include "cuda.h" + #include "mpi.h" + #include "hwloc.h" + + #define ABORT_ON_ERROR(func) \ + { CUresult res; \ + res = func; \ + if (CUDA_SUCCESS != res) { \ + printf("%s returned error=%d\n", #func, res); \ + abort(); \ + } \ + } + static hwloc_topology_t topology = NULL; + static int gpuIndex = 0; + static hwloc_obj_t gpus[16] = {0}; + + /** + * This function searches for all the GPUs that are hanging off a NUMA + * node. It walks through each of the PCI devices and looks for ones + * with the NVIDIA vendor ID. It then stores them into an array. + * Note that there can be more than one GPU on the NUMA node. 
+ */ + static void find_gpus(hwloc_topology_t topology, hwloc_obj_t parent, hwloc_obj_t child) { + hwloc_obj_t pcidev; + pcidev = hwloc_get_next_child(topology, parent, child); + if (NULL == pcidev) { + return; + } else if (0 != pcidev->arity) { + /* This device has children so need to look recursively at them */ + find_gpus(topology, pcidev, NULL); + find_gpus(topology, parent, pcidev); + } else { + if (pcidev->attr->pcidev.vendor_id == 0x10de) { + gpus[gpuIndex++] = pcidev; + } + find_gpus(topology, parent, pcidev); + } + } + + int main(int argc, char *argv[]) + { + int rank, retval, length; + char procname[MPI_MAX_PROCESSOR_NAME+1]; + const unsigned long flags = HWLOC_TOPOLOGY_FLAG_IO_DEVICES | HWLOC_TOPOLOGY_FLAG_IO_BRIDGES; + hwloc_cpuset_t newset; + hwloc_obj_t node, bridge; + char pciBusId[16]; + CUdevice dev; + char devName[256]; + + MPI_Init(&argc, &argv); + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + if (MPI_SUCCESS != MPI_Get_processor_name(procname, &length)) { + strcpy(procname, "unknown"); + } + + /* Now decide which GPU to pick. This requires hwloc to work properly. + * We first see which CPU we are bound to, then try and find a GPU nearby. + */ + retval = hwloc_topology_init(&topology); + assert(retval == 0); + retval = hwloc_topology_set_flags(topology, flags); + assert(retval == 0); + retval = hwloc_topology_load(topology); + assert(retval == 0); + newset = hwloc_bitmap_alloc(); + retval = hwloc_get_last_cpu_location(topology, newset, 0); + assert(retval == 0); + + /* Get the object that contains the cpuset */ + node = hwloc_get_first_largest_obj_inside_cpuset(topology, newset); + + /* Climb up from that object until we find the HWLOC_OBJ_NODE */ + while (node->type != HWLOC_OBJ_NODE) { + node = node->parent; + } + + /* Now look for the HWLOC_OBJ_BRIDGE. 
All PCI busses hanging off the + * node will have one of these */ + bridge = hwloc_get_next_child(topology, node, NULL); + while (bridge->type != HWLOC_OBJ_BRIDGE) { + bridge = hwloc_get_next_child(topology, node, bridge); + } + + /* Now find all the GPUs on this NUMA node and put them into an array */ + find_gpus(topology, bridge, NULL); + + ABORT_ON_ERROR(cuInit(0)); + /* Now select the first GPU that we find */ + if (gpus[0] == 0) { + printf("No GPU found\n"); + } else { + sprintf(pciBusId, "%.2x:%.2x:%.2x.%x", gpus[0]->attr->pcidev.domain, gpus[0]->attr->pcidev.bus, + gpus[0]->attr->pcidev.dev, gpus[0]->attr->pcidev.func); + ABORT_ON_ERROR(cuDeviceGetByPCIBusId(&dev, pciBusId)); + ABORT_ON_ERROR(cuDeviceGetName(devName, 256, dev)); + printf("rank=%d (%s): Selected GPU=%s, name=%s\n", rank, procname, pciBusId, devName); + } + + MPI_Finalize(); + return 0; + } + +///////////////////////////////////////////////////////////////////////// + +How do I develop CUDA-aware Open MPI applications? +-------------------------------------------------- + +Developing CUDA-aware applications is a complex topic, and beyond the +scope of this document. CUDA-aware applications often have to take +machine-specific considerations into account, including the number of +GPUs installed on each node and how the GPUs are connected to the CPUs +and to each other. Often, when using a particular transport layer +(such as OPA/PSM2) there will be run-time decisions to make about +which CPU cores will be used with which GPUs. + +A good place to start is the `NVIDIA CUDA Toolkit Documentation +`_ including the `Programming Guide +`_ and the +`Best Practices Guide +`_. For +examples of how to write CUDA-aware MPI applications, the `NVIDIA +developers blog +`_ +offers examples and the `OSU Micro-Benchmarks +`_ offer an excellent +example of how to write CUDA-aware MPI applications. + +///////////////////////////////////////////////////////////////////////// + +.. 
_faq-cuda-mpi-apis-cuda-label: + +Which MPI APIs work with CUDA-aware? +------------------------------------ + +* MPI_Allgather +* MPI_Allgatherv +* MPI_Allreduce +* MPI_Alltoall +* MPI_Alltoallv +* MPI_Alltoallw +* MPI_Bcast +* MPI_Bsend +* MPI_Bsend_init +* MPI_Exscan +* MPI_Ibsend +* MPI_Irecv +* MPI_Isend +* MPI_Irsend +* MPI_Issend +* MPI_Gather +* MPI_Gatherv +* MPI_Get +* MPI_Put +* MPI_Rsend +* MPI_Rsend_init +* MPI_Recv +* MPI_Recv_init +* MPI_Reduce +* MPI_Reduce_scatter +* MPI_Reduce_scatter_block +* MPI_Scan +* MPI_Scatter +* MPI_Scatterv +* MPI_Send +* MPI_Send_init +* MPI_Sendrecv +* MPI_Ssend +* MPI_Ssend_init +* MPI_Win_create + +.. FIXME: We need to verify the above list. + +///////////////////////////////////////////////////////////////////////// + +Which MPI APIs do NOT work with CUDA-aware? +------------------------------------------- + +* MPI_Accumulate +* MPI_Compare_and_swap +* MPI_Fetch_and_op +* MPI_Get_Accumulate +* MPI_Iallgather +* MPI_Iallgatherv +* MPI_Iallreduce +* MPI_Ialltoall +* MPI_Ialltoallv +* MPI_Ialltoallw +* MPI_Ibcast +* MPI_Iexscan +* MPI_Rget +* MPI_Rput + +.. FIXME: We need to verify the above list. + +///////////////////////////////////////////////////////////////////////// + +How do I use CUDA-aware UCX for Open MPI? +----------------------------------------- + +Example of running ``osu_latency`` from the `OSU benchmarks +`_ with CUDA buffers +using Open MPI and UCX CUDA support: + +.. code-block:: + + shell$ mpirun -np 2 --mca pml ucx \ + -x UCX_TLS=rc,sm,cuda_copy,gdr_copy,cuda_ipc ./osu_latency D D + +///////////////////////////////////////////////////////////////////////// + +Which MPI APIs work with CUDA-aware UCX? 
+---------------------------------------- + +* MPI_Send +* MPI_Bsend +* MPI_Ssend +* MPI_Rsend +* MPI_Isend +* MPI_Ibsend +* MPI_Issend +* MPI_Irsend +* MPI_Send_init +* MPI_Bsend_init +* MPI_Ssend_init +* MPI_Rsend_init +* MPI_Recv +* MPI_Irecv +* MPI_Recv_init +* MPI_Sendrecv +* MPI_Bcast +* MPI_Gather +* MPI_Gatherv +* MPI_Allgather +* MPI_Reduce +* MPI_Reduce_scatter +* MPI_Reduce_scatter_block +* MPI_Allreduce +* MPI_Scan +* MPI_Exscan +* MPI_Allgatherv +* MPI_Alltoall +* MPI_Alltoallv +* MPI_Alltoallw +* MPI_Scatter +* MPI_Scatterv +* MPI_Iallgather +* MPI_Iallgatherv +* MPI_Ialltoall +* MPI_Ialltoallv +* MPI_Ialltoallw +* MPI_Ibcast +* MPI_Iexscan + +.. FIXME: We need to verify the above list. These _SHOULD_ be the same + as above. + +///////////////////////////////////////////////////////////////////////// + +Which MPI APIs do NOT work with CUDA-aware UCX? +----------------------------------------------- + +* All one-sided operations such as MPI_Put, MPI_Get, MPI_Accumulate, + MPI_Rget, MPI_Rput, MPI_Get_Accumulate, MPI_Fetch_and_op, + MPI_Compare_and_swap, etc +* All window creation calls such as MPI_Win_create +* All non-blocking reduction collectives like MPI_Ireduce, + MPI_Iallreduce, etc + +.. FIXME: Checking with nVidia. This may be more of an issue of OSC_UCX + not supporting CUDA, though perhaps it's just performance. + +///////////////////////////////////////////////////////////////////////// + +Can I tell at compile time or runtime whether I have CUDA-aware support? +------------------------------------------------------------------------ + +There is both a compile time check and a run-time check available. +You can use whichever is the most convenient for your program. To +access them, you need to include ``mpi-ext.h``. Note that +``mpi-ext.h`` is specific to Open MPI. The following program shows an +example of using the CUDA-aware macro and run-time check. + +.. 
code-block:: c + + /* + * Program that shows the use of CUDA-aware macro and runtime check. + */ + #include <stdio.h> + #include "mpi.h" + + #if !defined(OPEN_MPI) || !OPEN_MPI + #error This source code uses an Open MPI-specific extension + #endif + + /* Needed for MPIX_Query_cuda_support(), below */ + #include "mpi-ext.h" + + int main(int argc, char *argv[]) + { + printf("Compile time check:\n"); + #if defined(MPIX_CUDA_AWARE_SUPPORT) && MPIX_CUDA_AWARE_SUPPORT + printf("This MPI library has CUDA-aware support.\n"); + #elif defined(MPIX_CUDA_AWARE_SUPPORT) && !MPIX_CUDA_AWARE_SUPPORT + printf("This MPI library does not have CUDA-aware support.\n"); + #else + printf("This MPI library cannot determine if there is CUDA-aware support.\n"); + #endif /* MPIX_CUDA_AWARE_SUPPORT */ + + printf("Run time check:\n"); + #if defined(MPIX_CUDA_AWARE_SUPPORT) + if (1 == MPIX_Query_cuda_support()) { + printf("This MPI library has CUDA-aware support.\n"); + } else { + printf("This MPI library does not have CUDA-aware support.\n"); + } + #else /* !defined(MPIX_CUDA_AWARE_SUPPORT) */ + printf("This MPI library cannot determine if there is CUDA-aware support.\n"); + #endif /* MPIX_CUDA_AWARE_SUPPORT */ + + return 0; + } + +///////////////////////////////////////////////////////////////////////// + +How do I limit how much CUDA IPC memory is held in the registration cache? +-------------------------------------------------------------------------- + +As mentioned earlier, the Open MPI library will make use of CUDA IPC support where +possible to move the GPU data quickly between GPUs that are on the same node and +same PCI root complex. The library holds on to registrations even after the data +transfer is complete as it is expensive to make some of the CUDA IPC registration +calls. If you want to limit how much memory is registered, you can use the +``mpool_rgpusm_rcache_size_limit`` MCA parameter. For example, this sets the limit +to 1000000 bytes: + +.. 
code-block:: + + shell$ mpirun --mca mpool_rgpusm_rcache_size_limit 1000000 ... + +When the cache reaches this size, it will kick out the least recently used until +it can fit the new registration in. + +There also is the ability to have the cache empty itself out when the +limit is reached: + +.. code-block:: + + shell$ mpirun --mca mpool_rgpusm_rcache_empty_cache 1 ... + +///////////////////////////////////////////////////////////////////////// + +What are some guidelines for using CUDA and Open MPI with Omni-Path? +-------------------------------------------------------------------- + +When developing CUDA-aware Open MPI applications for OPA-based +fabrics, the PSM2 transport is preferred and a CUDA-aware version of +PSM2 is provided with all versions of the Intel Omni-Path IFS software +suite. + +.. error:: TODO Are Intel/OPA references still correct? + +The PSM2 library provides a number of settings that will govern how it +will interact with CUDA, including ``PSM2_CUDA`` and ``PSM2_GPUDIRECT``, +which should be set in the environment before ``MPI_Init()`` is called. For +example: + +.. code-block:: + + shell$ mpirun -x PSM2_CUDA=1 -x PSM2_GPUDIRECT=1 --mca mtl psm2 mpi_hello + +In addition, each process of the application should select a specific +GPU card to use before calling ``MPI_Init()``, by using +``cudaChooseDevice()``, ``cudaSetDevice()`` and similar. The chosen +GPU should be within the same NUMA node as the CPU the MPI process is +running on. You will also want to use the ``mpirun`` +``--bind-to-core`` or ``--bind-to-socket`` option to ensure that MPI +processes do not move between NUMA nodes. See the section on +:ref:`NUMA Node Issues `, for +more information. + +For more information see the *Intel Performance Scaled Messaging 2 +(PSM2) Programmer's Guide* and the *Intel Omni-Path Performance Tuning +Guide*, which can be found on the `Intel Omni-Path web site +`_. + +.. error:: TODO Are Intel/OPA references still correct? 
+ +///////////////////////////////////////////////////////////////////////// + +When do I need to select a CUDA device? +--------------------------------------- + +.. _mpi-cuda-dev-selection: + +Open MPI requires CUDA resources allocated for internal use. These +are allocated lazily when they are first needed, e.g. CUDA IPC mem handles +are created when a communication routine first requires them during a +transfer. So, the CUDA device needs to be selected before the first MPI +call requiring a CUDA resource. MPI_Init and most communicator related +operations do not create any CUDA resources (guaranteed for MPI_Init, +MPI_Comm_rank, MPI_Comm_size, MPI_Comm_split_type and MPI_Comm_free). It +is thus possible to use those routines to query rank information and use +those to select a GPU, e.g. using + +.. code-block:: c + + int local_rank = -1; + { + MPI_Comm local_comm; + MPI_Comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, rank, MPI_INFO_NULL, &local_comm); + MPI_Comm_rank(local_comm, &local_rank); + MPI_Comm_free(&local_comm); + } + int num_devices = 0; + cudaGetDeviceCount(&num_devices); + cudaSetDevice(local_rank % num_devices); + +MPI internal CUDA resources are released during MPI_Finalize. Thus it is an +application error to call cudaDeviceReset before MPI_Finalize is called. + + +///////////////////////////////////////////////////////////////////////// + +How do I enable CUDA support in HCOLL collective component +---------------------------------------------------------- + +HCOLL component supports CUDA GPU buffers for the following +collectives: + +MPI_Allreduce +MPI_Bcast +MPI_Allgather +MPI_Ibarrier +MPI_Ibcast +MPI_Iallgather +MPI_Iallreduce + +To enable CUDA GPU buffer support in these collectives pass the +following environment variables via mpirun: + +.. code-block:: + + shell$ mpirun -x HCOLL_GPU_ENABLE=1 -x HCOLL_ENABLE_NBC=1 .. + +See `nVidia HCOLL documentation `_ +for more information. 
diff --git a/docs/networking/ib-and-roce.rst b/docs/networking/ib-and-roce.rst new file mode 100644 index 00000000000..7239fe4f0ab --- /dev/null +++ b/docs/networking/ib-and-roce.rst @@ -0,0 +1,185 @@ +InfiniBand / RoCE support +========================== + +How are InfiniBand / RoCE devices supported in Open MPI? +-------------------------------------------------------- + +Open MPI's support for InfiniBand and RoCE devices has changed over +time. + +In the Open MPI |ompi_series| series, InfiniBand and RoCE devices are +supported via the UCX (``ucx``) PML. + +.. note:: Prior versions of Open MPI also included the ``openib`` BTL + for InfiniBand and RoCE devices. Open MPI |ompi_series| no + longer includes the ``openib`` BTL. + +///////////////////////////////////////////////////////////////////////// + +What is UCX? +------------ + +`UCX `_ is an open-source optimized +communication library which supports multiple networks, including +RoCE, InfiniBand, uGNI, TCP, shared memory, and others. UCX +mixes-and-matches transports and protocols which are available on the +system to provide optimal performance. It also has built-in support +for GPU transports (with CUDA and ROCm providers) which lets +RDMA-capable transports access the GPU memory directly. + +///////////////////////////////////////////////////////////////////////// + +How do I use UCX with Open MPI? +------------------------------- + +If Open MPI includes UCX support, then UCX is enabled and selected by +default for InfiniBand and RoCE network devices; typically, no +additional parameters are required. In this case, the network port +with the highest bandwidth on the system will be used for inter-node +communication, and shared memory will be used for intra-node +communication. To select a specific network device to use (for +example, ``mlx5_0`` device port 1): + +.. code-block:: + + shell$ mpirun -x UCX_NET_DEVICES=mlx5_0:1 ... 
+ +It's also possible to force using UCX for MPI point-to-point and +one-sided operations: + +.. code-block:: + + shell$ mpirun --mca pml ucx --mca osc ucx ... + +For OpenSHMEM, in addition to the above, it's possible to force using +UCX for remote memory access and atomic memory operations: + +.. code-block:: + + shell$ mpirun --mca pml ucx --mca osc ucx --mca scoll ucx --mca atomic ucx ... + +///////////////////////////////////////////////////////////////////////// + +What is RDMA over Converged Ethernet (RoCE)? +-------------------------------------------- + +RoCE (which stands for *RDMA over Converged Ethernet*) provides +InfiniBand native RDMA transport on top of lossless Ethernet data +links. + +Since we're talking about Ethernet, there's no Subnet Manager, no +Subnet Administrator, no InfiniBand SL, nor any other InfiniBand +Subnet Administration parameters. + +Connection management in RoCE is based on the OFED RDMACM (RDMA +Connection Manager) service: + +* The OS IP stack is used to resolve remote (IP,hostname) tuples to + a DMAC. +* The outgoing Ethernet interface and VLAN are determined according + to this resolution. +* The appropriate RoCE device is selected accordingly. +* Network parameters (such as MTU, SL, timeout) are set locally by + the RDMACM in accordance with kernel policy. + +///////////////////////////////////////////////////////////////////////// + +How do I know what MCA parameters are available for tuning MPI performance? +--------------------------------------------------------------------------- + +The ``ompi_info`` command can display all the parameters available for +any Open MPI component. For example: + +.. code-block:: + + shell$ ompi_info --param pml ucx --level 9 + +.. important:: Unlike most other Open MPI components, the UCX PML + mainly uses environment variables for run-time tuning + |mdash| not Open MPI MCA parameters. Consult `the UCX + documentation + `_ for details + about what environment variables are available. 
+ +///////////////////////////////////////////////////////////////////////// + +How do I tell Open MPI which IB Service Level to use? +----------------------------------------------------- + +In order to tell the UCX PML which SL to use, the IB SL must be +specified using the ``UCX_IB_SL`` environment variable. For example: + +.. code-block:: + + shell$ mpirun --mca pml ucx -x UCX_IB_SL=N ... + +The value of IB SL ``N`` should be between 0 and 15, where 0 is the +default value. + +///////////////////////////////////////////////////////////////////////// + +How do I run Open MPI over RoCE? +-------------------------------- + +In order to use RoCE with the UCX PML, the relevant Ethernet port must +be specified using the ``UCX_NET_DEVICES`` environment variable. For +example: + +.. code-block:: + + shell$ mpirun --mca pml ucx -x UCX_NET_DEVICES=mlx5_0:1 ... + +UCX selects IPv4 RoCEv2 by default. If different behavior is needed, +you can set a specific GID index: + +.. code-block:: + + shell$ mpirun --mca pml ucx -x UCX_NET_DEVICES=mlx5_0:1 -x UCX_IB_GID_INDEX=1 ... + +///////////////////////////////////////////////////////////////////////// + +.. _faq-ib-troubleshoot-label: + +I'm experiencing a problem with Open MPI on my InfiniBand / RoCE network; how do I troubleshoot and get help? +------------------------------------------------------------------------------------------------------------- + +In order for us to help you, it is *most* helpful if you can run a few +steps before sending an e-mail to both perform some basic +troubleshooting and provide us with enough information about your +environment to help you. Please include answers to the following +questions in your e-mail: + +#. Which UCX and OpenFabrics version are you running? Please specify + where you got the software from (e.g., from the OpenFabrics and/or + UCX community web sites, already included in your Linux + distribution, downloaded from NVIDIA's web site, etc.). + +#. 
What distro and version of Linux are you running? What is your + kernel version? + +#. What is the output of the ``ibv_devinfo`` command on a known "good" + node and a known "bad" node? + + .. note:: There must be at least one port listed as "PORT_ACTIVE" + for Open MPI to work. If there is not at least one + PORT_ACTIVE port, something is wrong with your InfiniBand + / RoCE environment and Open MPI will not be able to run. + +#. What is the output of the ``ifconfig`` command on a known "good" + node and a known "bad" node? + + .. note:: Note that some Linux distributions do not put + ``ifconfig`` in the default path for normal users; look + for it at ``/sbin/ifconfig`` or ``/usr/sbin/ifconfig``. + +#. If running under Bourne shells, what is the output of the ``ulimit + -l`` command? + + If running under C shells, what is the output of the ``limit | grep + memorylocked`` command? + + .. note:: If the value is not ``unlimited``, ................. + + .. error:: TODO Would be good to point to some UCX/vendor docs here + about setting memory limits (rather than reproducing this + information ourselves). diff --git a/docs/networking/index.rst b/docs/networking/index.rst new file mode 100644 index 00000000000..ec55f1577f7 --- /dev/null +++ b/docs/networking/index.rst @@ -0,0 +1,28 @@ +Networking system support +========================= + +Open MPI supports a variety of different networking transports for +off-node communication. Not all of these transports are supported or +available on every platform. Many require specialized hardware, +operating system drivers, and/or network transport libraries. + +When Open MPI is being configured, it will search for a variety of +network transport libraries (and corresponding development header +files). By default, if ``configure`` can find a network transport +library and its development header files, it will include support for +that library. 
If ``configure`` does not find a library or its header +files, it will simply skip that library (and Open MPI will simply not +build support for that library). + +.. note:: The sections listed below are by no means comprehensive. + +.. toctree:: + :maxdepth: 1 + + ofi + tcp + shared-memory + opa + ib-and-roce + iwarp + cuda diff --git a/docs/networking/iwarp.rst b/docs/networking/iwarp.rst new file mode 100644 index 00000000000..3b1b56384ae --- /dev/null +++ b/docs/networking/iwarp.rst @@ -0,0 +1,26 @@ +iWARP Support +============= + +How are iWARP devices supported in Open MPI? +-------------------------------------------- + +Open MPI's support for iWARP devices has changed over time. + +In the Open MPI |ompi_series| series, iWARP devices are +supported via the OFI (``ofi``) MTL via the CM (``cm``) PML. + +.. note:: Prior versions of Open MPI supported iWARP devices via the + ``openib`` BTL. Open MPI |ompi_series| no longer includes + the ``openib`` BTL. + +Specifically, iWARP support is provided through the ``rxm`` provider +of the OpenFabrics Interfaces library ``libfabric``. There is +software emulation involved in the MPI support of iWARP devices (and +therefore at least some level of performance degradation), but the +current iWARP vendors have chosen not to provide a higher-performance +option. + +.. important:: iWARP support is not well tested or maintained in Open + MPI. The Open MPI community would love to have a + maintainer who can develop and provide support for + iWARP devices over time. diff --git a/docs/networking/ofi.rst b/docs/networking/ofi.rst new file mode 100644 index 00000000000..f6d8d8d1c36 --- /dev/null +++ b/docs/networking/ofi.rst @@ -0,0 +1,126 @@ +OpenFabrics Interfaces (OFI) / Libfabric support +================================================ + +What is OFI / Libfabric? +------------------------ + +"OFI" stands for the `OpenFabrics Interfaces +`_, which are implemented in the ``libfabric`` +library. 
These two terms are typically used interchangeably. + +Open MPI supports many different underlying networks via Libfabric, +including (but not limited to): + +* AWS EFA +* Cisco usNIC +* Cray uGNI +* Cornelis Networks Omni-Path + +In general, the OFI-based components in Open MPI will auto-select +themselves as appropriate at run time. + +That being said, additional questions are available in this FAQ +section to provide more information about specific OFI-based network +types and support. + +///////////////////////////////////////////////////////////////////////// + +What are the Libfabric (OFI) components in Open MPI? +---------------------------------------------------- + +Open MPI has three main components for Libfabric (a.k.a., OFI) +communications: + +#. ``ofi`` MTL: Available since Open MPI v1.10, this component is used + with the ``cm`` PML and is used for two-sided MPI communication + (e.g., ``MPI_SEND`` and ``MPI_RECV``). + + The ``ofi`` MTL requires that the Libfabric provider support + reliable datagrams with ordered tagged messaging (specifically: + ``FI_EP_RDM`` endpoints, ``FI_TAGGED`` capabilities, and + ``FI_ORDER_SAS`` ordering). + +#. ``ofi`` BTL: Available since Open MPI v4.0.0, this component is + used for one-sided MPI communications (e.g., ``MPI_PUT``). The + ``ofi`` BTL requires that the Libfabric provider support reliable + datagrams, RMA and atomic operations, and remote atomic completion + notifications (specifically: ``FI_EP_RDM`` endpoints, ``FI_RMA`` + and ``FI_ATOMIC`` capabilities, and ``FI_DELIVERY_COMPLETE`` op + flags). + +#. ``usnic`` BTL: This BTL is used exclusively with Cisco usNIC-based + networks. It will auto-select itself over the other OFI-based + components when run with Cisco usNIC-based networks. + +See each Libfabric provider man page (e.g., fi_sockets(7)) to understand which +provider will work for each of the above-listed Open MPI components. 
Some +providers may need to be used with one of the Libfabric utility providers; +for example, the verbs provider needs to be paired with utility provider +``ofi_rxm`` to provide reliable datagram endpoint support (``verbs;ofi_rxm``). + +Both components have MCA parameters to specify the Libfabric provider(s) that +will be included/excluded in the selection process. For example: + +.. code-block:: + + shell$ mpirun --mca pml cm --mca mtl ofi --mca mtl_ofi_provider_include psm2 mpi_hello + +In addition, each component has specific parameters for each one; see +``ompi_info --param <framework> <component> --level 9`` for a full +list. For example: + +.. code-block:: + + shell$ ompi_info --param mtl ofi --level 9 + +For more information refer to the `Libfabric web site +`_. + +///////////////////////////////////////////////////////////////////////// + +Omni-Path: How can the multi-rail settings be adjusted if multiple HFI (Host Fabric Interface) cards are installed on the system? +--------------------------------------------------------------------------------------------------------------------------------- + +Multi-Rail feature allows a process to use multiple HFIs to transfer a message +to improve message bandwidth. The PSM2 library handles the support for multi-rail +which is off by default. The multi-rail settings can be modified using the +following environment variables: + +* ``PSM2_MULTIRAIL=[0,1,2]``: 0=Disabled, 1=Enable across all HFIs in the + system, 2=Enable multi-rail within a NUMA node. +* ``PSM2_MULTIRAIL_MAP=unit:port,unit:port...`` + +The variables above may be included in the ``mpirun`` command line or in +the environment. For example: + +.. code-block:: + + shell$ mpirun -mca mtl [psm2|ofi] -x PSM2_MULTIRAIL=1 -np 2 -H host1,host2 ./a.out + +.. note:: When using the OFI MTL, please ensure that the PSM2 OFI + provider is used for communication with OPA devices. 
+ +///////////////////////////////////////////////////////////////////////// + +Omni-Path: What is Multi-HFI support in PSM2 and how does it differ from multi-rail? +------------------------------------------------------------------------------------ + +Multi-HFI support is intended to describe the use of multiple HFIs in +a system among MPI ranks local to a node in order to load-balance the +hardware resources. It differs from the Multi-Rail feature, which is +intended to allow a single process to use all HFIs in the system. For +an MPI job with multiple processes on a single node, the default PSM2 +behavior depends on the affinity settings of the MPI process. The PSM2 +library defaults to using the HFI (Host Fabric Interface) that is in +the same NUMA node as that of the MPI process. + +Users can restrict access to a single HFI using the environment variable: + +* ``HFI_UNIT=N``: valid values of N are 0,1,2 and 3 + +More details can be found on the PSM2 Programmer's Guide and the Omni-Path +Fabric Performance Tuning Guide. + +Please also see `the full Omni-Path documentation +`_ +for more details. diff --git a/docs/networking/opa.rst b/docs/networking/opa.rst new file mode 100644 index 00000000000..ae5fb62b55d --- /dev/null +++ b/docs/networking/opa.rst @@ -0,0 +1,48 @@ +Omni-Path Architecture (OPA) +============================ + +How can the multi-rail settings be adjusted if multiple HFI (Host Fabric Interface) cards are installed on the system? +---------------------------------------------------------------------------------------------------------------------- + +Multi-Rail feature allows a process to use multiple HFIs to transfer a +message to improve message bandwidth. The PSM2 library handles the +support for multi-rail, and is off by default. 
The multi-rail settings +can be modified using the following environment variables: + +* ``PSM2_MULTIRAIL=[0,1,2]``: 0=Disabled, 1=Enable across all HFIs in + the system, 2=Enable multi-rail within a NUMA node +* ``PSM2_MULTIRAIL_MAP=unit:port,unit:port`` + +The variables above may be included in the ``mpirun`` command line or +in the environment. For example: + +.. code-block:: + + shell$ mpirun -mca mtl [psm2|ofi] -x PSM2_MULTIRAIL=1 -np 2 -H host1,host2 ./a.out + +.. note:: When using the OFI MTL, please ensure that the PSM2 OFI provider is used for + communication with OPA devices. + +///////////////////////////////////////////////////////////////////////// + +What is Multi-HFI support in PSM2 and how does it differ from multi-rail? +------------------------------------------------------------------------- + +Multi-HFI support is intended to describe the use of multiple HFIs in +a system among MPI ranks local to a node in order to load-balance the +hardware resources. It differs from Multi-Rail feature which is +intended to allow a single process to use all HFIs in the system. For +an MPI job with multiple ranks in a node, the default PSM2 behavior +depends on the affinity settings of the MPI process. PSM2 defaults to +using the HFI (Host Fabric Interface) that is in the same NUMA node as +that of the MPI process. Users can restrict access to a single HFI +using the environment variable: + +* ``HFI_UNIT=N``: valid values of ``N`` are 0, 1, 2, and 3 + +More details can be found on the PSM2 Programmer's Guide and Omni-Path +Fabric Performance Tuning Guide. The full documentation `is available +here +`_. + +.. error:: TODO Is this still a correct reference? diff --git a/docs/networking/shared-memory.rst b/docs/networking/shared-memory.rst new file mode 100644 index 00000000000..3a7f1129172 --- /dev/null +++ b/docs/networking/shared-memory.rst @@ -0,0 +1,88 @@ +Shared Memory +============= + +What is the sm BTL? 
+------------------- + +The ``sm`` BTL is a low-latency, high-bandwidth mechanism for +transferring data between two processes via shared memory. This BTL +can only be used between processes executing on the same node. + +.. note:: Between Open MPI version 1.8.0 and 4.1.x, the shared memory + BTL was named ``vader``. As of Open MPI version 5.0.0, the + BTL has been renamed ``sm``. + +.. warning:: In Open MPI version 5.0.x, the name ``vader`` is simply + an alias for the ``sm`` BTL. Similarly, all + ``vader_``-prefixed MCA parameters are automatically + aliased to their corresponding ``sm_``-prefixed MCA + parameter. + + This alias mechanism is a legacy transition device, and + will likely disappear in a future release of Open MPI. + +///////////////////////////////////////////////////////////////////////// + +How do I specify use of sm for MPI messages? +-------------------------------------------- + +Typically, it is unnecessary to do so; OMPI will use the best BTL available +for each communication. + +Nevertheless, you may use the MCA parameter ``btl``. You should also +specify the ``self`` BTL for communications between a process and +itself. Furthermore, if not all processes in your job will run on the +same, single node, then you also need to specify a BTL for internode +communications. For example: + +.. code-block:: sh + + shell$ mpirun --mca btl self,sm,tcp -np 16 ./a.out + +///////////////////////////////////////////////////////////////////////// + +How can I tune these parameters to improve performance? +------------------------------------------------------- + +Mostly, the default values of the MCA parameters have already +been chosen to give good performance. To improve performance further +is a little bit of an art. Sometimes, it's a matter of trading off +performance for memory. 
+ +* ``btl_sm_eager_limit``: If message data plus header information fits + within this limit, the message is sent "eagerly" |mdash| that is, a + sender attempts to write its entire message to shared buffers + without waiting for a receiver to be ready. Above this size, a + sender will only write the first part of a message, then wait for + the receiver to acknowledge its readiness before continuing. Eager + sends *can* improve performance by decoupling senders from + receivers. + +* ``btl_sm_max_send_size``: Large messages are sent in fragments of + this size. Larger segments *can* lead to greater efficiencies, + though they could perhaps also inhibit pipelining between sender and + receiver. + +* ``btl_sm_free_list_num``: This is the initial number of fragments on + each (eager and max) free list. The free lists can grow in response + to resource congestion, but you can increase this parameter to + pre-reserve space for more fragments. + +///////////////////////////////////////////////////////////////////////// + +Where is the shared memory mapped on the filesystem? + +.. error:: TODO Is this correct? + +The file will be in the OMPI session directory, which is typically +something like ``/tmp/openmpi-sessions-USERNAME@HOSTNAME/*``. +The file itself will have the name +``shared_mem_pool.HOSTNAME``. For example, the full path could be +``/tmp/openmpi-sessions-johndoe@node0_0/1543/1/shared_mem_pool.node0``. + +.. error:: TODO The filename above will certainly be wrong. + +To place the session directory in a non-default location, use the MCA parameter +``orte_tmpdir_base``. + +.. error:: TODO The MCA param name above is definitely wrong. diff --git a/docs/networking/tcp.rst b/docs/networking/tcp.rst new file mode 100644 index 00000000000..52677db5c72 --- /dev/null +++ b/docs/networking/tcp.rst @@ -0,0 +1,518 @@ +TCP +=== + +How do I specify to use the IP network for MPI messages? 
+-------------------------------------------------------- + +Open MPI will generally automatically use the ``tcp`` BTL when: + +#. The ``tcp`` BTL is available at run time (which it should be on + most POSIX-like systems), and +#. A higher-performance network is not available + +When the ``tcp`` BTL is used, it is typically also (automatically) +used with the ``self`` and ``sm`` BTLs for process-loopback and +node-loopback communication, respectively. + +If you want to guarantee that the ``tcp``, ``sm``, and ``self`` BTLs +are used, you can explicitly specify them on the ``mpirun`` command +line: + +.. code-block:: sh + + shell$ mpirun --mca pml ob1 --mca btl tcp,sm,self ... + +.. warning:: Failure to specify the ``sm`` BTL will likely result in + lower performance when Open MPI uses the TCP network + stack to send to peers on the same host. + +.. warning:: Failure to specify the ``self`` BTL may result in Open + MPI being unable to complete send-to-self scenarios + (meaning that your program will run fine until a process + tries to send to itself). + +///////////////////////////////////////////////////////////////////////// + +But wait |mdash| I'm using a high-speed network. Do I have to disable the TCP BTL? +----------------------------------------------------------------------------------- + +No. Following the so-called "Law of Least Astonishment", Open MPI +assumes that if you have both an IP network and at least one +high-speed network (such as InfiniBand), you will likely only want to use +the high-speed network(s) for MPI message passing. Hence, the ``tcp`` +BTL component will sense this and automatically deactivate itself. + +That being said, Open MPI may still use TCP for setup and teardown +information |mdash| so you'll see traffic across your IP network during +startup and shutdown of your MPI job. This is normal and does not +affect the MPI message passing channels. 
+ +///////////////////////////////////////////////////////////////////////// + +How do I know what MCA parameters are available for tuning MPI performance? +--------------------------------------------------------------------------- + +The ``ompi_info`` command can display all the parameters +available for the ``tcp`` BTL component (i.e., the component that uses +TCP for MPI communications): + +.. code-block:: sh + + shell$ ompi_info --param btl tcp --level 9 + +///////////////////////////////////////////////////////////////////////// + +Does Open MPI use the IP loopback interface? +-------------------------------------------- + +Usually not. + +In general message passing usage, there are two scenarios where using +the operating system IP loopback interface could be used: + +#. Sending a message from one process to itself +#. Sending a message from one process to another process on the same + machine + +The TCP BTL does not handle "send-to-self" scenarios in Open MPI; +indeed, it is not even capable of doing so. Instead, the ``self`` BTL +component is used for all send-to-self MPI communications. Not only +does this allow all Open MPI BTL components to avoid special case code +for send-to-self scenarios, it also allows avoiding using inefficient +loopback network stacks (such as the IP loopback device). + +Specifically: the ``self`` component uses its own mechanisms for +send-to-self scenarios; it does not use operating system network +interfaces such as the IP loopback interface. + +When sending to other processes on the same machine, Open MPI will +default to using a shared memory BTL (``sm``). If the user has +deactivated these BTLs, depending on what other BTL components are +available, it is possible that the TCP BTL will be chosen for message +passing to processes on the same node, in which case the IP loopback +device will likely be used. 
But this is not the default; either +shared memory has to fail to startup properly or the user must +specifically request not to use the shared memory BTL. + +///////////////////////////////////////////////////////////////////////// + +I have multiple IP networks on some/all of my cluster nodes. Which ones will Open MPI use? +------------------------------------------------------------------------------------------- + +In general, Open MPI will greedily use all IP networks that +it finds per its :ref:`reachability computations <faq-tcp-routability>`. + +To change this behavior, you can either specifically include certain +networks or specifically exclude certain networks. :ref:`See this FAQ +entry <faq-tcp-selection>` for more details. + +///////////////////////////////////////////////////////////////////////// + +I'm getting TCP-related errors. What do they mean? +--------------------------------------------------- + +TCP-related errors are usually reported by Open MPI in a message +similar to these: + +.. code-block:: + + btl_tcp_endpoint.c:572:mca_btl_tcp_endpoint_complete_connect: connect() failed with errno=113 + mca_btl_tcp_frag_send: writev failed with errno=104 + +If an `errno` number is displayed with no explanation string, you can +see what that specific error number means on your operating system. +On Linux, you can use the ``perror`` command: + +.. code-block:: sh + + # See what errno 113 is + shell$ perror 113 + OS error code 113: No route to host + + # See what errno 104 is + shell$ perror 104 + OS error code 104: Connection reset by peer + +Two types of errors are commonly reported to the Open MPI user's +mailing list: + +#. **No route to host:** These types of errors *usually* mean that + there are multiple IP interfaces available and they do not obey + Open MPI's assumptions about routability. See :ref:`the TCP + routability assumptions FAQ entry <faq-tcp-routability>` and + :ref:`the TCP selection FAQ entry <faq-tcp-selection>` for more + information. + +#. 
**Connection reset by peer:** These types of errors *usually* occur + after ``MPI_INIT`` has completed, and typically indicate that an + MPI process has died unexpectedly (e.g., due to a catastrophic error + such as a segmentation fault). The specific error message + indicates that a peer MPI process tried to write to the now-dead + MPI process and failed. + +///////////////////////////////////////////////////////////////////////// + +.. _faq-tcp-selection: + +How do I tell Open MPI which IP interfaces / networks to use? +------------------------------------------------------------- + +In some HPC environments, it is not uncommon to have multiple IP +interfaces on each node |mdash| for example, one IP network may be +"slow" and used for control information such as a batch scheduler, a +networked filesystem, and/or interactive logins. Another IP network +(or networks) may be "fast" and be intended for parallel applications +to use during their runs. As another example, some operating systems +may also have virtual interfaces for communicating with virtual +machines. + +Unless otherwise specified, Open MPI will greedily use all "up" IP +networks that it can find and try to connect to all peers *upon +demand* (i.e., Open MPI does not open sockets to all of its MPI peers +during ``MPI_INIT`` |mdash| see :ref:`this FAQ entry +<faq-tcp-sockets>` for more details). Hence, if you want MPI jobs to +not use specific IP networks |mdash| or not use any IP networks at all +|mdash| then you need to tell Open MPI. + +.. warning:: Aggressively using all "up" interfaces can cause problems + in some cases. For example, if you have a machine with a + local-only interface (e.g., the loopback device, or a + virtual-machine bridge device that can only be used *on + that machine*, and cannot be used to communicate with MPI + processes on other machines), you will likely need to + tell Open MPI to ignore these networks. 
+ + Open MPI usually ignores loopback devices by default, but + *other local-only devices must be manually ignored.* + Users have reported cases where RHEL6 automatically + installed a ``virbr0`` device for Xen virtualization. + This interface was automatically given an IP address in + the 192.168.1.0/24 subnet and marked as "up". Since Open + MPI saw this 192.168.1.0/24 "up" interface in all MPI + processes on all nodes, it assumed that that network was + usable for MPI communications. This is obviously + incorrect, and it led to MPI applications hanging when + they tried to send or receive MPI messages. + +#. To disable Open MPI from using TCP for MPI communications, the + ``tcp`` MCA parameter should be set accordingly. You can either + *exclude* the TCP component or *include* all other components. + Specifically: + + .. code-block:: sh + + # This says to exclude the TCP BTL component + # (implicitly including all others) + shell$ mpirun --mca btl ^tcp ... + + # This says to include only the listed BTL components + # (tcp is not listed, and therefore will not be used) + shell$ mpirun --mca btl self,vader,openib ... + +#. If you want to use TCP for MPI communications, but want to restrict + it from certain networks, use the ``btl_tcp_if_include`` or + ``btl_tcp_if_exclude`` MCA parameters (only one of the two should + be set). The values of these parameters can be a comma-delimited + list of network interfaces. For example: + + .. code-block:: sh + + # This says to not use the eth0 and lo interfaces. + # (and implicitly use all the rest). Per the description + # above, IP loopback and all local-only devices *must* + # be included if the exclude list is specified. + shell$ mpirun --mca btl_tcp_if_exclude lo,eth0 ... + + # This says to only use the eth1 and eth2 interfaces + # (and implicitly ignore the rest) + shell$ mpirun --mca btl_tcp_if_include eth1,eth2 ... + +#. You can also specify subnets in the include or exclude lists in + CIDR notation. 
For example: + + .. code-block:: sh + + # Only use the 192.168.1.0/24 and 10.10.0.0/16 subnets for MPI + # communications: + shell$ mpirun --mca btl_tcp_if_include 192.168.1.0/24,10.10.0.0/16 ... + + + .. note:: You must specify the CIDR notation for a given network + precisely. For example, if you have two IP networks + 10.10.0.0/24 and 10.10.1.0/24, Open MPI will not + recognize either of them if you specify "10.10.0.0/16". + +.. warning:: If you use the ``btl_tcp_if_include`` and + ``btl_tcp_if_exclude`` MCA parameters to shape the + behavior of the TCP BTL for MPI communications, you may + also need/want to investigate the corresponding MCA + parameters ``oob_tcp_if_include`` and + ``oob_tcp_if_exclude``, which are used to shape non-MPI + TCP-based communication (e.g., communications setup and + coordination during ``MPI_INIT`` and ``MPI_FINALIZE``). + +.. error:: TODO do corresponding OOB TCP params still exist in PMIx? + +Note that Open MPI will still use TCP for control messages, such as +data between ``mpirun`` and the MPI processes, rendezvous information +during ``MPI_INIT``, etc. To disable TCP altogether, you also need to +disable the ``tcp`` component from the OOB framework. + +.. error:: TODO Is this possible in PMIx? I doubt it...? + +///////////////////////////////////////////////////////////////////////// + +.. _faq-tcp-sockets: + +Does Open MPI open a bunch of sockets during ``MPI_INIT``? +---------------------------------------------------------- + +Although Open MPI is likely to open multiple TCP sockets during +``MPI_INIT``, the ``tcp`` BTL component *does not open one socket per +MPI peer process during MPI_INIT.* Open MPI opens sockets as they +are required |mdash| so the first time a process sends a message to a +peer and there is no TCP connection between the two, Open MPI will +automatically open a new socket. 
+ +Hence, you should not have scalability issues with running large +numbers of processes (e.g., running out of per-process file +descriptors) if your parallel application is sparse in its +communication with peers. + +///////////////////////////////////////////////////////////////////////// + +Are there any Linux kernel TCP parameters that I should set? +------------------------------------------------------------ + +Everyone has different opinions on this, and it also depends +on your exact hardware and environment. Below are general guidelines +that some users have found helpful. + +#. ``net.ipv4.tcp_syn_retries``: Some Linux systems have very large + initial connection timeouts |mdash| they retry sending SYN packets + many times before determining that a connection cannot be made. If + MPI is going to fail to make socket connections, it would be better + for them to fail somewhat quickly (minutes vs. hours). You might + want to reduce this value to a smaller value; YMMV. + +#. ``net.ipv4.tcp_keepalive_time``: Some MPI applications send an + initial burst of MPI messages (over TCP) and then send nothing for + long periods of time (e.g., embarrassingly parallel applications). + Linux may decide that these dormant TCP sockets are dead because it + has seen no traffic on them for long periods of time. You might + therefore need to lengthen the TCP inactivity timeout. Many Linux + systems default to 7,200 seconds; increase it if necessary. + +#. Increase TCP buffering for 10G or 40G Ethernet. Many Linux + distributions come with good buffering presets for 1G Ethernet. In + a datacenter/HPC cluster with 10G or 40G Ethernet NICs, this amount + of kernel buffering is typically insufficient. 
Here's a set of + parameters that some have used for good 10G/40G TCP bandwidth: + + * ``net.core.rmem_max``: 16777216 + * ``net.core.wmem_max``: 16777216 + * ``net.ipv4.tcp_rmem``: 4096 87380 16777216 + * ``net.ipv4.tcp_wmem``: 4096 65536 16777216 + * ``net.core.netdev_max_backlog``: 30000 + * ``net.core.rmem_default``: 16777216 + * ``net.core.wmem_default``: 16777216 + * ``net.ipv4.tcp_mem``: '16777216 16777216 16777216' + * ``net.ipv4.route.flush``: 1 + + Each of the above items is a Linux kernel parameter that can be set + in multiple different ways. + + #. You can change the running kernel via the ``/proc`` filesystem: + + .. code-block:: sh + + shell# cat /proc/sys/net/ipv4/tcp_syn_retries + 5 + shell# echo 6 > /proc/sys/net/ipv4/tcp_syn_retries + + #. You can also use the ``sysctl`` command: + + .. code-block:: sh + + shell# sysctl net.ipv4.tcp_syn_retries + net.ipv4.tcp_syn_retries = 5 + shell# sysctl -w net.ipv4.tcp_syn_retries=6 + net.ipv4.tcp_syn_retries = 6 + + #. Or you can set them by adding entries in ``/etc/sysctl.conf``, + which are persistent across reboots: + + .. code-block:: sh + + shell$ grep tcp_syn_retries /etc/sysctl.conf + net.ipv4.tcp_syn_retries = 6 + + #. Your Linux distro may also support putting individual files in + ``/etc/sysctl.d`` (even if that directory does not yet exist), + which is actually better practice than putting them in + ``/etc/sysctl.conf``. For example: + + .. code-block:: sh + + shell$ cat /etc/sysctl.d/my-tcp-settings + net.ipv4.tcp_syn_retries = 6 + +///////////////////////////////////////////////////////////////////////// + +.. _faq-tcp-routability: + +How does Open MPI know which IP addresses are routable to each other? +--------------------------------------------------------------------- + +Open MPI assumes that all interfaces are routable as long as they have +the same address family, IPv4 or IPv6. We use graph theory and give +each possible connection a weight depending on the quality of the +connection. 
This allows the library to select the best connections +between nodes. This method also supports striping but prevents more +than one connection to any interface. + +The quality of the connection is defined as follows, with a higher +number meaning better connection. Note that when giving a weight to a +connection consisting of a private address and a public address, it +will give it the weight of ``PRIVATE_DIFFERENT_NETWORK``. + +.. code-block:: + + NO_CONNECTION = 0 + PRIVATE_DIFFERENT_NETWORK = 1 + PRIVATE_SAME_NETWORK = 2 + PUBLIC_DIFFERENT_NETWORK = 3 + PUBLIC_SAME_NETWORK = 4 + +An example will best illustrate how two processes on two different +nodes would connect up. Here we have two nodes with a variety of +interfaces: + +.. code-block:: + + Node A Node B + ---------------- ---------------- + | lo0 | | lo0 | + | 127.0.0.1/8 | | 127.0.0.1/8 | + | | | | + | eth0 | | eth0 | + | 10.8.47.1/24 | | 10.8.47.2/24 | + | | | | + | eth1 | | eth1 | + | 192.168.1.1/24 | | 192.168.1.2/24 | + | | | | + | eth2 | | | + | 192.168.2.2/24 | | | + ---------------- ------------------ + +From these two nodes, the software builds up a bipartite graph that +shows all the possible connections with all the possible weights. The +*lo0* interfaces are excluded as the ``btl_tcp_if_exclude`` MCA parameter +is set to *lo* by default. Here is what all the possible connections +with their weights look like. + +.. code-block:: + + Node A Node B + eth0 --------- 2 -------- eth0 + ------- 1 -------- eth1 + + eth1 --------- 1 -------- eth0 + ------- 2 -------- eth1 + + eth2 --------- 1 -------- eth0 + ------- 1 -------- eth1 + +The library then examines all the connections and picks the optimal +ones. This leaves us with two connections being established between +the two nodes. + +If you are curious about the actual ``connect()`` calls being made by +the processes, then you can run with ``--mca btl_base_verbose 30``. 
+This can be useful if you notice your job hanging and believe it may +be the library trying to make connections to unreachable hosts. + +.. code-block:: sh + + # Here is an example with some of the output deleted for clarity. + # One can see the connections that are attempted. + shell$ mpirun --mca btl self,sm,tcp --mca btl_base_verbose 30 -np 2 -host NodeA,NodeB a.out + [...snip...] + [NodeA:18003] btl: tcp: attempting to connect() to address 10.8.47.2 on port 59822 + [NodeA:18003] btl: tcp: attempting to connect() to address 192.168.1.2 on port 59822 + [NodeB:16842] btl: tcp: attempting to connect() to address 192.168.1.1 on port 44500 + [...snip...] + +In case you want more details about the theory behind the connection +code, you can find the background story in `this IEEE paper +`_. + +///////////////////////////////////////////////////////////////////////// + +Does Open MPI ever close TCP sockets? +------------------------------------- + +In general, no. + +Although TCP sockets are opened "lazily" (meaning that MPI +connections / TCP sockets are only opened upon demand |mdash| as opposed to +opening all possible sockets between MPI peer processes during +``MPI_INIT``), they are never closed. + +///////////////////////////////////////////////////////////////////////// + +Does Open MPI support IP interfaces that have more than one IP address? +----------------------------------------------------------------------- + +In general, no. + +For example, if the output from your ``ifconfig`` has a single IP device +with multiple IP addresses like this: + +.. code-block:: + + 0: eth0: mtu 1500 qdisc mq state UP qlen 1000 + link/ether 00:18:ae:f4:d2:29 brd ff:ff:ff:ff:ff:ff + inet 192.168.0.3/24 brd 192.168.0.255 scope global eth0:1 + inet 10.10.0.3/24 brf 10.10.0.255 scope global eth0 + inet6 fe80::218:aef2:29b4:2c4/64 scope link + valid_lft forever preferred_lft forever + +(note the two ``inet`` lines in there) + +Then Open MPI will be unable to use this device. 
+ +///////////////////////////////////////////////////////////////////////// + +Does Open MPI support virtual IP interfaces? +-------------------------------------------- + +No. + +For example, if the output of your ``ifconfig`` has both ``eth0`` and +``eth0:0``, Open MPI will get confused if you use the TCP BTL, and +may hang or otherwise act unpredictably. + +Note that using ``btl_tcp_if_include`` or ``btl_tcp_if_exclude`` to avoid +using the virtual interface will *not* solve the issue. + +///////////////////////////////////////////////////////////////////////// + +Can I use multiple TCP connections to improve network performance? +------------------------------------------------------------------ + +Open MPI can use multiple TCP connections between any pair of MPI +processes, striping large messages across the connections. The +``btl_tcp_links`` parameter can be used to set how many TCP +connections should be established between MPI processes. + +Note that +this may not improve application performance for common use cases of +nearest-neighbor exchanges when there are many MPI processes on each host. In +these cases, there are already many TCP connections between any two +hosts (because of the many processes all communicating), so the extra TCP +connections are likely just consuming extra resources and adding work +to the MPI implementation. + +However, for highly multi-threaded applications, where there are only +one or two MPI processes per host, the ``btl_tcp_links`` option may +improve TCP throughput considerably. diff --git a/docs/news/index.rst b/docs/news/index.rst new file mode 100644 index 00000000000..2361399f03c --- /dev/null +++ b/docs/news/index.rst @@ -0,0 +1,47 @@ +.. Open MPI NEWS + + This page serves as a high level NEWS page which includes NEWS + from various Open MPI series + +News +==== + +This file contains the main features as well as overviews of specific +bug fixes (and other actions) for each version of Open MPI since +version 1.0. + +.. 
error:: GP - move elsewhere and refer to software versioning here. + + As more fully described in the "Software Version Number" section in + the README file, Open MPI typically releases two separate version + series simultaneously. Since these series have different goals and + are semi-independent of each other, a single NEWS-worthy item may be + introduced into different series at different times. For example, + feature F was introduced in the vA.B series at version vA.B.C, and was + later introduced into the vX.Y series at vX.Y.Z. + + The first time feature F is released, the item will be listed in the + vA.B.C section, denoted as: + + (** also to appear: X.Y.Z) -- indicating that this item is also + likely to be included in future release + version vX.Y.Z. + + When vX.Y.Z is later released, the same NEWS-worthy item will also be + included in the vX.Y.Z section and be denoted as: + + (** also appeared: A.B.C) -- indicating that this item was previously + included in release version vA.B.C. + +:ref:`search` + +.. toctree:: + :maxdepth: 1 + + news-master + news-v4.1.x + news-v4.0.x + news-v3.1.x + news-v3.0.x + news-v2.x + news-v1.x diff --git a/docs/news/news-master.rst b/docs/news/news-master.rst new file mode 100644 index 00000000000..c00923fa767 --- /dev/null +++ b/docs/news/news-master.rst @@ -0,0 +1,209 @@ +Master updates (not on release branches yet) +============================================ + +This file generally contains all the updates for Open MPI that have +not yet appeared on a release branch. It reflects active development, +and is therefore a "loose" listing of features and changes. It is not +considered definitive. + +Open MPI version 5.0.0rc2 +------------------------- +:Date: 10 Oct 2021 + +.. admonition:: MPIR API has been removed + :class: warning + + As was announced in summer 2017, Open MPI has removed support of + MPIR-based tools beginning with the release of Open MPI v5.0.0. 
+ + The new PRRTE based runtime environment supports PMIx-tools API + instead of the legacy MPIR API for debugging parallel jobs. + + See https://github.com/openpmix/mpir-to-pmix-guide for more + information + + +.. admonition:: zlib is suggested for better user experience + :class: note + + PMIx will optionally use zlib to compress large data streams. + This may result in shorter-than-normal startup times and + smaller memory footprints. It is recommended to install zlib + and zlib-devel for a better user experience. + +- ORTE, the underlying OMPI launcher has been removed, and replaced + with PRTE. +- Reworked how Open MPI integrates with 3rd party packages. + The decision was made to stop building 3rd-party packages + such as Libevent, HWLOC, PMIx, and PRRTE as MCA components + and instead 1) start relying on external libraries whenever + possible and 2) Open MPI builds the 3rd party libraries (if needed) + as independent libraries, rather than linked into libopen-pal. +- Update to use PMIx v4.1.1rc2 +- Update to use PRRTE v2.0.1rc2 +- Change the default component build behavior to prefer building + components as part of libmpi.so instead of individual DSOs. +- Remove pml/yalla, mxm, mtl/psm, and ikrit components. +- Remove all vestiges of the C/R support. +- Various ROMIO v3.4.1 updates. +- Use Pandoc to generate manpages +- 32 bit atomics are now only supported via C11 compliant compilers. +- Explicitly disable support for GNU gcc < v4.8.1 (note: the default + gcc compiler that is included in RHEL 7 is v4.8.5). +- Do not build Open SHMEM layer when there are no SPMLs available. + Currently, this means the Open SHMEM layer will only build if + the UCX library is found. +- Fix rank-by algorithms to properly rank by object and span. +- Updated the ``-mca pml`` option to only accept one pml, not a list. +- vprotocol/pessimist: Updated to support ``MPI_THREAD_MULTIPLE``. +- btl/tcp: Updated to use reachability and graph solving for global + interface matching. 
This has been shown to improve ``MPI_Init()`` + performance under btl/tcp. +- fs/ime: Fixed compilation errors due to missing header inclusion + Thanks to Sylvain Didelot for finding + and fixing this issue. +- Fixed bug where MPI_Init_thread can give wrong error messages by + delaying error reporting until all infrastructure is running. +- Atomics support removed: S390/s390x, Sparc v9, ARMv4 and ARMv5 CMA + support. +- ``autogen.pl`` now supports a ``-j`` option to run multi-threaded. + Users can also use environment variable ``AUTOMAKE_JOBS``. +- PMI support has been removed for Open MPI apps. +- Legacy btl/sm has been removed, and replaced with btl/vader, which + was renamed to btl/sm. +- Update btl/sm to not use CMA in user namespaces. +- C++ bindings have been removed. +- The ``--am`` and ``--amca`` options have been deprecated. +- opal/mca/threads framework added. Currently supports + argobots, qthreads, and pthreads. See the --with-threads=x option + in configure. +- Various ``README.md`` fixes - thanks to: + Yixin Zhang , + Samuel Cho , + Robert Langfield , + Alex Ross , + Sophia Fang , + mitchelltopaloglu , + Evstrife , and + Hao Tong for their + contributions. +- osc/pt2pt: Removed. Users can use osc/rdma + btl/tcp + for OSC support using TCP, or other providers. +- Open MPI now links -levent_core instead of -levent. +- MPI-4: Added ``ERRORS_ABORT`` infrastructure. +- common/cuda docs: Various fixes. Thanks to + Simon Byrne for finding and fixing. +- osc/ucx: Add support for acc_single_intrinsic. +- Fixed ``buildrpm.sh -r`` option used for RPM options specification. + Thanks to John K. McIver III for + reporting and fixing. +- configure: Added support for setting the wrapper C compiler. + Adds new option ``--with-wrapper-cc=``. +- mpi_f08: Fixed Fortran-8-byte-INTEGER vs. C-4-byte-int issue. + Thanks to @ahaichen for reporting the bug. +- MPI-4: Added support for 'initial error handler'. +- opal/thread/tsd: Added thread-specific-data (tsd) api. 
+- MPI-4: Added error handling for 'unbound' errors to ``MPI_COMM_SELF``. +- Add missing ``MPI_Status`` conversion subroutines: + ``MPI_Status_c2f08()``, ``MPI_Status_f082c()``, ``MPI_Status_f082f()``, + ``MPI_Status_f2f08()`` and the ``PMPI_*`` related subroutines. +- patcher: Removed the Linux component. +- opal/util: Fixed typo in error string. Thanks to + NARIBAYASHI Akira for finding + and fixing the bug. +- fortran/use-mpi-f08: Generate PMPI bindings from the MPI bindings. +- Converted man pages to markdown. + Thanks to Fangcong Yin for their contribution + to this effort. +- Fixed ompi_proc_world error string and some comments in pml/ob1. + Thanks to Julien EMMANUEL for + finding and fixing these issues. +- oshmem/tools/oshmem_info: Fixed Fortran keyword issue when + compiling param.c. Thanks to Pak Lui for + finding and fixing the bug. +- autogen.pl: Patched libtool.m4 for OSX Big Sur. Thanks to + @fxcoudert for reporting the issue. +- Upgraded to HWLOC v2.4.0. +- Removed config/opal_check_pmi.m4. + Thanks to Zach Osman for the contribution. +- opal/atomics: Added load-linked, store-conditional atomics for + AArch64. +- Fixed envvar names to OMPI_MCA_orte_precondition_transports. + Thanks to Marisa Roman + for the contribution. +- fcoll/two_phase: Removed the component. All scenarios it was + used for have been replaced. +- btl/uct: Bumped UCX allowed version to v1.9.x. +- ULFM Fault Tolerance has been added. See ``README.FT.ULFM.md``. +- Fixed a crash during CUDA initialization. + Thanks to Yaz Saito for finding + and fixing the bug. +- Added CUDA support to the OFI MTL. +- ompio: Added atomicity support. +- Singleton comm spawn support has been fixed. +- Autoconf v2.7 support has been updated. +- fortran: Added check for ``ISO_FORTRAN_ENV:REAL16``. Thanks to + Jeff Hammond for reporting this issue. +- Changed the MCA component build style default to static. +- PowerPC atomics: Force usage of opal/ppc assembly. 
+- Removed C++ compiler requirement to build Open MPI. +- Fixed .la files leaking into wrapper compilers. +- Fixed bug where the cache line size was not set soon enough in + ``MPI_Init()``. +- coll/ucc and scoll/ucc components were added. +- coll/ucc: Added support for allgather and reduce collective + operations. +- autogen.pl: Fixed bug where it would not ignore all + excluded components. +- Various datatype bugfixes and performance improvements +- Various pack/unpack bugfixes and performance improvements +- Fix mmap infinite recurse in memory patcher +- Fix C to Fortran error code conversions. +- osc/ucx: Fix data corruption with non-contiguous accumulates +- Update coll/tuned selection rules +- Fix non-blocking collective ops +- btl/portals4: Fix flow control +- Various oshmem:ucx bugfixes and performance improvements +- common/ofi: Disable new monitor API until libfabric 1.14.0 +- Fix AVX detection with icc +- mpirun option ``--mca ompi_display_comm mpi_init``/``mpi_finalize`` + has been added. Enables a communication protocol report: + when ``MPI_Init`` is invoked (using the ``mpi_init`` value) and/or + when ``MPI_Finalize`` is invoked (using the ``mpi_finalize`` value). +- New algorithm for Allgather and Allgatherv added, based on the + paper *"Sparbit: a new logarithmic-cost and data locality-aware MPI + Allgather algorithm"*. Default algorithm selection rules are + un-changed, to use these algorithms add: + ``--mca coll_tuned_allgather_algorithm sparbit`` and/or + ``--mca coll_tuned_allgatherv_algorithm sparbit`` + Thanks to: Wilton Jaciel Loch , + and Guilherme Koslovski for their contribution. +- MPI-4: Persistent collectives have been moved to the MPI + namespace from MPIX. +- OFI: Delay patcher initialization until needed. It will now + be initialized only after the component is officially selected. +- MPI-4: Make ``MPI_Comm_get_info``, ``MPI_File_get_info``, and + ``MPI_Win_get_info`` compliant to the standard. 
+- Portable_platform file has been updated from GASNet. +- GCC versions < 4.8.1 are no longer supported. +- coll: Fix a bug with the libnbc ``MPI_AllReduce`` ring algorithm + when using ``MPI_IN_PLACE``. +- Updated the usage of .gitmodules to use relative paths from + absolute paths. This allows the submodule cloning to use the same + protocol as OMPI cloning. Thanks to Felix Uhl + for the contribution. +- osc/rdma: Add local leader pid in shm file name to make it unique. +- ofi: Fix memory handler unregistration. This change fixes a + segfault during shutdown if the common/ofi component was built + as a dynamic object. +- osc/rdma: Add support for MPI minimum alignment key. +- memory_patcher: Add ability to detect patched memory. Thanks + to Rich Welch for the contribution. +- build: Improve handling of compiler version string. This + fixes a compiler error with clang and armclang. +- Fix bug where the relocation of OMPI packages caused + the launch to fail. +- Various improvements to ``MPI_AlltoAll`` algorithms for both + performance and memory usage. +- coll/basic: Fix segmentation fault in ``MPI_Alltoallw`` with + ``MPI_IN_PLACE``. diff --git a/docs/news/news-v1.x.rst b/docs/news/news-v1.x.rst new file mode 100644 index 00000000000..d72e6b405f5 --- /dev/null +++ b/docs/news/news-v1.x.rst @@ -0,0 +1,3161 @@ +Open MPI v1.x series +==================== + +This file contains all the NEWS updates for all the Open MPI v1.x +series, in reverse chronological order. + +Open MPI 1.10.x series +---------------------- + +Open MPI version 1.10.7 +^^^^^^^^^^^^^^^^^^^^^^^ +:Date: 16 May 2017 + +- Fix bug in TCP BTL that impacted performance on 10GbE (and faster) + networks by not adjusting the TCP send/recv buffer sizes and using + system default values +- Add missing MPI_AINT_ADD and MPI_AINT_DIFF function declarations in + mpif.h +- Fixed time reported by MPI_WTIME; it was previously reported as + dependent upon the CPU frequency. 
+- Fix platform detection on FreeBSD +- Fix a bug in the handling of ``MPI_TYPE_CREATE_DARRAY`` in + ``MPI_(R)(GET_)ACCUMULATE`` +- Fix openib memory registration limit calculation +- Add missing MPI_T_PVAR_SESSION_NULL in mpi.h +- Fix "make distcheck" when using external hwloc and/or libevent packages +- Add latest ConnectX-5 vendor part id to OpenIB device params +- Fix race condition in the UCX PML +- Fix signal handling for rsh launcher +- Fix Fortran compilation errors by removing MPI_SIZEOF in the Fortran + interfaces when the compiler does not support it +- Fixes for the pre-ignore-TKR "mpi" Fortran module implementation + (i.e., for older Fortran compilers -- these problems did not exist + in the "mpi" module implementation for modern Fortran compilers): + - Add ``PMPI_*`` interfaces + - Fix typo in MPI_FILE_WRITE_AT_ALL_BEGIN interface name + - Fix typo in MPI_FILE_READ_ORDERED_BEGIN interface name +- Fixed the type of MPI_DISPLACEMENT_CURRENT in all Fortran interfaces + to be an INTEGER(KIND=MPI_OFFSET_KIND). +- Fixed typos in ``MPI_INFO_GET_*`` man pages. Thanks to Nicolas Joly for + the patch +- Fix typo bugs in wrapper compiler script + + +Open MPI version 1.10.6 +^^^^^^^^^^^^^^^^^^^^^^^ +:Date: 17 Feb 2017 + +- Fix bug in timer code that caused problems at optimization settings + greater than 2 +- OSHMEM: make mmap allocator the default instead of sysv or verbs +- Support MPI_Dims_create with dimension zero +- Update USNIC support +- Prevent 64-bit overflow on timer counter +- Add support for forwarding signals +- Fix bug that caused truncated messages on large sends over TCP BTL +- Fix potential infinite loop when printing a stacktrace + + +Open MPI version 1.10.5 +^^^^^^^^^^^^^^^^^^^^^^^ +:Date: 19 Dec 2016 + +- Update UCX APIs +- Fix bug in darray that caused MPI/IO failures +- Use a MPI_Get_library_version() like string to tag the debugger DLL. 
+ Thanks to Alastair McKinstry for the report +- Fix multi-threaded race condition in coll/libnbc +- Several fixes to OSHMEM +- Fix bug in UCX support due to uninitialized field +- Fix MPI_Ialltoallv with MPI_IN_PLACE and without MPI param check +- Correctly reset receive request type before init. Thanks Chris Pattison + for the report and test case. +- Fix bug in iallgather[v] +- Fix concurrency issue with MPI_Comm_accept. Thanks to Pieter Noordhuis + for the patch +- Fix ompi_coll_base_{gather,scatter}_intra_binomial +- Fixed an issue with MPI_Type_get_extent returning the wrong extent + for distributed array datatypes. +- Re-enable use of rtdtsc instruction as a monotonic clock source if + the processor has a core-invariant tsc. This is a partial fix for a + performance regression introduced in Open MPI v1.10.3. + + +Open MPI version 1.10.4 +^^^^^^^^^^^^^^^^^^^^^^^ +:Date: 10 Sep 2016 + +- Fix assembler support for MIPS +- Improve memory handling for temp buffers in collectives +- Fix [all]reduce with non-zero lower bound datatypes + Thanks Hristo Iliev for the report +- Fix non-standard ddt handling. Thanks Yuki Matsumoto for the report +- Various libnbc fixes. Thanks Yuki Matsumoto for the report +- Fix typos in request RMA bindings for Fortran. Thanks to @alazzaro + and @vondele for the assist +- Various bug fixes and enhancements to collective support +- Fix predefined types mapping in hcoll +- Revive the coll/sync component to resolve unexpected message issues + during tight loops across collectives +- Fix typo in wrapper compiler for Fortran static builds + + +Open MPI version 1.10.3 +^^^^^^^^^^^^^^^^^^^^^^^ +:Date: 15 June 2016 + +- Fix zero-length datatypes. Thanks to Wei-keng Liao for reporting + the issue. +- Minor manpage cleanups +- Implement atomic support in OSHMEM/UCX +- Fix support of MPI_COMBINER_RESIZED. 
Thanks to James Ramsey + for the report +- Fix computation of #cpus when ``--use-hwthread-cpus`` is used +- Add entry points for Allgatherv, iAllgatherv, Reduce, and iReduce + for the HCOLL library +- Fix an HCOLL integration bug that could signal completion of request + while still being worked +- Fix computation of cores when SMT is enabled. Thanks to Ben Menadue + for the report +- Various USNIC fixes +- Create a datafile in the per-proc directory in order to make it + unique per communicator. Thanks to Peter Wind for the report +- Fix zero-size malloc in one-sided pt-to-pt code. Thanks to Lisandro + Dalcín for the report +- Fix MPI_Get_address when passed MPI_BOTTOM to not return an error. + Thanks to Lisandro Dalcín for the report +- Fix MPI_TYPE_SET_ATTR with NULL value. Thanks to Lisandro Dalcín for + the report +- Fix various Fortran08 binding issues +- Fix memchecker no-data case. Thanks to Clinton Stimpson for the report +- Fix CUDA support under OS-X +- Fix various OFI/MTL integration issues +- Add MPI_T man pages +- Fix one-sided pt-to-pt issue by preventing communication from happening + before a target enters a fence, even in the no-precede case +- Fix a bug that disabled Totalview for MPMD use-case +- Correctly support MPI_UNWEIGHTED in topo-graph-neighbors. Thanks to + Jun Kudo for the report +- Fix singleton operations under SLURM when PMI2 is enabled +- Do not use MPI_IN_PLACE in neighborhood collectives for non-blocking + collectives (libnbc). Thanks to Jun Kudo for the report +- Silence autogen deprecation warnings for newer versions of Perl +- Do not return MPI_ERR_PENDING from collectives +- Use type ``int*`` for MPI_WIN_DISP_UNIT, MPI_WIN_CREATE_FLAVOR, and MPI_WIN_MODEL. + Thanks to Alastair McKinstry for the report +- Fix register_datarep stub function in IO/OMPIO. 
Thanks to Eric + Chamberland for the report +- Fix a bus error on MPI_WIN_[POST,START] in the shared memory one-sided component +- Add several missing MPI_WIN_FLAVOR constants to the Fortran support +- Enable connecting processes from different subnets using the openib BTL +- Fix bug in basic/barrier algorithm in OSHMEM +- Correct process binding for the ``--map-by node`` case +- Include support for subnet-to-subnet routing over InfiniBand networks +- Fix usnic resource check +- AUTHORS - Fix an errant reference to Subversion IDs +- Fix affinity for MPMD jobs running under LSF +- Fix many Fortran binding bugs +- Fix ``MPI_IN_PLACE`` related bugs +- Fix PSM/PSM2 support for singleton operations +- Ensure MPI transports continue to progress during RTE barriers +- Update HWLOC to 1.9.1 end-of-series +- Fix a bug in the Java command line parser when the + ``-Djava.library.path`` options was given by the user +- Update the MTL/OFI provider selection behavior +- Add support for clock_gettime on Linux. +- Correctly detect and configure for Solaris Studio 12.5 + beta compilers +- Correctly compute #slots when -host is used for MPMD case +- Fix a bug in the hcoll collectives due to an uninitialized field +- Do not set a binding policy when oversubscribing a node +- Fix hang in intercommunicator operations when oversubscribed +- Speed up process termination during MPI_Abort +- Disable backtrace support by default in the PSM/PSM2 libraries to + prevent unintentional conflicting behavior. 
+ + +Open MPI version 1.10.2 +^^^^^^^^^^^^^^^^^^^^^^^ +:Date: 26 Jan 2016 + +- OSHMEM is now 1.2 compliant +- Fix NBC_Copy for legitimate zero-size messages +- Fix multiple bugs in OSHMEM +- Correctly handle ``mpirun --host @`` +- Centralize two MCA params to avoid duplication between OMPI and + OSHMEM layers: opal_abort_delay and opal_abort_print_stack +- Add support for Fujitsu compilers +- Add UCX support for OMPI and OSHMEM +- Correctly handle oversubscription when not given directives + to permit it. Thanks to @ammore1 for reporting it +- Fix rpm spec file to not include the /usr directory +- Add Intel HFI1 default parameters for the openib BTL +- Resolve symbol conflicts in the PSM2 library +- Add ability to empty the rgpusm cache when full if requested +- Fix another libtool bug when -L requires a space between it + and the path. Thanks to Eric Schnetter for the patch. +- Add support for OSHMEM v1.2 APIs +- Improve efficiency of oshmem_preconnect_all algorithm +- Fix bug in buffered sends support +- Fix double free in edge case of mpirun. Thanks to @jsharpe for + the patch +- Multiple one-sided support fixes +- Fix integer overflow in the tuned "reduce" collective when + using buffers larger than INT_MAX in size +- Fix parse of user environment variables in mpirun. Thanks to + Stefano Garzarella for the patch +- Performance improvements in PSM2 support +- Fix NBS iBarrier for inter-communicators +- Fix bug in vader BTL during finalize +- Improved configure support for Fortran compilers +- Fix rank_file mapper to support default ``--slot-set``. Thanks + to Matt Thompson for reporting it +- Update MPI_Testsome man page. Thanks to Eric Schnetter for + the suggestion +- Fix missing resize of the returned type for subarray and + darray types. Thanks to Keith Bennett and Dan Garmann for + reporting it +- Fix Java support on OSX 10.11. Thanks to Alexander Daryin + for reporting the problem +- Fix some compilation issues on Solaris 11.2. 
Thanks to
+  Paul Hargrove for his continued help in such areas
+
+
+Open MPI version 1.10.1
+^^^^^^^^^^^^^^^^^^^^^^^
+:Date: 4 Nov 2015
+
+- Workaround an optimization problem with gcc compilers >= 4.9.2 that
+  causes problems with memory registration, and forced
+  mpi_leave_pinned to default to 0 (i.e., off). Thanks to @oere for
+  the fix.
+- Fix use of MPI_LB and MPI_UB in subarray and darray datatypes.
+  Thanks to Gus Correa and Dimitar Pashov for pointing out the issue.
+- Minor updates to mpi_show_mpi_alloc_mem_leaks and
+  ompi_debug_show_handle_leaks functionality.
+- Fix segv when invoking non-blocking reductions with a user-defined
+  operation. Thanks to Rupert Nash and Georg Geiser for identifying
+  the issue.
+- No longer probe for PCI topology on Solaris (unless running as root).
+- Fix for Intel Parallel Studio 2016 ifort partial support of the
+  !GCC$ pragma. Thanks to Fabrice Roy for reporting the problem.
+- Bunches of Coverity / static analysis fixes.
+- Fixed ROMIO to look for lstat in ``<sys/stat.h>``. Thanks to William
+  Throwe for submitting the patch both upstream and to Open MPI.
+- Fixed minor memory leak when attempting to open plugins.
+- Fixed typo in MPI_IBARRIER C prototype. Thanks to Harald Servat for
+  reporting the issue.
+- Add missing man pages for MPI_WIN_CREATE_DYNAMIC, MPI_WIN_ATTACH,
+  MPI_WIN_DETACH, MPI_WIN_ALLOCATE, MPI_WIN_ALLOCATE_SHARED.
+- When mpirun-launching new applications, only close file descriptors
+  that are actually open (resulting in a faster launch in some
+  environments).
+- Fix "test ==" issues in Open MPI's configure script. Thanks to Kevin
+  Buckley for pointing out the issue.
+- Fix performance issue in usnic BTL: ensure progress thread is
+  throttled back to not aggressively steal CPU cycles.
+- Fix cache line size detection on POWER architectures.
+- Add missing #include in a few places. Thanks to Orion Poplawski for
+  supplying the patch.
+- When OpenSHMEM building is disabled, no longer install its header + files, help files, or man pages. Add man pages for oshrun, oshcc, + and oshfort. +- Fix mpi_f08 implementations of MPI_COMM_SET_INFO, and profiling + versions of MPI_BUFFER_DETACH, MPI_WIN_ALLOCATE, + MPI_WIN_ALLOCATE_SHARED, MPI_WTICK, and MPI_WTIME. +- Add orte_rmaps_dist_device MCA param, allowing users to map near a + specific device. +- Various updates/fixes to the openib BTL. +- Add missing defaults for the Mellanox ConnectX 3 card to the openib BTL. +- Minor bug fixes in the OFI MTL. +- Various updates to Mellanox's MXM, hcoll, and FCA components. +- Add OpenSHMEM man pages. Thanks to Tony Curtis for sharing the man + pages files from openshmem.org. +- Add missing "const" attributes to MPI_COMPARE_AND_SWAP, + MPI_FETCH_AND_OP, MPI_RACCUMULATE, and MPI_WIN_DETACH prototypes. + Thanks to Michael Knobloch and Takahiro Kawashima for bringing this + to our attention. +- Fix linking issues on some platforms (e.g., SLES 12). +- Fix hang on some corner cases when MPI applications abort. +- Add missing options to mpirun man page. Thanks to Daniel Letai + for bringing this to our attention. +- Add new ``--with-platform-patches-dir`` configure option +- Adjust relative selection priorities to ensure that MTL + support is favored over BTL support when both are available +- Use CUDA IPC for all sized messages for performance + + +Open MPI version 1.10.0 +^^^^^^^^^^^^^^^^^^^^^^^ +:Date: 25 Aug 2015 + +.. important:: + NOTE: The v1.10.0 release marks the transition to Open MPI's new + version numbering scheme. The v1.10.x release series is based on + the v1.8.x series, but with a few new features. v2.x will be the + next series after the v1.10.x series, and complete the transition + to the new version numbering scheme. See README for more details + on the new versioning scheme. + +.. note:: + In accordance with OMPI version numbering, the v1.10 is **not** + API compatible with the v1.8 release series. 
+ +- Added libfabric support (see README for more details): +- usNIC BTL updated to use libfabric. +- Added OFI MTL (usable with PSM in libfabric v1.1.0). +- Added Intel Omni-Path support via new PSM2 MTL. +- Added "yalla" PML for faster MXM support. +- Removed support for MX +- Added persistent distributed virtual machine (pDVM) support for fast + workflow executions. +- Fixed typo in GCC inline assembly introduced in Open MPI v1.8.8. + Thanks to Paul Hargrove for pointing out the issue. +- Add missing man pages for MPI_Win_get|set_info(3). +- Ensure that session directories are cleaned up at the end of a run. +- Fixed linking issues on some OSs where symbols of dependent + libraries are not automatically publicly available. +- Improve hcoll and fca configury library detection. Thanks to David + Shrader for helping track down the issue. +- Removed the LAMA mapper (for use in setting affinity). Its + functionality has been largely superseded by other mpirun CLI + options. +- CUDA: Made the asynchronous copy mode be the default. +- Fix a malloc(0) warning in MPI_IREDUCE_SCATTER_BLOCK. Thanks to + Lisandro Dalcín for reporting the issue. +- Fix typo in MPI_Scatter(3) man page. Thanks to Akshay Venkatesh for + noticing the mistake. +- Add rudimentary protection from TCP port scanners. +- Fix typo in Open MPI error handling. Thanks to Åke Sandgren for + pointing out the error. +- Increased the performance of the CM PML (i.e., the Portals, PSM, + PSM2, MXM, and OFI transports). +- Restored visibility of blocking send requests in message queue + debuggers (e.g., TotalView, DDT). +- Fixed obscure IPv6-related bug in the TCP BTL. +- Add support for the "no_locks" MPI_Info key for one-sided + functionality. +- Fixed ibv_fork support for verbs-based networks. +- Fixed a variety of small bugs in OpenSHMEM. +- Fixed MXM configure with additional CPPFLAGS and LDFLAGS. Thanks to + David Shrader for the patch. +- Fixed incorrect memalign threshhold in the openib BTL. 
Thanks to + Xavier Besseron for pointing out the issue. + + +Open MPI 1.8.x series +--------------------- + +Open MPI version 1.8.8 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 5 Aug 2015 + +- Fix a segfault in MPI_FINALIZE with the PSM MTL. +- Fix mpi_f08 sentinels (e.g., MPI_STATUS_IGNORE) handling. +- Set some additional MXM default values for OSHMEM. +- Fix an invalid memory access in MPI_MRECV and MPI_IMRECV. +- Include two fixes that were mistakenly left out of the official + v1.8.7 tarball: +- Fixed MPI_WIN_POST and MPI_WIN_START for zero-size messages +- Protect the OOB TCP ports from segfaulting when accessed by port + scanners + + +Open MPI version 1.8.7 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 15 Jul 2015 + +.. note:: v1.8.7 technically breaks ABI with prior versions + in the 1.8 series because it repairs two incorrect API + signatures. However, users will only need to recompile + if they were using those functions - which they couldn't + have been, because the signatures were wrong :-) + +- Plugged a memory leak that impacted blocking sends +- Fixed incorrect declaration for MPI_T_pvar_get_index and added + missing return code MPI_T_INVALID_NAME. 
+- Fixed an uninitialized variable in PMI2 support +- Added new vendor part id for Mellanox ConnectX4-LX +- Fixed NBC_Copy for legitimate zero-size messages +- Fixed MPI_Win_post and MPI_Win_start for zero-size messages +- Protect the OOB ports from segfaulting when accessed by port scanners +- Fixed several Fortran typos +- Fixed configure detection of XRC support +- Fixed support for highly heterogeneous systems to avoid + memory corruption when printing out the bindings + + +Open MPI version 1.8.6 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 17 Jun 2015 + +- Fixed memory leak on Mac OS-X exposed by TCP keepalive +- Fixed keepalive support to ensure that daemon/node failure + results in complete job cleanup +- Update Java binding support +- Fixed MPI_THREAD_MULTIPLE bug in vader shared memory BTL +- Fixed issue during shutdown when CUDA initialization wasn't complete +- Fixed orted environment when no prefix given +- Fixed trivial typo in MPI_Neighbor_allgather manpage +- Fixed tree-spawn support for sh and ksh shells +- Several data type fixes +- Fixed IPv6 support bug +- Cleaned up an unlikely build issue +- Fixed PMI2 process map parsing for cyclic mappings +- Fixed memalign threshold in openib BTL +- Fixed debugger access to message queues for blocking send/recv + + +Open MPI version 1.8.5 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 5 May 2015 + +- Fixed configure problems in some cases when using an external hwloc + installation. Thanks to Erick Schnetter for reporting the error and + helping track down the source of the problem. +- Fixed linker error on OS X when using the clang compiler. Thanks to + Erick Schnetter for reporting the error and helping track down the + source of the problem. +- Fixed MPI_THREAD_MULTIPLE deadlock error in the vader BTL. Thanks + to Thomas Klimpel for reporting the issue. +- Fixed several Valgrind warnings. Thanks for Lisandro Dalcín for + contributing a patch fixing some one-sided code paths. 
+- Fixed version compatibility test in OOB that broke ABI within the + 1.8 series. NOTE: this will not resolve the problem between pre-1.8.5 + versions, but will fix it going forward. +- Fix some issues related to running on Intel Xeon Phi coprocessors. +- Opportunistically switch away from using GNU Libtool's libltdl + library when possible (by default). +- Fix some VampirTrace errors. Thanks to Paul Hargrove for reporting + the issues. +- Correct default binding patterns when ``--use-hwthread-cpus`` was + specified and nprocs <= 2. +- Fix warnings about -finline-functions when compiling with clang. +- Updated the embedded hwloc with several bug fixes, including the + "duplicate Lhwloc1 symbol" that multiple users reported on some + platforms. +- Do not error when mpirun is invoked with with default bindings + (i.e., no binding was specified), and one or more nodes do not + support bindings. Thanks to Annu Desari for pointing out the + problem. +- Let root invoke ``mpirun --version`` to check the version without + printing the "Don't run as root!" warnings. Thanks to Robert McLay + for the suggestion. +- Fixed several bugs in OpenSHMEM support. +- Extended vader shared memory support to 32-bit architectures. +- Fix handling of very large datatypes. Thanks to Bogdan Sataric for + the bug report. +- Fixed a bug in handling subarray MPI datatypes, and a bug when using + MPI_LB and MPI_UB. Thanks to Gus Correa for pointing out the issue. +- Restore user-settable bandwidth and latency PML MCA variables. +- Multiple bug fixes for cleanup during MPI_FINALIZE in unusual + situations. +- Added support for TCP keepalive signals to ensure timely termination + when sockets between daemons cannot be created (e.g., due to a + firewall). +- Added MCA parameter to allow full use of a SLURM allocation when + started from a tool (supports LLNL debugger). +- Fixed several bugs in the configure logic for PMI and hwloc. +- Fixed incorrect interface index in TCP communications setup. 
Thanks + to Mark Kettenis for spotting the problem and providing a patch. +- Fixed MPI_IREDUCE_SCATTER with single-process communicators when + MPI_IN_PLACE was not used. +- Added XRC support for OFED v3.12 and higher. +- Various updates and bug fixes to the Mellanox hcoll collective + support. +- Fix problems with Fortran compilers that did not support + ``REAL*16``/``COMPLEX*32`` types. Thanks to Orion Poplawski for + identifying the issue. +- Fixed problem with rpath/runpath support in pkg-config files. + Thanks to Christoph Junghans for notifying us of the issue. +- Man page fixes: + + - Removed erroneous "color" discussion from MPI_COMM_SPLIT_TYPE. + Thanks to Erick Schnetter for spotting the outdated text. + - Fixed prototypes for MPI_IBARRIER. Thanks to Maximilian for + finding the issue. + - Updated docs about buffer usage in non-blocking communications. + Thanks to Alexander Pozdneev for citing the outdated text. + - Added documentation about the 'ompi_unique' MPI_Info key with + MPI_PUBLISH_NAME. + - Fixed typo in MPI_INTERCOMM_MERGE. Thanks to Harald Servat for + noticing and sending a patch. + - Updated configure paths in HACKING. Thanks to Maximilien Levesque + for the fix. + - Fixed Fortran typo in MPI_WIN_LOCK_ALL. Thanks to Thomas Jahns + for pointing out the issue. + +- Fixed a number of MPI one-sided bugs. +- Fixed MPI_COMM_SPAWN when invoked from a singleton job. +- Fixed a number of minor issues with CUDA support, including + registering of shared memory and supporting reduction support for + GPU buffers. +- Improved support for building OMPI on Cray platforms. +- Fixed performance regression introduced by the inadvertent default + enabling of MPI_THREAD_MULTIPLE support. + + +Open MPI version 1.8.4 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 19 Dec 2014 + +- Fix MPI_SIZEOF; now available in mpif.h for modern Fortran compilers + (see README for more details). Also fixed various compiler/linker + errors. 
+- Fixed inadvertent Fortran ABI break between v1.8.1 and v1.8.2 in the
+  mpi interface module when compiled with gfortran >= v4.9.
+- Fix various MPI_THREAD_MULTIPLE issues in the TCP BTL.
+- mpirun no longer requires the ``--hetero-nodes`` switch; it will
+  automatically detect when running in heterogeneous scenarios.
+- Update LSF support, to include revamped affinity functionality.
+- Update embedded hwloc to v1.9.1.
+- Fixed max registerable memory computation in the openib BTL.
+- Updated error message when debuggers are unable to find various
+  symbols/types to be more clear. Thanks to Dave Love for raising the
+  issue.
+- Added proper support for LSF and PBS/Torque libraries in static builds.
+- Rankfiles now support physical processor IDs.
+- Fixed potential hang in MPI_ABORT.
+- Fixed problems with the PSM MTL and "re-connect" scenarios, such as
+  MPI_INTERCOMM_CREATE.
+- Fix MPI_IREDUCE_SCATTER with a single process.
+- Fix (rare) race condition in stdout/stderr funneling to mpirun where
+  some trailing output could get lost when a process terminated.
+- Removed inadvertent change that set ``--enable-mpi-thread-multiple``
+  "on" by default, thus impacting performance for non-threaded apps.
+- Significantly reduced startup time by optimizing internal hash table
+  implementation.
+- Fixed OS X linking with the Fortran mpi module when used with
+  gfortran >= 4.9. Thanks to Github user yafshar for raising the
+  issue.
+- Fixed memory leak on Cygwin platforms. Thanks to Marco Atzeri for
+  reporting the issue.
+- Fixed seg fault in neighborhood collectives when the degree of the
+  topology is higher than the communicator size. Thanks to Lisandro
+  Dalcín for reporting the issue.
+- Fixed segfault in neighborhood collectives under certain use-cases.
+- Fixed various issues regarding Solaris support. Thanks to Siegmar
+  Gross for patiently identifying all the issues.
+- Fixed PMI configure tests for certain Slurm installation patterns.
+- Fixed param registration issue in Java bindings. Thanks to Takahiro + Kawashima and Siegmar Gross for identifying the issue. +- Several man page fixes. +- Silence several warnings and close some memory leaks (more remain, + but it's better than it was). +- Re-enabled the use of CMA and knem in the shared memory BTL. +- Updated mpirun manpage to correctly explain new map/rank/binding options. +- Fixed MPI_IALLGATHER problem with intercommunicators. Thanks for + Takahiro Kawashima for the patch. +- Numerous updates and performance improvements to OpenSHMEM. +- Turned off message coalescing in the openib BTL until a proper fix + for that capability can be provided (tentatively expected for 1.8.5) +- Fix a bug in iof output that dates back to the dinosaurs which would + output extra bytes if the system was very heavily loaded +- Fix a bug where specifying mca_component_show_load_errors=0 could + cause ompi_info to segfault +- Updated valgrind suppression file + + +Open MPI version 1.8.3 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 26 Sep 2014 + +- Fixed application abort bug to ensure that MPI_Abort exits appropriately + and returns the provided exit status +- Fixed some alignment (not all) issues identified by Clang +- Allow CUDA-aware to work with nonblocking collectives. Forces packing to + happen when using GPU buffers. +- Fixed configure test issue with Intel 2015 Fortran compiler +- Fixed some PGI-related errors +- Provide better help message when encountering a firewall +- Fixed MCA parameter quoting to protect multi-word params and params + that contain special characters +- Improved the bind-to help message to clarify the defaults +- Add new MPI-3.1 tools interface +- Several performance optimizations and memory leak cleanups +- Turn off the coll/ml plugin unless specifically requested as it + remains in an experimental state +- Fix LSF support by adding required libraries for the latest LSF + releases. Thanks to Joshua Randal for supplying the initial + patches. 
+ + +Open MPI version 1.8.2 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 25 Aug 2014 + +- Fix auto-wireup of OOB, allowing ORTE to automatically + test all available NICs +- "Un-deprecate" pernode, npernode, and npersocket options + by popular demand +- Add missing Fortran bindings for MPI_WIN_LOCK_ALL, + MPI_WIN_UNLOCK_ALL, and MPI_WIN_SYNC. +- Fix cascading/over-quoting in some cases with the rsh/ssh-based + launcher. Thanks to multiple users for raising the issue. +- Properly add support for gfortran 4.9 ignore TKR pragma (it was + erroneously only partially added in v1.7.5). Thanks to Marcus + Daniels for raising the issue. +- Update/improve help messages in the usnic BTL. +- Resolve a race condition in MPI_Abort. +- Fix obscure cases where static linking from wrapper compilers would + fail. +- Clarify the configure ``--help`` message about when OpenSHMEM is + enabled/disabled by default. Thanks to Paul Hargrove for the + suggestion. +- Align pages properly where relevant. Thanks to Paul Hargrove for + identifying the issue. +- Various compiler warning and minor fixes for OpenBSD, FreeBSD, and + Solaris/SPARC. Thanks to Paul Hargrove for the patches. +- Properly pass function pointers from Fortran to C in the mpi_f08 + module, thereby now supporting gfortran 4.9. Thanks to Tobias + Burnus for assistance and testing with this issue. +- Improve support for Cray CLE 5. +- Fix mpirun regression: ensure exit status is non-zero if mpirun is + terminated due to signal. +- Improved CUDA efficiency of asynchronous copies. +- Fix to parameter type in MPI_Type_indexed.3. Thanks to Bastian + Beischer for reporting the mistake. +- Fix NUMA distance calculations in the openib BTL. +- Decrease time required to shut down mpirun at the end of a job. +- More RMA fixes. +- More hostfile fixes from Tetsuya Mishima. +- Fix darray issue where UB was not computed correctly. +- Fix mpi_f08 parameter name for MPI_GET_LIBRARY_VERSION. Thanks to + Junchao Zhang for pointing out the issue. 
+- Ensure mpirun aborts properly when unable to map processes in + scheduled environments. +- Ensure that MPI RMA error codes show up properly. Thanks to + Lisandro Dalcín for reporting the issue. +- Minor bug fixes and improvements to the bash and zsh mpirun + autocompletion scripts. +- Fix sequential mpirun process mapper. Thanks to Bill Chen for + reporting the issue. +- Correct SLURM stdout/stderr redirection. +- Added missing portals 4 files. +- Performance improvements for blocking sends and receives. +- Lots of cleanup to the ml collective component +- Added new Java methods to provide full MPI coverage +- Many OSHMEM cleanups +- Prevent comm_spawn from automatically launching a VM across + all available nodes +- Close many memory leaks to achieve valgrind-clean operation +- Better handling of TCP connection discovery for mismatched networks + where we don't have a direct 1:1 subnet match between nodes +- Prevent segfault when OMPI info tools are used in pipes and user + exits one step of that pipe before completing output + + +Open MPI version 1.8.1 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 23 Apr 2014 + +- Fix for critical bug: mpirun removed files (but not directories) + from / when run as root. Thanks to Jay Fenlason and Orion Poplawski + for bringing the issue to our attention and helping identify the + fix. + + +Open MPI version 1.8.0 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 31 Mar 2014 + +- Commit upstream ROMIO fix for mixed NFS+local filesystem environments. +- Several fixes for MPI-3 one-sided support. For example, + arbitrary-length datatypes are now supported. +- Add config support for the Mellanox ConnectX 4 card. +- Add missing MPI_COMM_GET|SET_INFO functions, and missing + MPI_WEIGHTS_EMPTY and MPI_ERR_RMA_SHARED constants. Thanks to + Lisandro Dalcín for pointing out the issue. +- Update some help messages in OSHMEM, the usnic BTL, the TCP BTL, and + ORTE, and update documentation about ompi_info's ``--level`` option. +- Fix some compiler warnings. 
+- Ensure that ORTE daemons are not bound to a single processor
+  if TaskAffinity is set on by default in Slurm. Thanks to Artem Polyakov
+  for identifying the problem and providing a patch
+
+
+Open MPI 1.7.x series
+---------------------
+
+Open MPI version 1.7.5
+^^^^^^^^^^^^^^^^^^^^^^
+:Date: 20 Mar 2014
+
+.. attention::
+   Open MPI is now fully MPI-3.0 compliant
+
+- Add Linux OpenSHMEM support built on top of Open MPI's MPI
+  layer. Thanks to Mellanox for contributing this new feature.
+- Allow restricting ORTE daemons to specific cores using the
+  orte_daemon_cores MCA param.
+- Ensure to properly set "locality" flags for processes launched via
+  MPI dynamic functions such as MPI_COMM_SPAWN.
+- Fix MPI_GRAPH_CREATE when nnodes is smaller than the size of the old
+  communicator.
+- usnic BTL now supports underlying UDP transport.
+- usnic BTL now checks for common connectivity errors at first send to
+  a remote server.
+- Minor scalability improvements in the usnic BTL.
+- ompi_info now lists whether the Java MPI bindings are available or not.
+- MPI-3: mpi.h and the Fortran interfaces now report MPI_VERSION==3
+  and MPI_SUBVERSION==0.
+- MPI-3: Added support for new RMA functions and functionality.
+- Fix MPI_Info "const" buglet. Thanks to Orion Poplawski for
+  identifying the issue.
+- Multiple fixes to mapping/binding options. Thanks to Tetsuya Mishima
+  for his assistance.
+- Multiple fixes for normal and abnormal process termination,
+  including singleton MPI_Abort and ensuring to kill entire process
+  groups when abnormally terminating a job.
+- Fix DESTDIR install for javadocs. Thanks to Orion Poplawski for
+  pointing out the issue.
+- Various performance improvements for the MPI Java bindings.
+- OMPI now uses its own internal random number generator and will not
+  perturb srand() and friends.
+- Some cleanups for Cygwin builds. Thanks to Marco Atzeri for the
+  patches.
+- Add a new collective component (coll/ml) that provides substantially
+  improved performance. It is still experimental, and requires
+  setting coll_ml_priority > 0 to become active.
+- Add version check during startup to ensure you are using the same
+  version of Open MPI on all nodes in a job.
+- Significantly improved the performance of MPI_DIMS_CREATE for large
+  values. Thanks to Andreas Schäfer for the contribution.
+- Removed ASYNCHRONOUS keyword from the "ignore TKR" mpi_f08 module.
+- Deprecated the following mpirun options:
+  ``--bynode, --bycore, --byslot``: replaced with ``--map-by node|core|slot``.
+  ``--npernode, --npersocket``: replaced with ``--map-by ppr:N:node`` and
+  ``--map-by ppr:N:socket``, respectively
+- Pick NFS "infinitely stale" fix from ROMIO upstream.
+- Various PMI2 fixes and extension to support broader range of mappings.
+- Improve launch performance at large scale.
+- Add support for PBS/Torque environments that set environment
+  variables to indicate the number of slots available on each node.
+  Set the ras_tm_smp MCA parameter to "1" to enable this mode.
+- Add new, more scalable endpoint exchange (commonly called "modex")
+  method that only exchanges endpoint data on a per-peer basis
+  on first message. Not all transports have been updated to use
+  this feature. Set the rte_orte_direct_modex parameter to "1"
+  to enable this mode.
+
+
+Open MPI version 1.7.4
+^^^^^^^^^^^^^^^^^^^^^^
+:Date: 5 Feb 2014
+
+.. important::
+   As of release 1.7.4, Open MPI's default mapping, ranking, and binding
+   settings have changed:
+
+   - Mapping:
+
+     * if #procs <= 2, default to map-by core
+     * if #procs > 2, default to map-by socket
+
+   - Ranking:
+
+     * if default mapping is used, then default to rank-by slot
+     * if map-by ``<obj>`` is given, then default to rank-by ``<obj>``,
+       where ``<obj>`` is whatever object we mapped against
+
+   - Binding:
+
+     * default to bind-to core
+
+   - Users can override any of these settings individually using the
+     corresponding MCA parameter. Note that multi-threaded applications
+     in particular may want to override at least the binding default
+     to allow threads to use multiple cores.
+
+- Restore version number output in ``ompi_info --all``.
+- Various bug fixes for the mpi_f08 Fortran bindings.
+- Fix ROMIO compile error with Lustre 2.4. Thanks to Adam Moody for
+  reporting the issue.
+- Various fixes for 32 bit platforms.
+- Add ability to selectively disable building the mpi or mpi_f08
+  module. See the README file for details.
+- Fix MX MTL finalization issue.
+- Fix ROMIO issue when opening a file with MPI_MODE_EXCL.
+- Fix PowerPC and MIPS assembly issues.
+- Various fixes to the hcoll and FCA collective offload modules.
+- Prevent integer overflow when creating datatypes. Thanks to
+  original patch from Gilles Gouaillardet.
+- Port some upstream hwloc fixes to Open MPI's embedded copy for
+  working around buggy NUMA node cpusets and including missing header
+  files. Thanks to Jeff Becker and Paul Hargrove for reporting the
+  issues.
+- Fix recursive invocation issues in the MXM MTL.
+- Various bug fixes to the new MCA parameter back-end system.
+- Have the posix fbtl module link against -laio on NetBSD platforms.
+  Thanks to Paul Hargrove for noticing the issue.
+- Various updates and fixes to network filesystem detection to support
+  more operating systems.
+- Add gfortran v4.9 "ignore TKR" syntax to the mpi Fortran module.
+- Various compiler fixes for several BSD-based platforms. Thanks to
+  Paul Hargrove for reporting the issues.
+- Fix when MPI_COMM_SPAWN[_MULTIPLE] is used on oversubscribed
+  systems.
+- Change the output from ``--report-bindings`` to simply state that a
+  process is not bound, instead of reporting that it is bound to all
+  processors.
+- Per MPI-3.0 guidance, remove support for all MPI subroutines with
+  choice buffers from the TKR-based mpi Fortran module. Thanks to Jed
+  Brown for raising the issue.
+- Only allow the usnic BTL to build on 64 bit platforms.
+- Various bug fixes to SLURM support, to include ensuring proper + exiting on abnormal termination. +- Ensure that MPI_COMM_SPAWN[_MULTIPLE] jobs get the same mapping + directives that were used with mpirun. +- Fixed the application of TCP_NODELAY. +- Change the TCP BTL to not warn if a non-existent interface is + ignored. +- Restored the "--bycore" mpirun option for backwards compatibility. +- Fixed debugger attach functionality. Thanks to Ashley Pittman for + reporting the issue and suggesting the fix. +- Fixed faulty MPI_IBCAST when invoked on a communicator with only + one process. +- Add new Mellanox device IDs to the openib BTL. +- Progress towards cleaning up various internal memory leaks as + reported by Valgrind. +- Fixed some annoying flex-generated warnings that have been there for + years. Thanks to Tom Fogal for the initial patch. +- Support user-provided environment variables via the "env" info key + to MPI_COMM_SPAWN[_MULTIPLE]. Thanks to Tom Fogal for the feature + request. +- Fix uninitialized variable in MPI_DIST_GRAPH_CREATE. +- Fix a variety of memory errors on SPARC platforms. Thanks to + Siegmar Gross for reporting and testing all the issues. +- Remove Solaris threads support. When building on Solaris, pthreads + will be used. +- Correctly handle the convertor internal stack for persistent + receives. Thanks to Guillaume Gouaillardet for identifying the + problem. +- Add support for using an external libevent via ``--with-libevent``. + See the README for more details. +- Various OMPIO updates and fixes. +- Add support for the MPIEXEC_TIMEOUT environment variable. If set, + mpirun will terminate the job after this many seconds. +- Update the internal copy of ROMIO to that which shipped in MPICH + 3.0.4. +- Various performance tweaks and improvements in the usnic BTL, + including now reporting MPI_T performance variables for each usnic + device. 
+- Fix to not access send datatypes for non-root processes with + MPI_ISCATTER[V] and MPI_IGATHER[V]. Thanks to Pierre Jolivet for + supplying the initial patch. +- Update VampirTrace to 5.14.4.9. +- Fix ptmalloc2 hook disable when used with ummunotify. +- Change the default connection manager for the openib BTL to be based + on UD verbs data exchanges instead of ORTE OOB data exchanges. +- Fix Fortran compile error when compiling with 8-byte INTEGERs and + 4-byte ints. +- Fix C++11 issue identified by Jeremiah Willcock. +- Many changes, updates, and bug fixes to the ORTE run-time layer. +- Correctly handle MPI_REDUCE_SCATTER with recvcounts of 0. +- Update man pages for MPI-3, and add some missing man pages for + MPI-2.x functions. +- Updated mpi_f08 module in accordance with post-MPI-3.0 errata which + basically removed BIND(C) from all interfaces. +- Fixed MPI_IN_PLACE detection for MPI_SCATTER[V] in Fortran + routines. Thanks to Charles Gerlach for identifying the issue. +- Added support for routable RoCE to the openib BTL. +- Update embedded hwloc to v1.7.2. +- ErrMgr framework redesigned to better support fault tolerance development + activities. See the following RFC for details: + https://www.open-mpi.org/community/lists/devel/2010/03/7589.php +- Added database framework to OPAL and changed all modex operations + to flow thru it, also included additional system info in the + available data +- Added staged state machine to support sequential work flows +- Added distributed file system support for accessing files across + nodes that do not have networked file systems +- Extended filem framework to support scalable pre-positioning of + files for use by applications, adding new "raw" component that + transmits files across the daemon network +- Native Windows support has been removed. A cygwin package is + available from that group for Windows-based use. +- Added new MPI Java bindings. See the Javadocs for more details on + the API. 
+- Wrapper compilers now add rpath support by default to generated + executables on systems that support it. This behavior can be + disabled via ``--disable-wrapper-rpath``. See note in README about ABI + issues when using rpath in MPI applications. +- Added a new parallel I/O component and multiple new frameworks to + support parallel I/O operations. +- Fixed MPI_STATUS_SIZE Fortran issue when used with 8-byte Fortran + INTEGERs and 4-byte C ints. Since this issue affects ABI, it is + only enabled if Open MPI is configured with + ``--enable-abi-breaking-fortran-status-i8-fix``. Thanks to Jim Parker + for supplying the initial patch. +- Add support for Intel Phi SCIF transport. +- For CUDA-aware MPI configured with CUDA 6.0, use new pointer + attribute to avoid extra synchronization in stream 0 when using + CUDA IPC between GPUs on the same node. +- For CUDA-aware MPI configured with CUDA 6.0, compile in support + of GPU Direct RDMA in openib BTL to improve small message latency. +- Updated ROMIO from MPICH v3.0.4. +- MPI-3: Added support for remaining non-blocking collectives. +- MPI-3: Added support for neighborhood collectives. +- MPI-3: Updated C bindings with consistent use of []. +- MPI-3: Added the const keyword to read-only buffers. +- MPI-3: Added support for non-blocking communicator duplication. +- MPI-3: Added support for non-collective communicator creation. + + +Open MPI version 1.7.3 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 17 Oct 2013 + +- Make CUDA-aware support dynamically load libcuda.so so CUDA-aware + MPI library can run on systems without CUDA software. +- Fix various issues with dynamic processes and intercommunicator + operations under Torque. Thanks to Suraj Prabhakaran for reporting + the problem. +- Enable support for the Mellanox MXM2 library by default. +- Improve support for Portals 4. +- Various Solaris fixes. Many thanks to Siegmar Gross for his + incredible patience in reporting all the issues. 
+- MPI-2.2: Add reduction support for ``MPI_C_*COMPLEX`` and ``MPI::*COMPLEX``. +- Fixed internal accounting when openpty() fails. Thanks to Michal + Peclo for reporting the issue and providing a patch. +- Fixed too-large memory consumption in XRC mode of the openib BTL. + Thanks to Alexey Ryzhikh for the patch. +- Add bozo check for negative np values to mpirun to prevent a + deadlock. Thanks to Upinder Malhi for identifying the issue. +- Fixed MPI_IS_THREAD_MAIN behavior. Thanks to Lisandro Dalcín for + pointing out the problem. +- Various rankfile fixes. +- Fix functionality over iWARP devices. +- Various memory and performance optimizations and tweaks. +- Fix MPI_Cancel issue identified by Fujitsu. +- Add missing support for MPI_Get_address in the "use mpi" TKR + implementation. Thanks to Hugo Gagnon for identifying the issue. +- MPI-3: Add support for MPI_Count. +- MPI-2.2: Add missing MPI_IN_PLACE support for MPI_ALLTOALL. +- Added new usnic BTL to support the Cisco usNIC device. +- Minor VampirTrace update to 5.14.4.4. +- Removed support for ancient OS X systems (i.e., prior to 10.5). +- Fixed obscure packing/unpacking datatype bug. Thanks to Takahiro + Kawashima for identifying the issue. +- Add run-time support for PMI2 environments. +- Update openib BTL default parameters to include support for Mellanox + ConnectX3-Pro devices. +- Update libevent to v2.0.21. +- ``ompi_info --param `` now only shows a small number + of MCA parameters by default. Add ``--level 9`` or ``--all`` to see + **all** MCA parameters. See README for more details. +- Add support for asynchronous CUDA-aware copies. +- Add support for Mellanox MPI collective operation offload via the + "hcoll" library. +- MPI-3: Add support for the MPI_T interface. Open MPI's MCA + parameters are now accessible via the MPI_T control variable + interface. Support has been added for a small number of MPI_T + performance variables. +- Add Gentoo memory hooks override. 
Thanks to Justin Bronder for the + patch. +- Added new "mindist" process mapper, allowing placement of processes + via PCI locality information reported by the BIOS. +- MPI-2.2: Add support for MPI_Dist_graph functionality. +- Enable generic, client-side support for PMI2 implementations. Can + be leveraged by any resource manager that implements PMI2; e.g. SLURM, + versions 2.6 and higher. + + +Open MPI version 1.7.2 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 26 Jun 2013 + +- Major VampirTrace update to 5.14.4.2. + (** also appeared: 1.6.5) +- Fix to set flag==1 when MPI_IPROBE is called with MPI_PROC_NULL. + (** also appeared: 1.6.5) +- Set the Intel Phi device to be ignored by default by the openib BTL. + (** also appeared: 1.6.5) +- Decrease the internal memory storage used by intrinsic MPI datatypes + for Fortran types. Thanks to Takahiro Kawashima for the initial + patch. + (** also appeared: 1.6.5) +- Fix total registered memory calculation for Mellanox ConnectIB and + OFED 2.0. + (** also appeared: 1.6.5) +- Fix possible data corruption in the MXM MTL component. + (** also appeared: 1.6.5) +- Remove extraneous -L from hwloc's embedding. Thanks to Stefan + Friedel for reporting the issue. + (** also appeared: 1.6.5) +- Fix contiguous datatype memory check. Thanks to Eric Chamberland + for reporting the issue. + (** also appeared: 1.6.5) +- Make the openib BTL more friendly to ignoring verbs devices that are + not RC-capable. + (** also appeared: 1.6.5) +- Fix some MPI datatype engine issues. Thanks to Thomas Jahns for + reporting the issue. + (** also appeared: 1.6.5) +- Add INI information for Chelsio T5 device. + (** also appeared: 1.6.5) +- Integrate MXM STREAM support for MPI_ISEND and MPI_IRECV, and other + minor MXM fixes. + (** also appeared: 1.6.5) +- Fix to not show amorphous "MPI was already finalized" error when + failing to MPI_File_close an open file. Thanks to Brian Smith for + reporting the issue. 
+ (** also appeared: 1.6.5) +- Add a distance-based mapping component to find the socket "closest" + to the PCI bus. +- Fix an error that caused epoll to automatically be disabled + in libevent. +- Upgrade hwloc to 1.5.2. +- **Really** fixed XRC compile issue in Open Fabrics support. +- Fix MXM connection establishment flow. +- Fixed parallel debugger ability to attach to MPI jobs. +- Fixed some minor memory leaks. +- Fixed datatype corruption issue when combining datatypes of specific + formats. +- Added Location Aware Mapping Algorithm (LAMA) mapping component. +- Fixes for MPI_STATUS handling in corner cases. +- Add a distance-based mapping component to find the socket "closest" + to the PCI bus. + + +Open MPI version 1.7.1 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 16 Apr 2013 + +- Fixed compile error when ``--without-memory-manager`` was specified + on Linux +- Fixed XRC compile issue in Open Fabrics support. + + +Open MPI version 1.7.0 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 1 Apr 2013 + +- Added MPI-3 functionality: + + - MPI_GET_LIBRARY_VERSION + - Matched probe + - MPI_TYPE_CREATE_HINDEXED_BLOCK + - Non-blocking collectives + - MPI_INFO_ENV support + - Fortran '08 bindings (see below) + +- Dropped support for checkpoint/restart due to loss of maintainer :-( +- Enabled compile-time warning of deprecated MPI functions by default + (in supported compilers). +- Revamped Fortran MPI bindings (see the README for details): + + - "mpifort" is now the preferred wrapper compiler for Fortran + - Added "use mpi_f08" bindings (for compilers that support it) + - Added better "use mpi" support (for compilers that support it) + - Removed incorrect MPI_SCATTERV interface from "mpi" module that + was added in the 1.5.x series for ABI reasons. + +- Lots of VampirTrace upgrades and fixes; upgrade to v5.14.3. +- Modified process affinity system to provide warning when bindings + result in being "bound to all", which is equivalent to not being + bound. 
+- Removed maffinity, paffinity, and carto frameworks (and associated + MCA params). +- Upgraded to hwloc v1.5.1. +- Added performance improvements to the OpenIB (OpenFabrics) BTL. +- Made malloc hooks more friendly to IO interposers. Thanks to the + bug report and suggested fix from Darshan maintainer Phil Carns. +- Added support for the DMTCP checkpoint/restart system. +- Added support for the Cray uGNI interconnect. +- Fixed header file problems on OpenBSD. +- Fixed issue with MPI_TYPE_CREATE_F90_REAL. +- Wrapper compilers now explicitly list/link all Open MPI libraries if + they detect static linking CLI arguments. +- Open MPI now requires a C99 compiler to build. Please upgrade your + C compiler if you do not have a C99-compliant compiler. +- Fix MPI_GET_PROCESSOR_NAME Fortran binding to set ierr properly. + Thanks to LANL for spotting the error. +- Many MXM and FCA updates. +- Fixed erroneous free of putenv'ed string that showed up in Valgrind + reports. +- Fixed MPI_IN_PLACE case for MPI_ALLGATHER. +- Fixed a bug that prevented MCA params from being forwarded to + daemons upon launch. +- Fixed issues with VT and CUDA ``--with-cuda[-libdir]`` configuration + CLI parameters. +- Entirely new implementation of many MPI collective routines focused + on better performance. +- Revamped autogen / build system. +- Add new sensor framework to ORTE that includes modules for detecting + stalled applications and processes that consume too much memory. +- Added new state machine framework to ORTE that converts ORTE into an + event-driven state machine using the event library. +- Added a new MCA parameter (ess_base_stream_buffering) that allows the user + to override the system default for buffering of stdout/stderr streams + (via setvbuf). Parameter is not visible via ompi_info. +- Revamped the launch system to allow consideration of node hardware + in assigning process locations and bindings. +- Added the -novm option to preserve the prior launch behavior.
+- Revamped the process mapping system to utilize node hardware by adding + new map-by, rank-by, and bind-to cmd line options. +- Added new MCA parameter to provide protection against IO forwarding + backlog. +- Dropped support for native Windows due to loss of maintainers. :-( +- Added a new parallel I/O component and multiple new frameworks to + support parallel I/O operations. +- Fix typo in orte_setup_hadoop.m4. Thanks to Aleksej Saushev for + reporting it +- Fix a very old error in opal_path_access(). Thanks to Marco Atzeri + for chasing it down. + + +Open MPI v1.6.x series +---------------------- + +Open MPI version 1.6.6 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: Not released + +.. important:: + + v1.6.6 was not released + +- Prevent integer overflow in datatype creation. Thanks to Gilles + Gouaillardet for identifying the problem and providing a preliminary + version of the patch. +- Ensure help-opal-hwloc-base.txt is included in distribution + tarballs. Thanks to Gilles Gouaillardet for supplying the patch. +- Correctly handle the invalid status for NULL and inactive requests. + Thanks to KAWASHIMA Takahiro for submitting the initial patch. +- Fixed MPI_STATUS_SIZE Fortran issue when used with 8-byte Fortran + INTEGERs and 4-byte C ints. Since this issue affects ABI, it is + only enabled if Open MPI is configured with + ``--enable-abi-breaking-fortran-status-i8-fix``. Thanks to Jim Parker + for supplying the initial patch. +- Fix datatype issue for sending from the middle of non-contiguous + data. +- Fixed failure error with pty support. Thanks to Michal Pecio for + the patch. +- Fixed debugger support for direct-launched jobs. +- Fix MPI_IS_THREAD_MAIN to return the correct value. Thanks to + Lisandro Dalcín for pointing out the issue. +- Update VT to 5.14.4.4: + + - Fix C++-11 issue. + - Fix support for building RPMs on Fedora with CUDA libraries. + +- Add openib part number for ConnectX3-Pro HCA. +- Ensure to check that all resolved IP addresses are local. 
+- Fix MPI_COMM_SPAWN via rsh when mpirun is on a different server. +- Add Gentoo "sandbox" memory hooks override. + + +Open MPI version 1.6.5 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 26 Jun 2013 + +- Updated default SRQ parameters for the openib BTL. + (** also to appear: 1.7.2) +- Major VampirTrace update to 5.14.4.2. + (** also to appear: 1.7.2) +- Fix to set flag==1 when MPI_IPROBE is called with MPI_PROC_NULL. + (** also to appear: 1.7.2) +- Set the Intel Phi device to be ignored by default by the openib BTL. + (** also to appear: 1.7.2) +- Decrease the internal memory storage used by intrinsic MPI datatypes + for Fortran types. Thanks to Takahiro Kawashima for the initial + patch. + (** also to appear: 1.7.2) +- Fix total registered memory calculation for Mellanox ConnectIB and + OFED 2.0. + (** also to appear: 1.7.2) +- Fix possible data corruption in the MXM MTL component. + (** also to appear: 1.7.2) +- Remove extraneous -L from hwloc's embedding. Thanks to Stefan + Friedel for reporting the issue. + (** also to appear: 1.7.2) +- Fix contiguous datatype memory check. Thanks to Eric Chamberland + for reporting the issue. + (** also to appear: 1.7.2) +- Make the openib BTL more friendly to ignoring verbs devices that are + not RC-capable. + (** also to appear: 1.7.2) +- Fix some MPI datatype engine issues. Thanks to Thomas Jahns for + reporting the issue. + (** also to appear: 1.7.2) +- Add INI information for Chelsio T5 device. + (** also to appear: 1.7.2) +- Integrate MXM STREAM support for MPI_ISEND and MPI_IRECV, and other + minor MXM fixes. + (** also to appear: 1.7.2) +- Improved alignment for OpenFabrics buffers. +- Fix to not show amorphous "MPI was already finalized" error when + failing to MPI_File_close an open file. Thanks to Brian Smith for + reporting the issue. + (** also to appear: 1.7.2) + + +Open MPI version 1.6.4 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 21 Feb 2013 + +- Fix Cygwin shared memory and debugger plugin support. 
Thanks to + Marco Atzeri for reporting the issue and providing initial patches. +- Fix to obtaining the correct available nodes when a rankfile is + providing the allocation. Thanks to Siegmar Gross for reporting the + problem. +- Fix process binding issue on Solaris. Thanks to Siegmar Gross for + reporting the problem. +- Updates for MXM 2.0. +- Major VT update to 5.14.2.3. +- Fixed F77 constants for Cygwin/Cmake build. +- Fix a linker error when configuring ``--without-hwloc``. +- Automatically provide compiler flags that compile properly on some + types of ARM systems. +- Fix slot_list behavior when multiple sockets are specified. Thanks + to Siegmar Gross for reporting the problem. +- Fixed memory leak in one-sided operations. Thanks to Victor + Vysotskiy for letting us know about this one. +- Added performance improvements to the OpenIB (OpenFabrics) BTL. +- Improved error message when process affinity fails. +- Fixed MPI_MINLOC on man pages for MPI_REDUCE(_LOCAL). Thanks to Jed + Brown for noticing the problem and supplying a fix. +- Made malloc hooks more friendly to IO interposers. Thanks to the + bug report and suggested fix from Darshan maintainer Phil Carns. +- Restored ability to direct launch under SLURM without PMI support. +- Fixed MPI datatype issues on OpenBSD. +- Major VT update to 5.14.2.3. +- Support FCA v3.0+. +- Fixed header file problems on OpenBSD. +- Fixed issue with MPI_TYPE_CREATE_F90_REAL. +- Fix an issue with using external libltdl installations. Thanks to + opolawski for identifying the problem. +- Fixed MPI_IN_PLACE case for MPI_ALLGATHER for FCA. +- Allow SLURM PMI support to look in lib64 directories. Thanks to + Guillaume Papaure for the patch. +- Restore "use mpi" ABI compatibility with the rest of the 1.5/1.6 + series (except for v1.6.3, where it was accidentally broken). +- Fix a very old error in opal_path_access(). Thanks to Marco Atzeri + for chasing it down.
+ + +Open MPI version 1.6.3 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 30 Oct 2012 + +- Fix mpirun ``--launch-agent`` behavior when a prefix is specified. + Thanks to Reuti for identifying the issue. +- Fixed memchecker configury. +- Brought over some compiler warning squashes from the development trunk. +- Fix spawning from a singleton to multiple hosts when the "add-host" + MPI_Info key is used. Thanks to Brian Budge for pointing out the + problem. +- Add Mellanox ConnectIB IDs and max inline value. +- Fix rankfile when no -np is given. +- FreeBSD detection improvement. Thanks to Brooks Davis for the + patch. +- Removed TCP warnings on Windows. +- Improved collective algorithm selection for very large messages. +- Fix PSM MTL affinity settings. +- Fix issue with MPI_OP_COMMUTATIVE in the mpif.h bindings. Thanks to + Åke Sandgren for providing a patch to fix the issue. +- Fix issue with MPI_SIZEOF when using CHARACTER and LOGICAL types in + the mpi module. Thanks to Åke Sandgren for providing a patch to fix + the issue. + + +Open MPI version 1.6.2 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 25 Sep 2012 + +- Fix issue with MX MTL. Thanks to Doug Eadline for raising the issue. +- Fix singleton MPI_COMM_SPAWN when the result job spans multiple nodes. +- Fix MXM hang, and update for latest version of MXM. +- Update to support Mellanox FCA 2.5. +- Fix startup hang for large jobs. +- Ensure MPI_TESTANY / MPI_WAITANY properly set the empty status when + count==0. +- Fix MPI_CART_SUB behavior of not copying periods to the new + communicator properly. Thanks to John Craske for the bug report. +- Add btl_openib_abort_not_enough_reg_mem MCA parameter to cause Open + MPI to abort MPI jobs if there is not enough registered memory + available on the system (vs. just printing a warning). Thanks to + Brock Palen for raising the issue. +- Minor fix to Fortran MPI_INFO_GET: only copy a value back to the + user's buffer if the flag is .TRUE.
+- Fix VampirTrace compilation issue with the PGI compiler suite. + + +Open MPI version 1.6.1 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 22 Aug 2012 + +- A bunch of changes to eliminate hangs on OpenFabrics-based networks. + Users with Mellanox hardware are **STRONGLY ENCOURAGED** to check + their registered memory kernel module settings to ensure that the OS + will allow registering more than 8GB of memory. See this FAQ item + for details: + + https://www.open-mpi.org/faq/?category=openfabrics#ib-low-reg-mem + + - Fall back to send/receive semantics if registered memory is + unavailable for RDMA. + - Fix two fragment leaks when registered memory is exhausted. + - Heuristically determine how much registered memory is available + and warn if it's significantly less than all of RAM. + - Artificially limit the amount of registered memory each MPI process + can use to about 1/Nth of total registered memory available. + - Improve error messages when events occur that are likely due to + unexpected registered memory exhaustion. + +- Fix double semicolon error in the C++ in ``<mpi.h>``. Thanks to John + Foster for pointing out the issue. +- Allow -Xclang to be specified multiple times in CFLAGS. Thanks to + A. Martin for raising the issue. +- Break up a giant ``print *`` statement in the ABI-preserving incorrect + MPI_SCATTER interface in the "large" Fortran "mpi" module. Thanks + to Juan Escobar for the initial patch. +- Switch the MPI_ALLTOALLV default algorithm to a pairwise exchange. +- Increase the openib BTL default CQ length to handle more types of + OpenFabrics devices. +- Lots of VampirTrace fixes; upgrade to v5.13.0.4. +- Map MPI_2INTEGER to underlying MPI_INTEGERs, not MPI_INTs. +- Ensure that the OMPI version number is tolerant of handling spaces. + Thanks to dragonboy for identifying the issue. +- Fixed IN parameter marking on Fortran "mpi" module + MPI_COMM_TEST_INTER interface. +- Various MXM improvements.
+- Make the output of ``mpirun --report-bindings`` much more friendly / + human-readable. +- Properly handle MPI_COMPLEX8|16|32. +- More fixes for mpirun's processor affinity options (--bind-to-core + and friends). +- Use aligned memory for OpenFabrics registered memory. +- Multiple fixes for parameter checking in MPI_ALLGATHERV, + MPI_REDUCE_SCATTER, MPI_SCATTERV, and MPI_GATHERV. Thanks to the + mpi4py community (Bennet Fauber, Lisandro Dalcín, Jonathan Dursi). +- Fixed file positioning overflows in MPI_FILE_GET_POSITION, + MPI_FILE_GET_POSITION_SHARED, FILE_GET_SIZE, FILE_GET_VIEW. +- Removed the broken ``mpirun --cpu-set`` option. +- Fix cleanup of MPI errorcodes. Thanks to Alexey Bayduraev for the + patch. +- Fix default hostfile location. Thanks to Götz Waschk for noticing + the issue. +- Improve several error messages. + + +Open MPI version 1.6.0 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 14 May 2012 + +- Fix some process affinity issues. When binding a process, Open MPI + will now bind to all available hyperthreads in a core (or socket, + depending on the binding options specified). + + .. note:: + Note that ``mpirun --bind-to-socket ...`` does not work on POWER6- + and POWER7-based systems with some Linux kernel versions. See + the FAQ on the Open MPI web site for more information. + +- Add support for ARM5 and ARM6 (in addition to the existing ARM7 + support). Thanks to Evan Clinton for the patch. +- Minor Mellanox MXM fixes. +- Properly detect FDR10, FDR, and EDR OpenFabrics devices. +- Minor fixes to the mpirun(1) and MPI_Comm_create(3) man pages. +- Prevent segv if COMM_SPAWN_MULTIPLE fails. Thanks to Fujitsu for + the patch. +- Disable interposed memory management in fakeroot environments. This + fixes a problem in some build environments. +- Minor hwloc updates. +- Array versions of MPI_TEST and MPI_WAIT with a count==0 will now + return immediately with MPI_SUCCESS. Thanks to Jeremiah Willcock + for the suggestion. +- Update VampirTrace to v5.12.2. 
+- Properly handle forwarding stdin to all processes when ``mpirun + --stdin all`` is used. +- Workaround XLC assembly bug. +- OS X Tiger (10.4) has not been supported for a while, so forcibly + abort configure if we detect it. +- Fix segv in the openib BTL when running on SPARC 64 systems. +- Fix some include file ordering issues on some BSD-based platforms. + Thanks to Paul Hargrove for this (and many, many other) fixes. +- Properly handle .FALSE. return parameter value to attribute copy + callback functions. +- Fix a bunch of minor C++ API issues; thanks to Fujitsu for the patch. +- Fixed the default hostfile MCA parameter behavior. +- Per the MPI spec, ensure not to touch the port_name parameter to + MPI_CLOSE_PORT (it's an IN parameter). + + +Open MPI v1.5.x series +---------------------- + +Open MPI version 1.5.5 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 27 Mar 2012 + +- Many, many portability configure/build fixes courtesy of Paul + Hargrove. Thanks, Paul! +- Fixed shared memory fault tolerance support compiler errors. +- Removed not-production-quality rshd and tmd PLM launchers. +- Minor updates to the Open MPI SRPM spec file. +- Fixed mpirun's ``--bind-to-socket`` option. +- A few MPI_THREAD_MULTIPLE fixes in the shared memory BTL. +- Upgrade the GNU Autotools used to bootstrap the 1.5/1.6 series to + all the latest versions at the time of this release. +- Categorically state in the README that if you're having a problem + with Open MPI with the Linux Intel 12.1 compilers, **upgrade your + Intel Compiler Suite to the latest patch version**, and the problems + will go away. :-) +- Fix the ``--without-memory-manager`` configure option. +- Fixes for Totalview/DDT MPI-capable debuggers. +- Update rsh/ssh support to properly handle the Mac OS X library path + (i.e., DYLD_LIBRARY_PATH). +- Make warning about shared memory backing files on a networked file + system be optional (i.e., can be disabled via MCA parameter). +- Several fixes to processor and memory affinity.
+- Various shared memory infrastructure improvements. +- Various checkpoint/restart fixes. +- Fix MPI_IN_PLACE (and other MPI sentinel values) on OS X. Thanks to + Dave Goodell for providing the magic OS X gcc linker flags necessary. +- Various man page corrections and typo fixes. Thanks to Fujitsu for + the patch. +- Updated wrapper compiler man pages to list the various ``--showme`` + options that are available. +- Add PMI direct-launch support (e.g., "srun mpi_application" under + SLURM). +- Correctly compute the aligned address when packing the + datatype description. Thanks to Fujitsu for the patch. +- Fix MPI obscure corner case handling in packing MPI datatypes. + Thanks to Fujitsu for providing the patch. +- Workaround an Intel compiler v12.1.0 2011.6.233 vector optimization + bug. +- Output the MPI API in ompi_info output. +- Major VT update to 5.12.1.4. +- Upgrade embedded Hardware Locality (hwloc) v1.3.2, plus some + post-1.3.2-release bug fixes. All processor and memory binding is + now done through hwloc. Woo hoo! Note that this fixes core binding + on AMD Opteron 6200 and 4200 series-based systems (sometimes known + as Interlagos, Valencia, or other Bulldozer-based chips). +- New MCA parameters to control process-wide memory binding policy: + hwloc_base_mem_alloc_policy, hwloc_base_mem_bind_failure_action (see + ``ompi_info --param hwloc base``). +- Removed direct support for libnuma. Libnuma support may now be + picked up through hwloc. +- Added MPI_IN_PLACE support to MPI_EXSCAN. +- Various fixes for building on Windows, including MinGW support. +- Removed support for the OpenFabrics IBCM connection manager. +- Updated Chelsio T4 and Intel NE OpenFabrics default buffer settings. +- Increased the default RDMA CM timeout to 30 seconds. +- Issue a warning if both btl_tcp_if_include and btl_tcp_if_exclude + are specified. +- Many fixes to the Mellanox MXM transport. 
+ + +Open MPI version 1.5.4 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 18 Aug 2011 + +- Add support for the (as yet unreleased) Mellanox MXM transport. +- Add support for dynamic service levels (SLs) in the openib BTL. +- Fixed C++ bindings cosmetic/warnings issue with + MPI::Comm::NULL_COPY_FN and MPI::Comm::NULL_DELETE_FN. Thanks to + Júlio Hoffimann for identifying the issues. +- Also allow the word "slots" in rankfiles (i.e., not just "slot"). + (** also to appear in 1.4.4) +- Add Mellanox ConnectX 3 device IDs to the openib BTL defaults. + (** also to appear in 1.4.4) +- Various FCA updates. +- Fix 32 bit SIGBUS errors on Solaris SPARC platforms. +- Add missing ARM assembly code files. +- Update to allow more than 128 entries in an appfile. + (** also to appear in 1.4.4) +- Various VT updates and bug fixes. +- Update description of btl_openib_cq_size to be more accurate. + (** also to appear in 1.4.4) +- Various assembly "clobber" fixes. +- Fix a hang in carto selection in obscure situations. +- Guard the inclusion of execinfo.h since not all platforms have it. Thanks + to Aleksej Saushev for identifying this issue. + (** also to appear in 1.4.4) +- Support Solaris legacy munmap prototype changes. + (** also to appear in 1.4.4) +- Updated to Automake 1.11.1 per + https://www.open-mpi.org/community/lists/devel/2011/07/9492.php. +- Fix compilation of LSF support. +- Update MPI_Comm_spawn_multiple.3 man page to reflect what it + actually does. +- Fix for possible corruption of the environment. Thanks to Peter + Thompson for the suggestion. (** also to appear in 1.4.4) +- Enable use of PSM on direct-launch SLURM jobs. +- Update paffinity hwloc to v1.2, and to fix minor bugs affinity + assignment bugs on PPC64/Linux platforms. +- Let the openib BTL auto-detect its bandwidth. +- Support new MPI-2.2 datatypes. +- Updates to support more datatypes in MPI one-sided communication. +- Fix recursive locking bug when MPI-IO was used with + MPI_THREAD_MULTIPLE. 
(** also to appear in 1.4.4) +- Fix mpirun handling of prefix conflicts. +- Ensure mpirun's ``--xterm`` options leaves sessions attached. + (** also to appear in 1.4.4) +- Fixed type of sendcounts and displs in the "use mpi" F90 module. + ABI is preserved, but applications may well be broken. See the + README for more details. Thanks to Stanislav Sazykin for + identifying the issue. (** also to appear in 1.4.4) +- Fix indexed datatype leaks. Thanks to Pascal Deveze for supplying + the initial patch. (** also to appear in 1.4.4) +- Fix debugger mapping when mpirun's -npernode option is used. +- Fixed support for configure's ``--disable-dlopen`` option when + used with ``make distclean``. +- Fix segv associated with MPI_Comm_create with MPI_GROUP_EMPTY. + Thanks to Dominik Goeddeke for finding this. + (** also to appear in 1.4.4) +- Improved LoadLeveler ORTE support. +- Add new WinVerbs BTL plugin, supporting native OpenFabrics verbs on + Windows (the "wv" BTL). +- Add new btl_openib_gid_index MCA parameter to allow selecting which + GID to use on an OpenFabrics device's GID table. +- Add support for PCI relaxed ordering in the OpenFabrics BTL (when + available). +- Update rsh logic to allow correct SGE operation. +- Ensure that the mca_paffinity_alone MCA parameter only appears once + in the ompi_info output. Thanks to Gus Correa for identifying the + issue. +- Fixed return codes from MPI_PROBE and MPI_IPROBE. + (** also to appear in 1.4.4) +- Remove ``--enable-progress-thread`` configure option; it doesn't work on + the v1.5 branch. Rename ``--enable-mpi-threads`` to + ``--enable-mpi-thread-multiple``. Add new ``--enable-opal-multi-threads`` + option. +- Updates for Intel Fortran compiler version 12. +- Remove bproc support. Farewell bproc! +- If something goes wrong during MPI_INIT, fix the error + message to say that it's illegal to invoke MPI_INIT before + MPI_INIT. 
+ + +Open MPI version 1.5.3 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 16 Mar 2011 + +- Add missing "affinity" MPI extension (i.e., the OMPI_Affinity_str() + API) that was accidentally left out of the 1.5.2 release. + + +Open MPI version 1.5.2 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 9 Mar 2011 + +- Replaced all custom topology / affinity code with initial support + for hwloc v1.1.1 (PLPA has been removed -- long live hwloc!). Note + that hwloc is bundled with Open MPI, but an external hwloc can be + used, if desired. See README for more details. +- Many CMake updates for Windows builds. +- Updated opal_cr_thread_sleep_wait MCA param default value to make it + less aggressive. +- Updated debugger support to allow Totalview attaching from jobs + launched directly via srun (not mpirun). Thanks to Nikolay Piskun + for the patch. +- Added more FTB/CIFTS support. +- Fixed compile error with the PGI compiler. +- Portability fixes to allow the openib BTL to run on the Solaris + verbs stack. +- Fixed multi-token command-line issues when using the mpirun + ``--debug`` switch. For example: + + .. code-block:: sh + + mpirun --debug -np 2 a.out "foo bar" + + Thanks to Gabriele Fatigati for reporting the issue. +- Added ARM support. +- Added the MPI_ROOT environment variable in the Open MPI Linux SRPM + for customers who use the BPS and LSF batch managers. +- Updated ROMIO from MPICH v1.3.1 (plus one additional patch). +- Fixed some deprecated MPI API function notification messages. +- Added new "bfo" PML that provides failover on OpenFabrics networks. +- Fixed some buffer memcheck issues in ``MPI_*_init``. +- Added Solaris-specific chip detection and performance improvements. +- Fix some compile errors on Solaris. +- Updated the "rmcast" framework with bug fixes, new functionality. +- Updated the Voltaire FCA component with bug fixes, new + functionality. Support for FCA version 2.1. +- Fix gcc 4.4.x and 4.5.x over-aggressive warning notifications on + possibly freeing stack variables. 
Thanks to the Gentoo packagers + for reporting the issue. +- Make the openib component be verbose when it disqualifies itself due + to MPI_THREAD_MULTIPLE. +- Minor man page fixes. +- Various checkpoint / restart fixes. +- Fix race condition in the one-sided unlock code. Thanks to + Guillaume Thouvenin for finding the issue. +- Improve help message aggregation. +- Add OMPI_Affinity_str() optional user-level API function (i.e., the + "affinity" MPI extension). See README for more details. +- Added btl_tcp_if_seq MCA parameter to select a different ethernet + interface for each MPI process on a node. This parameter is only + useful when used with virtual ethernet interfaces on a single + network card (e.g., when using virtual interfaces give dedicated + hardware resources on the NIC to each process). +- Changed behavior of mpirun to terminate if it receives 10 (or more) + SIGPIPEs. +- Fixed oversubscription detection. +- Added new mtl_mx_board and mtl_mx_endpoint MCA parameters. +- Added ummunotify support for OpenFabrics-based transports. See the + README for more details. + + +Open MPI version 1.5.1 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 15 Dec 2010 + +- Fixes for the Oracle Studio 12.2 Fortran compiler. +- Fix SPARC and SPARCv9 atomics. Thanks to Nicola Stange for the + initial patch. +- Fix Libtool issues with the IBM XL compiler in 64-bit mode. +- Restore the reset of the libevent progress counter to avoid + over-sampling the event library. +- Update memory barrier support. +- Use memmove (instead of memcpy) when necessary (e.g., source and + destination overlap). +- Fixed ompi-top crash. +- Fix to handle Autoconf ``--program-transforms`` properly and other + m4/configury updates. Thanks to the GASNet project for the + ``--program`` transforms fix. +- Allow hostfiles to specify usernames on a per-host basis. +- Update wrapper compiler scripts to search for perl during configure, + per request from the BSD maintainers. +- Minor man page fixes. 
+- Added ``--with-libltdl`` option to allow building Open MPI with an + external installation of libltdl. +- Fixed various issues with -D_FORTIFY_SOURCE=2. +- Various VT fixes and updates. + + +Open MPI version 1.5.0 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 10 Oct 2010 + +- Added "knem" support: direct process-to-process copying for shared + memory message passing. See https://runtime.bordeaux.inria.fr/knem/ + and the README file for more details. +- Updated shared library versioning scheme and linking style of MPI + applications. The MPI application ABI has been broken from the + v1.3/v1.4 series. MPI applications compiled against any prior + version of Open MPI will need to, at a minimum, re-link. See the + README file for more details. +- Added "fca" collective component, enabling MPI collective offload + support for Voltaire switches. +- Fixed MPI one-sided operations with large target displacements. + Thanks to Brian Price and Jed Brown for reporting the issue. +- Fixed MPI_GET_COUNT when used with large counts. Thanks to Jed + Brown for reporting the issue. +- Made the openib BTL safer if extremely low SRQ settings are used. +- Fixed handling of the array_of_argv parameter in the Fortran + binding of MPI_COMM_SPAWN_MULTIPLE (** also to appear: 1.4.3). +- Fixed malloc(0) warnings in some collectives. +- Fixed a problem with the Fortran binding for + MPI_FILE_CREATE_ERRHANDLER. Thanks to Secretan Yves for identifying + the issue (** also to appear: 1.4.3). +- Updates to the LSF PLM to ensure that the path is correctly passed. + Thanks to Teng Lin for the patch (** also to appear: 1.4.3). +- Fixes for the F90 MPI_COMM_SET_ERRHANDLER and MPI_WIN_SET_ERRHANDLER + bindings. Thanks to Paul Kapinos for pointing out the issue + (** also to appear: 1.4.3). +- Fixed extra_state parameter types in F90 prototypes for + MPI_COMM_CREATE_KEYVAL, MPI_GREQUEST_START, MPI_REGISTER_DATAREP, + MPI_TYPE_CREATE_KEYVAL, and MPI_WIN_CREATE_KEYVAL. 
+- Fixes for Solaris oversubscription detection. +- If the PML determines it can't reach a peer process, print a + slightly more helpful message. Thanks to Nick Edmonds for the + suggestion. +- Make btl_openib_if_include/exclude function the same way + btl_tcp_if_include/exclude works (i.e., supplying an _include list + overrides supplying an _exclude list). +- Apply more scalable reachability algorithm on platforms with more + than 8 TCP interfaces. +- Various assembly code updates for more modern platforms / compilers. +- Relax restrictions on using certain kinds of MPI datatypes with + one-sided operations. Users beware; not all MPI datatypes are valid + for use with one-sided operations! +- Improve behavior of MPI_COMM_SPAWN with regards to ``--bynode``. +- Various threading fixes in the openib BTL and other core pieces of + Open MPI. +- Various help file and man pages updates. +- Various FreeBSD and NetBSD updates and fixes. Thanks to Kevin + Buckley and Aleksej Saushev for their work. +- Fix case where freeing communicators in MPI_FINALIZE could cause + process failures. +- Print warnings if shared memory state files are opened on what look + like networked filesystems. +- Update libevent to v1.4.13. +- Allow propagating signals to processes that call fork(). +- Fix bug where MPI_GATHER was sometimes incorrectly examining the + datatype on non-root processes. Thanks to Michael Hofmann for + investigating the issue. +- Various Microsoft Windows fixes. +- Various Catamount fixes. +- Various checkpoint / restart fixes. +- Xgrid support has been removed until it can be fixed (patches + would be welcome). +- Added simplistic "libompitrace" contrib package. Using the MPI + profiling interface, it essentially prints out to stderr when select + MPI functions are invoked. +- Update bundled VampirTrace to v5.8.2. +- Add pkg-config(1) configuration files for ompi, ompi-c, ompi-cxx, + ompi-f77, ompi-f90. See the README for more details. 
+- Removed the libopenmpi_malloc library (added in the v1.3 series)
+  since it is no longer necessary.
+- Add several notifier plugins (generally used when Open MPI detects
+  system/network administrator-worthy problems); each has its own
+  MCA parameters to govern its usage. See ``ompi_info --param
+  notifier`` for more details.
+- Fix Fortran value for MPI_MAX_PORT_NAME. Thanks to Enzo Dari for + raising the issue. +- Workaround an Intel compiler v12.1.0 2011.6.233 vector optimization + bug. +- Fix issues on Solaris with the openib BTL. +- Fixes for the Oracle Studio 12.2 Fortran compiler. +- Update iWARP parameters for the Intel NICs. + (** also to appear in 1.5.5) +- Fix obscure cases where MPI_ALLGATHER could crash. Thanks to Andrew + Senin for reporting the problem. + (** also to appear in 1.5.5) + + +Open MPI version 1.4.4 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 11 Oct 2011 + +- Modified a memcpy() call in the openib btl connection setup to use + memmove() instead because of the possibility of an overlapping + copy (as identified by valgrind). +- Changed use of sys_timer_get_cycles() to the more appropriate + wrapper: opal_timer_base_get_cycles(). Thanks to Jani Monoses + for this fix. +- Corrected the reported default value of btl_openib_ib_timeout + in the "IB retries exceeded" error message. Thanks to Kevin Buckley + for this correction. +- Increased rdmacm address resolution timeout from 1s to 30s & + updated Chelsio T4 openib BTL defaults. Thanks to Steve Wise + for these updates. + (** also to appear in 1.5.5) +- Ensure that MPI_Accumulate error return in 1.4 is consistent with + 1.5.x and trunk. +- Allow the word "slots" in rankfiles (i.e., not just "slot"). + (** also appeared in 1.5.4) +- Add Mellanox ConnectX 3 device IDs to the openib BTL defaults. + (** also appeared in 1.5.4) +- Update description of btl_openib_cq_size to be more accurate. +- Ensure mpirun's ``--xterm`` options leaves sessions attached. + (** also appeared in 1.5.4) +- Update to allow more than 128 entries in an appfile. + (** also appeared in 1.5.4) +- Update description of btl_openib_cq_size to be more accurate. + (** also appeared in 1.5.4) +- Fix for deadlock when handling recursive attribute keyval deletions + (e.g., when using ROMIO with MPI_THREAD_MULTIPLE). +- Fix indexed datatype leaks. 
Thanks to Pascal Deveze for supplying + the initial patch. (** also appeared in 1.5.4) +- Fixed the F90 types of the sendcounts and displs parameters to + MPI_SCATTERV. Thanks to Stanislav Sazykin for identifying the issue. + (** also appeared in 1.5.4) +- Exclude opal/libltdl from "make distclean" when ``--disable-dlopen`` is + used. Thanks to David Gunter for reporting the issue. +- Fixed a segv in MPI_Comm_create when called with GROUP_EMPTY. + Thanks to Dominik Goeddeke for finding this. + (** also appeared in 1.5.4) +- Fixed return codes from MPI_PROBE and MPI_IPROBE. + (** also appeared in 1.5.4) +- Fixed undefined symbol error when using the vtf90 profiling tool. +- Fix for referencing an uninitialized variable in DPM ORTE. Thanks + to Avinash Malik for reporting the issue. +- Fix for correctly handling multi-token args when using debuggers. +- Eliminated the unneeded ``u_int*_t`` datatype definitions. +- Change in ORTE DPM to get around gcc 4.[45].x compiler wanrings + about possibly calling free() on a non-heap variable, even though it + will never happen because the refcount will never go to zero. +- Fixed incorrect text in MPI_File_set_view man page. +- Fix in MPI_Init_thread for checkpoint/restart. +- Fix for libtool issue when using pgcc to compile ompi in conjunction + with the -tp option. +- Fixed a race condition in osc_rdma_sync. Thanks to Guillaume + Thouvenin for finding this issue. +- Clarification of MPI_Init_thread man page. +- Fixed an indexing problem in precondition_transports. +- Fixed a problem in which duplicated libs were being specified for + linking. Thanks to Hicham Mouline for noticing it. +- Various autogen.sh fixes. +- Fix for memchecking buffers during ``MPI_*INIT``. +- Man page cleanups. Thanks to Jeremiah Willcock and Jed Brown. +- Fix for VT rpmbuild on RHEL5. +- Support Solaris legacy munmap prototype changes. + (** also appeared in 1.5.4) +- Expands app_idx to int32_t to allow more than 127 app_contexts. 
+- Guard the inclusion of execinfo.h since not all platforms have it. Thanks + to Aleksej Saushev for identifying this issue. + (** also appeared in 1.5.4) +- Fix to avoid possible environment corruption. Thanks to Peter Thompson + for identifying the issue and supplying a patch. + (** also appeared in 1.5.4) +- Fixed paffinity base MCA duplicate registrations. Thanks to Gus + Correa for bringing this to our attention. +- Fix recursive locking bug when MPI-IO was used with + MPI_THREAD_MULTIPLE. (** also appeared in 1.5.4) +- F90 MPI API fixes. +- Fixed a misleading MPI_Bcast error message. Thanks to Jeremiah + Willcock for reporting this. +- Added to ptmalloc's hooks.c (it's not always included + by default on some systems). +- Libtool patch to get around a build problem when using the IBM XL + compilers. +- Fix to detect and avoid overlapping memcpy(). Thanks to + Francis Pellegrini for identifying the issue. +- Fix to allow ompi to work on top of RoCE vLANs. +- Restored a missing debugger flag to support TotalView. Thanks to + David Turner and the TV folks for supplying the fix. +- Updated SLURM support to 1.5.1. +- Removed an extraneous #include from the TCP BTL. +- When specifying OOB ports, fix to convert the ports into network + byte order before binding. +- Fixed use of memory barriers in the SM BTL. This fixed segv's when + compiling with Intel 10.0.025 or PGI 9.0-3. +- Fix to prevent the SM BTL from creating its mmap'd file in + directories that are remotely mounted. + + +Open MPI version 1.4.3 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 6 Sep 2010 + +- Fixed handling of the array_of_argv parameter in the Fortran + binding of MPI_COMM_SPAWN_MULTIPLE (** also to appear: 1.5). +- Fixed a problem with the Fortran binding for + MPI_FILE_CREATE_ERRHANDLER. Thanks to Secretan Yves for identifying + the issue (** also to appear: 1.5). +- Updates to the LSF PLM to ensure that the path is correctly passed. + Thanks to Teng Lin for the patch (** also to appear: 1.5). 
+- Fixes for the F90 MPI_COMM_SET_ERRHANDLER and MPI_WIN_SET_ERRHANDLER + bindings. Thanks to Paul Kapinos for pointing out the issue. + (** also to appear: 1.5). +- Fixed various MPI_THREAD_MULTIPLE race conditions. +- Fixed an issue with an undeclared variable from ptmalloc2 munmap on + BSD systems. +- Fixes for BSD interface detection. +- Various other BSD fixes. Thanks to Kevin Buckley helping to track. + all of this down. +- Fixed issues with the use of the -nper* mpirun command line arguments. +- Fixed an issue with coll tuned dynamic rules. +- Fixed an issue with the use of OPAL_DESTDIR being applied too aggressively. +- Fixed an issue with one-sided xfers when the displacement exceeds 2GBytes. +- Change to ensure TotalView works properly on Darwin. +- Added support for Visual Studio 2010. +- Fix to ensure proper placement of VampirTrace header files. +- Needed to add volatile keyword to a varialbe used in debugging + (MPIR_being_debugged). +- Fixed a bug in inter-allgather. +- Fixed malloc(0) warnings. +- Corrected a typo the MPI_Comm_size man page (intra -> inter). Thanks + to Simon number.cruncher for pointing this out. +- Fixed a SegV in orted when given more than 127 app_contexts. +- Removed xgrid source code from the 1.4 branch since it is no longer + supported in the 1.4 series. +- Removed the ``--enable-opal-progress-threads`` config option since + opal progress thread support does not work in 1.4.x. +- Fixed a defect in VampirTrace's vtfilter. +- Fixed wrong Windows path in hnp_contact. +- Removed the requirement for a paffinity component. +- Removed a hardcoded limit of 64 interconnected jobs. +- Fix to allow singletons to use ompi-server for rendezvous. +- Fixed bug in output-filename option. +- Fix to correctly handle failures in mx_init(). +- Fixed a potential Fortran memory leak. +- Fixed an incorrect branch in some ppc32 assembly code. Thanks + to Matthew Clark for this fix. +- Remove use of undocumented AS_VAR_GET macro during configuration. 
+- Fixed an issue with VampirTrace's wrapper for MPI_init_thread. +- Updated mca-btl-openib-device-params.ini file with various new vendor id's. +- Configuration fixes to ensure CPPFLAGS in handled properly if a non-standard + valgrind location was specified. +- Various man page updates + + +Open MPI version 1.4.2 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 4 May 2010 + +- Fixed problem when running in heterogeneous environments. Thanks to + Timur Magomedov for helping to track down this issue. +- Update LSF support to ensure that the path is passed correctly. + Thanks to Teng Lin for submitting a patch. +- Fixed some miscellaneous oversubscription detection bugs. +- IBM re-licensed its LoadLeveler code to be BSD-compliant. +- Various OpenBSD and NetBSD build and run-time fixes. Many thanks to + the OpenBSD community for their time, expertise, and patience + getting these fixes incorporated into Open MPI's main line. +- Various fixes for multithreading deadlocks, race conditions, and + other nefarious things. +- Fixed ROMIO's handling of "nearly" contiguous issues (e.g., with + non-zero true_lb). Thanks for Pascal Deveze for the patch. +- Bunches of Windows build fixes. Many thanks to several Windows + users for their help in improving our support on Windows. +- Now allow the graceful failover from MTLs to BTLs if no MTLs can + initialize successfully. +- Added "clobber" information to various atomic operations, fixing + erroneous behavior in some newer versions of the GNU compiler suite. +- Update various iWARP and InfiniBand device specifications in the + OpenFabrics .ini support file. +- Fix the use of hostfiles when a username is supplied. +- Various fixes for rankfile support. +- Updated the internal version of VampirTrace to 5.4.12. +- Fixed OS X TCP wireup issues having to do with IPv4/IPv6 confusion + (see https://svn.open-mpi.org/trac/ompi/changeset/22788 for more + details). 
+- Fixed some problems in processor affinity support, including when + there are "holes" in the processor namespace (e.g., offline + processors). +- Ensure that Open MPI's "session directory" (usually located in /tmp) + is cleaned up after process termination. +- Fixed some problems with the collective "hierarch" implementation + that could occur in some obscure conditions. +- Various MPI_REQUEST_NULL, API parameter checking, and attribute + error handling fixes. Thanks to Lisandro Dalcín for reporting the + issues. +- Fix case where MPI_GATHER erroneously used datatypes on non-root + nodes. Thanks to Michael Hofmann for investigating the issue. +- Patched ROMIO support for PVFS2 > v2.7 (patch taken from MPICH2 + version of ROMIO). +- Fixed ``mpirun`` ``--report-bindings`` behavior when used with + mpi_paffinity_alone=1. Also fixed mpi_paffinity_alone=1 behavior + with non-MPI applications. Thanks to Brice Goglin for noticing the + problem. +- Ensure that all OpenFabrics devices have compatible receive_queues + specifications before allowing them to communicate. See the lengthy + comment in https://svn.open-mpi.org/trac/ompi/changeset/22592 for + more details. +- Fix some issues with checkpoint/restart. +- Improve the pre-MPI_INIT/post-MPI_FINALIZE error messages. +- Ensure that loopback addresses are never advertised to peer + processes for RDMA/OpenFabrics support. +- Fixed a CSUM PML false positive. +- Various fixes for Catamount support. +- Minor update to wrapper compilers in how user-specific argv is + ordered on the final command line. Thanks to Jed Brown for the + suggestions. +- Removed flex.exe binary from Open MPI tarballs; now generate flex + code from a newer (Windows-friendly) flex when we make official + tarballs. + + +Open MPI version 1.4.1 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 15 Jan 2010 + +- Update to PLPA v1.3.2, addressing a licensing issue identified by + the Fedora project. See + https://svn.open-mpi.org/trac/plpa/changeset/262 for details. 
+- Add check for malformed checkpoint metadata files (Ticket #2141). +- Fix error path in ompi-checkpoint when not able to checkpoint + (Ticket #2138). +- Cleanup component release logic when selecting checkpoint/restart + enabled components (Ticket #2135). +- Fixed VT node name detection for Cray XT platforms, and fixed some + broken VT documentation files. +- Fix a possible race condition in tearing down RDMA CM-based + connections. +- Relax error checking on MPI_GRAPH_CREATE. Thanks to David Singleton + for pointing out the issue. +- Fix a shared memory "hang" problem that occurred on x86/x86_64 + platforms when used with the GNU >=4.4.x compiler series. +- Add fix for Libtool 2.2.6b's problems with the PGI 10.x compiler + suite. Inspired directly from the upstream Libtool patches that fix + the issue (but we need something working before the next Libtool + release). + + +Open MPI version 1.4.0 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 8 Dec 2009 + +- The **only** change in the Open MPI v1.4 release (as compared to v1.3.4) + was to update the embedded version of Libtool's libltdl to address a + potential security vulnerability. Specifically: Open MPI v1.3.4 was + created with GNU Libtool 2.2.6a; Open MPI v1.4 was created with GNU + Libtool 2.2.6b. There are no other changes between Open MPI v1.3.4 + and v1.4. + + +Open MPI v1.3.x series +---------------------- + +Open MPI version 1.3.4 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 13 Feb 2010 + +- Fix some issues in OMPI's SRPM with regard to shell_scripts_basename + and its use with mpi-selector. Thanks to Bill Johnstone for + pointing out the problem. +- Added many new MPI job process affinity options to mpirun. See the + newly-updated mpirun(1) man page for details. +- Several updates to mpirun's XML output. +- Update to fix a few Valgrind warnings with regards to the ptmalloc2 + allocator and Open MPI's use of PLPA. 
+- Many updates and fixes to the (non-default) "sm" collective + component (i.e., native shared memory MPI collective operations). +- Updates and fixes to some MPI_COMM_SPAWN_MULTIPLE corner cases. +- Fix some internal copying functions in Open MPI's use of PLPA. +- Correct some SLURM nodelist parsing logic that may have interfered + with large jobs. Additionally, per advice from the SLURM team, + change the environment variable that we use for obtaining the job's + allocation. +- Revert to an older, safer (but slower) communicator ID allocation + algorithm. +- Fixed minimum distance finding for OpenFabrics devices in the openib + BTL. +- Relax the parameter checking MPI_CART_CREATE a bit. +- Fix MPI_COMM_SPAWN[_MULTIPLE] to only error-check the info arguments + on the root process. Thanks to Federico Golfre Andreasi for + reporting the problem. +- Fixed some BLCR configure issues. +- Fixed a potential deadlock when the openib BTL was used with + MPI_THREAD_MULTIPLE. +- Fixed dynamic rules selection for the "tuned" coll component. +- Added a launch progress meter to mpirun (useful for large jobs; set + the orte_report_launch_progress MCA parameter to 1 to see it). +- Reduced the number of file descriptors consumed by each MPI process. +- Add new device IDs for Chelsio T3 RNICs to the openib BTL config file. +- Fix some CRS self component issues. +- Added some MCA parameters to the PSM MTL to tune its run-time + behavior. +- Fix some VT issues with MPI_BOTTOM/MPI_IN_PLACE. +- Man page updates from the Debain Open MPI package maintainers. +- Add cycle counter support for the Alpha and Sparc platforms. +- Pass visibility flags to libltdl's configure script, resulting in + those symbols being hidden. This appears to mainly solve the + problem of applications attempting to use different versions of + libltdl from that used to build Open MPI. 
+ + +Open MPI version 1.3.3 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 14 Jul 2009 + +- Fix a number of issues with the openib BTL (OpenFabrics) RDMA CM, + including a memory corruption bug, a shutdown deadlock, and a route + timeout. Thanks to David McMillen and Hal Rosenstock for help in + tracking down the issues. +- Change the behavior of the EXTRA_STATE parameter that is passed to + Fortran attribute callback functions: this value is now stored + internally in MPI -- it no longer references the original value + passed by ``MPI_*_CREATE_KEYVAL``. +- Allow the overriding RFC1918 and RFC3330 for the specification of + "private" networks, thereby influencing Open MPI's TCP + "reachability" computations. +- Improve flow control issues in the sm btl, by both tweaking the + shared memory progression rules and by enabling the "sync" collective + to barrier every 1,000th collective. +- Various fixes for the IBM XL C/C++ v10.1 compiler. +- Allow explicit disabling of ptmalloc2 hooks at runtime (e.g., enable + support for Debian's builtroot system). Thanks to Manuel Prinz and + the rest of the Debian crew for helping identify and fix this issue. +- Various minor fixes for the I/O forwarding subsystem. +- Big endian iWARP fixes in the Open Fabrics RDMA CM support. +- Update support for various OpenFabrics devices in the openib BTL's + .ini file. +- Fixed undefined symbol issue with Open MPI's parallel debugger + message queue support so it can be compiled by Sun Studio compilers. +- Update MPI_SUBVERSION to 1 in the Fortran bindings. +- Fix MPI_GRAPH_CREATE Fortran 90 binding. +- Fix MPI_GROUP_COMPARE behavior with regards to MPI_IDENT. Thanks to + Geoffrey Irving for identifying the problem and supplying the fix. +- Silence gcc 4.1 compiler warnings about type punning. Thanks to + Number Cruncher for the fix. +- Added more Valgrind and other memory-cleanup fixes. Thanks to + various Open MPI users for help with these issues. +- Miscellaneous VampirTrace fixes. 
+- More fixes for openib credits in heavy-congestion scenarios. +- Slightly decrease the latency in the openib BTL in some conditions + (add "send immediate" support to the openib BTL). +- Ensure to allow MPI_REQUEST_GET_STATUS to accept an + MPI_STATUS_IGNORE parameter. Thanks to Shaun Jackman for the bug + report. +- Added Microsoft Windows support. See README.WINDOWS file for + details. + + +Open MPI version 1.3.2 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 22 Apr 2009 + +- Fixed a potential infinite loop in the openib BTL that could occur + in senders in some frequent-communication scenarios. Thanks to Don + Wood for reporting the problem. +- Add a new checksum PML variation on ob1 (main MPI point-to-point + communication engine) to detect memory corruption in node-to-node + messages +- Add a new configuration option to add padding to the openib + header so the data is aligned +- Add a new configuration option to use an alternative checksum algo + when using the checksum PML +- Fixed a problem reported by multiple users on the mailing list that + the LSF support would fail to find the appropriate libraries at + run-time. +- Allow empty shell designations from getpwuid(). Thanks to Sergey + Koposov for the bug report. +- Ensure that mpirun exits with non-zero status when applications die + due to user signal. Thanks to Geoffroy Pignot for suggesting the + fix. +- Ensure that MPI_VERSION / MPI_SUBVERSION match what is returned by + MPI_GET_VERSION. Thanks to Rob Egan for reporting the error. +- Updated ``MPI_*KEYVAL_CREATE`` functions to properly handle Fortran + extra state. +- A variety of ob1 (main MPI point-to-point communication engine) bug + fixes that could have caused hangs or seg faults. +- Do not install Open MPI's signal handlers in MPI_INIT if there are + already signal handlers installed. Thanks to Kees Verstoep for + bringing the issue to our attention. +- Fix GM support to not seg fault in MPI_INIT. +- Various VampirTrace fixes. +- Various PLPA fixes. 
+- No longer create BTLs for invalid (TCP) devices. +- Various man page style and lint cleanups. +- Fix critical OpenFabrics-related bug noted here: + https://www.open-mpi.org/community/lists/announce/2009/03/0029.php. + Open MPI now uses a much more robust memory intercept scheme that is + quite similar to what is used by MX. The use of "-lopenmpi-malloc" + is no longer necessary, is deprecated, and is expected to disappear + in a future release. -lopenmpi-malloc will continue to work for the + duration of the Open MPI v1.3 and v1.4 series. +- Fix some OpenFabrics shutdown errors, both regarding iWARP and SRQ. +- Allow the udapl BTL to work on Solaris platforms that support + relaxed PCI ordering. +- Fix problem where the mpirun would sometimes use rsh/ssh to launch on + the localhost (instead of simply forking). +- Minor SLURM stdin fixes. +- Fix to run properly under SGE jobs. +- Scalability and latency improvements for shared memory jobs: convert + to using one message queue instead of N queues. +- Automatically size the shared-memory area (mmap file) to match + better what is needed; specifically, so that large-np jobs will start. +- Use fixed-length MPI predefined handles in order to provide ABI + compatibility between Open MPI releases. +- Fix building of the posix paffinity component to properly get the + number of processors in loosely tested environments (e.g., + FreeBSD). Thanks to Steve Kargl for reporting the issue. +- Fix ``--with-libnuma`` handling in configure. Thanks to Gus Correa for + reporting the problem. + + +Open MPI version 1.3.1 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 19 Mar 2009 + +- Added "sync" coll component to allow users to synchronize every N + collective operations on a given communicator. +- Increased the default values of the IB and RNR timeout MCA parameters. +- Fix a compiler error noted by Mostyn Lewis with the PGI 8.0 compiler. +- Fix an error that prevented stdin from being forwarded if the + rsh launcher was in use. 
Thanks to Branden Moore for pointing out + the problem. +- Correct a case where the added datatype is considered as contiguous but + has gaps in the beginning. +- Fix an error that limited the number of comm_spawns that could + simultaneously be running in some environments +- Correct a corner case in OB1's GET protocol for long messages; the + error could sometimes cause MPI jobs using the openib BTL to hang. +- Fix a bunch of bugs in the IO forwarding (IOF) subsystem and add some + new options to output to files and redirect output to xterm. Thanks to + Jody Weissmann for helping test out many of the new fixes and + features. +- Fix SLURM race condition. +- Fix MPI_File_c2f(MPI_FILE_NULL) to return 0, not -1. Thanks to + Lisandro Dalcín for the bug report. +- Fix the DSO build of tm PLM. +- Various fixes for size disparity between C int's and Fortran + INTEGER's. Thanks to Christoph van Wullen for the bug report. +- Ensure that mpirun exits with a non-zero exit status when daemons or + processes abort or fail to launch. +- Various fixes to work around Intel (NetEffect) RNIC behavior. +- Various fixes for mpirun's ``--preload-files`` and ``--preload-binary`` + options. +- Fix the string name in MPI::ERRORS_THROW_EXCEPTIONS. +- Add ability to forward SIFTSTP and SIGCONT to MPI processes if you + set the MCA parameter orte_forward_job_control to 1. +- Allow the sm BTL to allocate larger amounts of shared memory if + desired (helpful for very large multi-core boxen). +- Fix a few places where we used PATH_MAX instead of OPAL_PATH_MAX, + leading to compile problems on some platforms. Thanks to Andrea Iob + for the bug report. +- Fix mca_btl_openib_warn_no_device_params_found MCA parameter; it + was accidentally being ignored. +- Fix some run-time issues with the sctp BTL. +- Ensure that RTLD_NEXT exists before trying to use it (e.g., it + doesn't exist on Cygwin). Thanks to Gustavo Seabra for reporting + the issue. 
+- Various fixes to VampirTrace, including fixing compile errors on + some platforms. +- Fixed missing MPI_Comm_accept.3 man page; fixed minor issue in + orterun.1 man page. Thanks to Dirk Eddelbuettel for identifying the + problem and submitting a patch. +- Implement the XML formatted output of stdout/stderr/stddiag. +- Fixed mpirun's -wdir switch to ensure that working directories for + multiple app contexts are properly handled. Thanks to Geoffroy + Pignot for reporting the problem. +- Improvements to the MPI C++ integer constants: + + - Allow ``MPI::SEEK_*`` constants to be used as constants + - Allow other MPI C++ constants to be used as array sizes + +- Fix minor problem with orte-restart's command line options. See + ticket #1761 for details. Thanks to Gregor Dschung for reporting + the problem. + + +Open MPI version 1.3.0 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 19 Jan 2009 + +- Extended the OS X 10.5.x (Leopard) workaround for a problem when + assembly code is compiled with -g[0-9]. Thanks to Barry Smith for + reporting the problem. See ticket #1701. +- Disabled MPI_REAL16 and MPI_COMPLEX32 support on platforms where the + bit representation of ``REAL*16`` is different than that of the C type + of the same size (usually long double). Thanks to Julien Devriendt + for reporting the issue. See ticket #1603. +- Increased the size of MPI_MAX_PORT_NAME to 1024 from 36. See ticket #1533. +- Added "notify debugger on abort" feature. See tickets #1509 and #1510. + Thanks to Seppo Sahrakropi for the bug report. +- Upgraded Open MPI tarballs to use Autoconf 2.63, Automake 1.10.1, + Libtool 2.2.6a. +- Added missing MPI::Comm::Call_errhandler() function. Thanks to Dave + Goodell for bringing this to our attention. +- Increased MPI_SUBVERSION value in mpi.h to 1 (i.e., MPI 2.1). +- Changed behavior of MPI_GRAPH_CREATE, MPI_TOPO_CREATE, and several + other topology functions per MPI-2.1. +- Fix the type of the C++ constant MPI::IN_PLACE. 
+- Various enhancements to the openib BTL:
+
+ - Added ``btl_openib_if_[in|ex]clude`` MCA parameters for
+ including/excluding comma-delimited lists of HCAs and ports.
+ - Added RDMA CM support, including ``btl_openib_cpc_[in|ex]clude``
+ MCA parameters
+ - Added NUMA support to only use "near" network adapters
+ - Added "Bucket SRQ" (BSRQ) support to better utilize registered
+ memory, including btl_openib_receive_queues MCA parameter
+ - Added ConnectX XRC support (and integrated with BSRQ)
+ - Added btl_openib_ib_max_inline_data MCA parameter
+ - Added iWARP support
+ - Revamped flow control mechanisms to be more efficient
+ - ``mpi_leave_pinned=1`` is now the default when possible,
+ automatically improving performance for large messages when
+ application buffers are re-used
+
+- Eliminated duplicated error messages when multiple MPI processes fail
+ with the same error.
+- Added NUMA support to the shared memory BTL.
+- Add Valgrind-based memory checking for MPI-semantic checks.
+- Add support for some optional Fortran datatypes (MPI_LOGICAL1,
+ MPI_LOGICAL2, MPI_LOGICAL4 and MPI_LOGICAL8).
+- Remove the use of the STL from the C++ bindings.
+- Added support for Platform/LSF job launchers. Must be Platform LSF
+ v7.0.2 or later.
+- Updated ROMIO with the version from MPICH2 1.0.7.
+- Added RDMA capable one-sided component (called rdma), which
+ can be used with BTL components that expose a full one-sided
+ interface.
+- Added the optional datatype MPI_REAL2. As this is added to the "end of"
+ predefined datatypes in the fortran header files, there will not be
+ any compatibility issues.
+- Added Portable Linux Processor Affinity (PLPA) for Linux.
+- Addition of a finer symbols export control via the visibility feature
+ offered by some compilers.
+- Added checkpoint/restart process fault tolerance support. Initially
+ support a LAM/MPI-like protocol.
+- Removed "mvapi" BTL; all InfiniBand support now uses the OpenFabrics
+ driver stacks ("openib" BTL).
+- Added more stringent MPI API parameter checking to help user-level
+ debugging.
+- The ptmalloc2 memory manager component is now by default built as
+ a standalone library named libopenmpi-malloc. Users wanting to
+ use leave_pinned with ptmalloc2 will now need to link the library
+ into their application explicitly. All other users will use the
+ libc-provided allocator instead of Open MPI's ptmalloc2. This change
+ may be overridden with the configure option enable-ptmalloc2-internal
+- The leave_pinned options will now default to using mallopt on
+ Linux in the cases where ptmalloc2 was not linked in. mallopt
+ will also only be available if munmap can be intercepted (the
+ default whenever Open MPI is not compiled with ``--without-memory-manager``).
+- Open MPI will now complain and refuse to use leave_pinned if
+ no memory intercept / mallopt option is available.
+- Add option of using Perl-based wrapper compilers instead of the
+ C-based wrapper compilers. The Perl-based version does not
+ have the features of the C-based version, but does work better
+ in cross-compile environments.
+
+
+Open MPI v1.2.x series
+----------------------
+
+Open MPI version 1.2.9
+^^^^^^^^^^^^^^^^^^^^^^
+:Date: 14 Feb 2009
+
+- Fix a segfault when using one-sided communications on some forms of derived
+ datatypes. Thanks to Dorian Krause for reporting the bug. See #1715.
+- Fix an alignment problem affecting one-sided communications on
+ some architectures (e.g., SPARC64). See #1738.
+- Fix compilation on Solaris when thread support is enabled in Open MPI
+ (e.g., when using ``--with-threads``). See #1736.
+- Correctly take into account the MTU that an OpenFabrics device port
+ is using. See #1722 and
+ https://bugs.openfabrics.org/show_bug.cgi?id=1369.
+- Fix two datatype engine bugs. See #1677.
+ Thanks to Peter Kjellstrom for the bugreport.
+- Fix the bml r2 help filename so the help message can be found. See #1623.
+- Fix a compilation problem on RHEL4U3 with the PGI 32 bit compiler
+ caused by ``<infiniband/driver.h>``. See ticket #1613.
+- Fix the ``--enable-cxx-exceptions`` configure option. See ticket #1607.
+- Properly handle when the MX BTL cannot open an endpoint. See ticket #1621.
+- Fix a double free of events on the tcp_events list. See ticket #1631.
+- Fix a buffer overrun in opal_free_list_grow (called by MPI_Init).
+ Thanks to Patrick Farrell for the bugreport and Stephan Kramer for
+ the bugfix. See ticket #1583.
+- Fix a problem setting OPAL_PREFIX for remote sh-based shells.
+ See ticket #1580.
+
+
+Open MPI version 1.2.8
+^^^^^^^^^^^^^^^^^^^^^^
+:Date: 14 Oct 2008
+
+- Tweaked one memory barrier in the openib component to be more conservative.
+ May fix a problem observed on PPC machines. See ticket #1532.
+- Fix OpenFabrics IB partition support. See ticket #1557.
+- Restore v1.1 feature that sourced .profile on remote nodes if the default
+ shell will not do so (e.g. ``/bin/sh`` and ``/bin/ksh``). See ticket #1560.
+- Fix segfault in MPI_Init_thread() if ompi_mpi_init() fails. See ticket #1562.
+- Adjust SLURM support to first look for $SLURM_JOB_CPUS_PER_NODE instead of
+ the deprecated $SLURM_TASKS_PER_NODE environment variable. This change
+ may be **required** when using SLURM v1.2 and above. See ticket #1536.
+- Fix the MPIR_Proctable to be in process rank order. See ticket #1529.
+- Fix a regression introduced in 1.2.6 for the IBM eHCA. See ticket #1526.
+
+
+Open MPI version 1.2.7
+^^^^^^^^^^^^^^^^^^^^^^
+:Date: 28 Aug 2008
+
+- Add some Sun HCA vendor IDs. See ticket #1461.
+- Fixed a memory leak in MPI_Alltoallw when called from Fortran.
+ Thanks to Dave Grote for the bugreport. See ticket #1457.
+- Only link in libutil when it is needed/desired. Thanks to
+ Brian Barrett for diagnosing and fixing the problem. See ticket #1455.
+- Update some QLogic HCA vendor IDs. See ticket #1453.
+- Fix F90 binding for MPI_CART_GET.
Thanks to Scott Beardsley for + bringing it to our attention. See ticket #1429. +- Remove a spurious warning message generated in/by ROMIO. See ticket #1421. +- Fix a bug where command-line MCA parameters were not overriding + MCA parameters set from environment variables. See ticket #1380. +- Fix a bug in the AMD64 atomics assembly. Thanks to Gabriele Fatigati + for the bug report and bugfix. See ticket #1351. +- Fix a gather and scatter bug on intercommunicators when the datatype + being moved is 0 bytes. See ticket #1331. +- Some more man page fixes from the Debian maintainers. + See tickets #1324 and #1329. +- Have openib BTL (OpenFabrics support) check for the presence of + /sys/class/infiniband before allowing itself to be used. This check + prevents spurious "OMPI did not find RDMA hardware!" notices on + systems that have the software drivers installed, but no + corresponding hardware. See tickets #1321 and #1305. +- Added vendor IDs for some ConnectX openib HCAs. See ticket #1311. +- Fix some RPM specfile inconsistencies. See ticket #1308. + Thanks to Jim Kusznir for noticing the problem. +- Removed an unused function prototype that caused warnings on + some systems (e.g., OS X). See ticket #1274. +- Fix a deadlock in inter-communicator scatter/gather operations. + Thanks to Martin Audet for the bug report. See ticket #1268. + + +Open MPI version 1.2.6 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 7 Apr 2008 + +- Fix a bug in the inter-allgather for asymmetric inter-communicators. + Thanks to Martin Audet for the bug report. See ticket #1247. +- Fix a bug in the openib BTL when setting the CQ depth. Thanks + to Jon Mason for the bug report and fix. See ticket #1245. +- On Mac OS X Leopard, the execinfo component will be used for + backtraces, making for a more durable solution. See ticket #1246. +- Added vendor IDs for some QLogic DDR openib HCAs. See ticket #1227. +- Updated the URL to get the latest config.guess and config.sub files. 
+ Thanks to Ralf Wildenhues for the bug report. See ticket #1226. +- Added shared contexts support to PSM MTL. See ticket #1225. +- Added pml_ob1_use_early_completion MCA parameter to allow users + to turn off the OB1 early completion semantic and avoid "stall" + problems seen on InfiniBand in some cases. See ticket #1224. +- Sanitized some #define macros used in mpi.h to avoid compiler warnings + caused by MPI programs built with different autoconf versions. + Thanks to Ben Allan for reporting the problem, and thanks to + Brian Barrett for the fix. See ticket #1220. +- Some man page fixes from the Debian maintainers. See ticket #1219. +- Made the openib BTL a bit more resilient in the face of driver + errors. See ticket #1217. +- Fixed F90 interface for MPI_CART_CREATE. See ticket #1208. + Thanks to Michal Charemza for reporting the problem. +- Fixed some C++ compiler warnings. See ticket #1203. +- Fixed formatting of the orterun man page. See ticket #1202. + Thanks to Peter Breitenlohner for the patch. + + +Open MPI version 1.2.5 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 8 Jan 2008 + +- Fixed compile issue with open() on Fedora 8 (and newer) platforms. + Thanks to Sebastian Schmitzdorff for noticing the problem. +- Added run-time warnings during MPI_INIT when MPI_THREAD_MULTIPLE + and/or progression threads are used (the OMPI v1.2 series does not + support these well at all). +- Better handling of ECONNABORTED from connect on Linux. Thanks to + Bob Soliday for noticing the problem; thanks to Brian Barrett for + submitting a patch. +- Reduce extraneous output from OOB when TCP connections must + be retried. Thanks to Brian Barrett for submitting a patch. +- Fix for ConnectX devices and OFED 1.3. See ticket #1190. +- Fixed a configure problem for Fortran 90 on Cray systems. Ticket #1189. +- Fix an uninitialized variable in the error case in opal_init.c. + Thanks to Åke Sandgren for pointing out the mistake. +- Fixed a hang in configure if $USER was not defined. 
Thanks to + Darrell Kresge for noticing the problem. See ticket #900. +- Added support for parallel debuggers even when we have an optimized build. + See ticket #1178. +- Worked around a bus error in the Mac OS X 10.5.X (Leopard) linker when + compiling Open MPI with -g. See ticket #1179. +- Removed some warnings about 'rm' from Mac OS X 10.5 (Leopard) builds. +- Fix the handling of mx_finalize(). See ticket #1177. + Thanks to Åke Sandgren for bringing this issue to our attention. +- Fixed minor file descriptor leak in the Altix timer code. Thanks to + Paul Hargrove for noticing the problem and supplying the fix. +- Fix a problem when using a different compiler for C and Objective C. + See ticket #1153. +- Fix segfault in MPI_COMM_SPAWN when the user specified a working + directory. Thanks to Murat Knecht for reporting this and suggesting + a fix. +- A few manpage fixes from the Debian Open MPI maintainers. Thanks to + Tilman Koschnick, Sylvestre Ledru, and Dirk Eddelbuettel. +- Fixed issue with pthread detection when compilers are not all + from the same vendor. Thanks to Åke Sandgren for the bug + report. See ticket #1150. +- Fixed vector collectives in the self module. See ticket #1166. +- Fixed some data-type engine bugs: an indexing bug, and an alignment bug. + See ticket #1165. +- Only set the MPI_APPNUM attribute if it is defined. See ticket + #1164. + + +Open MPI version 1.2.4 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 26 Sep 2007 + +- Really added support for TotalView/DDT parallel debugger message queue + debugging (it was mistakenly listed as "added" in the 1.2 release). +- Fixed a build issue with GNU/kFreeBSD. Thanks to Petr Salinger for + the patch. +- Added missing MPI_FILE_NULL constant in Fortran. Thanks to + Bernd Schubert for bringing this to our attention. +- Change such that the UDAPL BTL is now only built in Linux when + explicitly specified via the ``--with-udapl`` configure command line + switch. 
+- Fixed an issue with umask not being propagated when using the TM + launcher. +- Fixed behavior if number of slots is not the same on all bproc nodes. +- Fixed a hang on systems without GPR support (ex. Cray XT3/4). +- Prevent users of 32-bit MPI apps from requesting >= 2GB of shared + memory. +- Added a Portals MTL. +- Fix 0 sized MPI_ALLOC_MEM requests. Thanks to Lisandro Dalcín for + pointing out the problem. +- Fixed a segfault crash on large SMPs when doing collectives. +- A variety of fixes for Cray XT3/4 class of machines. +- Fixed which error handler is used when MPI_COMM_SELF is passed + to MPI_COMM_FREE. Thanks to Lisandro Dalcín for the bug report. +- Fixed compilation on platforms that don't have hton/ntoh. +- Fixed a logic problem in the fortran binding for MPI_TYPE_MATCH_SIZE. + Thanks to Jeff Dusenberry for pointing out the problem and supplying + the fix. +- Fixed a problem with MPI_BOTTOM in various places of the f77-interface. + Thanks to Daniel Spangberg for bringing this up. +- Fixed problem where MPI-optional Fortran datatypes were not + correctly initialized. +- Fixed several problems with stdin/stdout forwarding. +- Fixed overflow problems with the sm mpool MCA parameters on large SMPs. +- Added support for the DDT parallel debugger via orterun's ``--debug`` + command line option. +- Added some sanity/error checks to the openib MCA parameter parsing + code. +- Updated the udapl BTL to use RDMA capabilities. +- Allow use of the BProc head node if it was allocated to the user. + Thanks to Sean Kelly for reporting the problem and helping debug it. +- Fixed a ROMIO problem where non-blocking I/O errors were not properly + reported to the user. +- Made remote process launch check the $SHELL environment variable if + a valid shell was not otherwise found for the user. + Thanks to Alf Wachsmann for the bugreport and suggested fix. +- Added/updated some vendor IDs for a few openib HCAs. 
+- Fixed a couple of failures that could occur when specifying devices
+ for use by the OOB.
+- Removed dependency on sysfsutils from the openib BTL for
+ libibverbs >=v1.1 (i.e., OFED 1.2 and beyond).
+
+
+Open MPI version 1.2.3
+^^^^^^^^^^^^^^^^^^^^^^
+:Date: 20 Jun 2007
+
+- Fix a regression in comm_spawn functionality that inadvertently
+ caused the mapping of child processes to always start at the same
+ place. Thanks to Prakash Velayutham for helping discover the
+ problem.
+- Fix segfault when a user's home directory is unavailable on a remote
+ node. Thanks to Guillaume Thomas-Collignon for bringing the issue
+ to our attention.
+- Fix MPI_IPROBE to properly handle MPI_STATUS_IGNORE on mx and psm
+ MTLs. Thanks to Sophia Corwell for finding this and supplying a
+ reproducer.
+- Fix some error messages in the tcp BTL.
+- Use _NSGetEnviron instead of environ on Mac OS X so that there
+ are no undefined symbols in the shared libraries.
+- On OS X, when MACOSX_DEPLOYMENT_TARGET is 10.3 or higher, support
+ building the Fortran 90 bindings as a shared library. Thanks to
+ Jack Howarth for his advice on making this work.
+- No longer require extra include flag for the C++ bindings.
+- Fix detection of weak symbols support with Intel compilers.
+- Fix issue found by Josh England: ompi_info would not show framework
+ MCA parameters set in the environment properly.
+- Rename the oob_tcp_include/exclude MCA params to oob_tcp_if_include/exclude
+ so that they match the naming convention of the btl_tcp_if_include/exclude
+ params. The old names are deprecated, but will still work.
+- Add -wd as a synonym for the -wdir orterun/mpirun option.
+- Fix the mvapi BTL to compile properly with compilers that do not support
+ anonymous unions. Thanks to Luis Kornblueh for reporting the bug.
+
+
+Open MPI version 1.2.2
+^^^^^^^^^^^^^^^^^^^^^^
+:Date: 16 May 2007
+
+- Fix regression in 1.2.1 regarding the handling of $CC with both
+ absolute and relative path names.
+- Fix F90 array of status dimensions. Thanks to Randy Bramley for + noticing the problem. +- Add btl_openib_ib_pkey_value MCA parameter for controlling IB port selection. +- Fixed a variety of threading/locking bugs. +- Fixed some compiler warnings associated with ROMIO, OS X, and gridengine. +- If pbs-config can be found, use it to look for TM support. Thanks + to Bas van der Vlies for the inspiration and preliminary work. +- Fixed a deadlock in orterun when the rsh PLS encounters some errors. + + +Open MPI version 1.2.1 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 25 Apr 2007 + +- Fixed a number of connection establishment errors in the TCP out- + of-band messaging system. +- Fixed a memory leak when using mpi_comm calls. + Thanks to Bas van der Vlies for reporting the problem. +- Fixed various memory leaks in OPAL and ORTE. +- Improved launch times when using TM (PBS Pro, Torque, Open PBS). +- Fixed mpi_leave_pinned to work for all datatypes. +- Fix functionality allowing users to disable sbrk() (the + mpool_base_disable_sbrk MCA parameter) on platforms that support it. +- Fixed a pair of problems with the TCP "listen_thread" mode for the + oob_tcp_listen_mode MCA parameter that would cause failures when + attempting to launch applications. +- Fixed a segfault if there was a failure opening a BTL MX endpoint. +- Fixed a problem with mpirun's ``--nolocal`` option introduced in 1.2. +- Re-enabled MPI_COMM_SPAWN_MULTIPLE from singletons. +- LoadLeveler and TM configure fixes, Thanks to Martin Audet for the + bug report. +- Various C++ MPI attributes fixes. +- Fixed issues with backtrace code on 64 bit Intel & PPC OS X builds. +- Fixed issues with multi-word CC variables and libtool. + Thanks to Bert Wesarg for the bug reports. +- Fix issue with non-uniform node naming schemes in SLURM. +- Fix file descriptor leak in the Grid Engine/N1GE support. +- Fix compile error on OS X 10.3.x introduced with Open MPI 1.1.5. 
+- Implement MPI_TYPE_CREATE_DARRAY function (was in 1.1.5 but not 1.2). +- Recognize zsh shell when using rsh/ssh for launching MPI jobs. +- Ability to set the OPAL_DESTDIR or OPAL_PREFIX environment + variables to "re-root" an existing Open MPI installation. +- Always include -I for Fortran compiles, even if the prefix is + /usr/local. +- Support for "fork()" in MPI applications that use the + OpenFabrics stack (OFED v1.2 or later). +- Support for setting specific limits on registered memory. + + +Open MPI version 1.2.0 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 15 Mar 2007 + +- Fixed race condition in the shared memory fifo's, which led to + orphaned messages. +- Corrected the size of the shared memory file - subtracted out the + space the header was occupying. +- Add support for MPI_2COMPLEX and MPI_2DOUBLE_COMPLEX. +- Always ensure to create $(includedir)/openmpi, even if the C++ + bindings are disabled so that the wrapper compilers don't point to + a directory that doesn't exist. Thanks to Martin Audet for + identifying the problem. +- Fixes for endian handling in MPI process startup. +- Openib BTL initialization fixes for cases where MPI processes in the + same job has different numbers of active ports on the same physical + fabric. +- Print more descriptive information when displaying backtraces on + OS's that support this functionality, such as the hostname and PID + of the process in question. +- Fixes to properly handle MPI exceptions in C++ on communicators, + windows, and files. +- Much more reliable runtime support, particularly with regards to MPI + job startup scalability, BProc support, and cleanup in failure + scenarios (e.g., MPI_ABORT, MPI processes abnormally terminating, + etc.). +- Significant performance improvements for MPI collectives, + particularly on high-speed networks. +- Various fixes in the MX BTL component. +- Fix C++ typecast problems with MPI_ERRCODES_IGNORE. Thanks to + Satish Balay for bringing this to our attention. 
+- Allow run-time specification of the maximum amount of registered + memory for OpenFabrics and GM. +- Users who utilize the wrapper compilers (e.g., mpicc and mpif77) + will not notice, but the underlying library names for ORTE and OPAL + have changed to libopen-rte and libopen-pal, respectively (listed + here because there are undoubtedly some users who are not using the + wrapper compilers). +- Many bug fixes to MPI-2 one-sided support. +- Added support for TotalView message queue debugging. +- Fixes for MPI_STATUS_SET_ELEMENTS. +- Print better error messages when mpirun's "-nolocal" is used when + there is only one node available. +- Added man pages for several Open MPI executables and the MPI API + functions. +- A number of fixes for Alpha platforms. +- A variety of Fortran API fixes. +- Build the Fortran MPI API as a separate library to allow these + functions to be profiled properly. +- Add new ``--enable-mpirun-prefix-by-default`` configure option to always + imply the ``--prefix`` option to mpirun, preventing many rsh/ssh-based + users from needing to modify their shell startup files. +- Add a number of missing constants in the C++ bindings. +- Added tight integration with Sun N1 Grid Engine (N1GE) 6 and the + open source Grid Engine. +- Allow building the F90 MPI bindings as shared libraries for most + compilers / platforms. Explicitly disallow building the F90 + bindings as shared libraries on OS X because of complicated + situations with Fortran common blocks and lack of support for + unresolved common symbols in shared libraries. +- Added stacktrace support for Solaris and Mac OS X. +- Update event library to libevent-1.1b. +- Fixed standards conformance issues with MPI_ERR_TRUNCATED and + setting MPI_ERROR during MPI_TEST/MPI_WAIT. +- Addition of "cm" PML to better support library-level matching + interconnects, with support for Myrinet/MX, and QLogic PSM-based + networks. +- Addition of "udapl" BTL for transport across uDAPL interconnects. 
+- Really check that the $CXX given to configure is a C++ compiler + (not a C compiler that "sorta works" as a C++ compiler). +- Properly check for local host only addresses properly, looking + for 127.0.0.0/8, rather than just 127.0.0.1. + + +Open MPI v1.1.x series +---------------------- + +Open MPI version 1.1.5 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 19 Mar 2007 + +- Implement MPI_TYPE_CREATE_DARRAY function. +- Fix race condition in shared memory BTL startup that could cause MPI + applications to hang in MPI_INIT. +- Fix syntax error in a corner case of the event library. Thanks to + Bert Wesarg for pointing this out. +- Add new MCA parameter (mpi_preconnect_oob) for pre-connecting the + "out of band" channels between all MPI processes. Most helpful for + MPI applications over InfiniBand where process A sends an initial + message to process B, but process B does not enter the MPI library + for a long time. +- Fix for a race condition in shared memory locking semantics. +- Add major, minor, and release version number of Open MPI to mpi.h. + Thanks to Martin Audet for the suggestion. +- Fix the "restrict" compiler check in configure. +- Fix a problem with argument checking in MPI_TYPE_CREATE_SUBARRAY. +- Fix a problem with compiling the XGrid components with non-gcc + compilers. + + +Open MPI version 1.1.4 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 30 Jan 2007 + +- Fixed 64-bit alignment issues with TCP interface detection on + intel-based OS X machines. +- Adjusted TCP interface selection to automatically ignore Linux + channel-bonded slave interfaces. +- Fixed the type of the first parameter to the MPI F90 binding for + MPI_INITIALIZED. Thanks to Tim Campbell for pointing out the + problem. +- Fix a bunch of places in the Fortran MPI bindings where (``MPI_Fint*``) + was mistakenly being used instead of (``MPI_Aint*``). +- Fixes for fortran MPI_STARTALL, which could sometimes return + incorrect request values. Thanks to Tim Campbell for pointing out + the problem. 
+- Include both pre- and post-MPI-2 errata bindings for + MPI::Win::Get_attr. +- Fix math error on Intel OS X platforms that would greatly increase + shared memory latency. +- Fix type casting issue with MPI_ERRCODES_IGNORE that would cause + errors when using a C++ compiler. Thanks to Barry Smith for + bringing this to our attention. +- Fix possible segmentation fault during shutdown when using the + MX BTL. + + +Open MPI version 1.1.3 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 26 Jan 2007 + +- Remove the "hierarch" coll component; it was not intended to be + included in stable releases yet. +- Fix a race condition with stdout/stderr not appearing properly from + all processes upon termination of an MPI job. +- Fix internal accounting errors with the self BTL. +- Fix typos in the code path for when sizeof(int) != sizeof(INTEGER) + in the MPI F77 bindings functions. Thanks to Pierre-Matthieu + Anglade for bringing this problem to our attention. +- Fix for a memory leak in the derived datatype function + ompi_ddt_duplicate(). Thanks to Andreas Schäfer for reporting, + diagnosing, and patching the leak. +- Used better performing basic algorithm for MPI_ALLGATHERV. +- Added a workaround for a bug in the Intel 9.1 C++ compiler (all + versions up to and including 20060925) in the MPI C++ bindings that + caused run-time failures. Thanks to Scott Weitzenkamp for reporting + this problem. +- Fix MPI_SIZEOF implementation in the F90 bindings for COMPLEX + variable types. +- Fixes for persistent requests involving MPI_PROC_NULL. Thanks to + Lisandro Dalcín for reporting the problem. +- Fixes to ``MPI_TEST*`` and ``MPI_WAIT*`` for proper MPI exception reporting. + Thanks to Lisandro Dalcín for finding the issue. +- Various fixes for MPI generalized request handling; addition of + missing MPI::Grequest functionality to the C++ bindings. +- Add "mpi_preconnect_all" MCA parameter to force wireup of all MPI + connections during MPI_INIT (vs. 
making connections lazily whenever
+ the first MPI communication occurs between a pair of peers).
+- Fix a problem for when $FC and/or $F77 were specified as multiple
+ tokens. Thanks to Orion Poplawski for identifying the problem and
+ to Ralf Wildenhues for suggesting the fix.
+- Fix several ``MPI_*ERRHANDLER*`` functions and MPI_GROUP_TRANSLATE_RANKS
+ with respect to what arguments they allowed and the behavior that
+ they effected. Thanks to Lisandro Dalcín for reporting the
+ problems.
+
+
+Open MPI version 1.1.2
+^^^^^^^^^^^^^^^^^^^^^^
+:Date: 18 Oct 2006
+
+- Really fix Fortran status handling in MPI_WAITSOME and MPI_TESTSOME.
+- Various datatype fixes, reported by several users as causing
+ failures in the BLACS testing suite. Thanks to Harald Forbert, Åke
+ Sandgren, and Michael Kluskens for reporting the problem.
+- Correctness and performance fixes for heterogeneous environments.
+- Fixed an error in command line parsing on some platforms (causing
+ mpirun to crash without doing anything).
+- Fix for initialization hangs on 64 bit Mac OS X PowerPC systems.
+- Fixed some memory allocation problems in mpirun that could cause
+ random problems if "-np" was not specified on the command line.
+- Add Kerberos authentication support for XGrid.
+- Added LoadLeveler support for jobs larger than 128 tasks.
+- Fix for large-sized Fortran LOGICAL datatypes.
+- Fix various error checking in MPI_INFO_GET_NTHKEY and
+ MPI_GROUP_TRANSLATE_RANKS, and some collective operations
+ (particularly with regards to MPI_IN_PLACE). Thanks to Lisandro
+ Dalcín for reporting the problems.
+- Fix receiving messages to buffers allocated by MPI_ALLOC_MEM.
+- Fix a number of race conditions with the MPI-2 Onesided
+ interface.
+- Fix the "tuned" collective component where some cases of
+ MPI_BCAST could hang.
+- Update TCP support to support non-uniform TCP environments.
+- Allow the "poe" RAS component to be built on AIX or Linux.
+- Only install mpif.h if the rest of the Fortran bindings are
+ installed.
+- Fixes for BProc node selection.
+- Add some missing Fortran MPI-2 IO constants.
+
+
+Open MPI version 1.1.1
+^^^^^^^^^^^^^^^^^^^^^^
+:Date: 28 Aug 2006
+
+- Fix for Fortran string handling in various MPI API functions.
+- Fix for Fortran status handling in MPI_WAITSOME and MPI_TESTSOME.
+- Various fixes for the XL compilers.
+- Automatically disable using mallopt() on AIX.
+- Memory fixes for 64 bit platforms with registering MCA parameters in
+ the self and MX BTL components.
+- Fixes for BProc to support oversubscription and changes to the
+ mapping algorithm so that mapping processes "by slot" works as
+ expected.
+- Fixes for various abort cases to not hang and clean up nicely.
+- If using the Intel 9.0 v20051201 compiler on an IA64 platform, the
+ ptmalloc2 memory manager component will automatically disable
+ itself. Other versions of the Intel compiler on this platform seem
+ to work fine (e.g., 9.1).
+- Added "host" MPI_Info key to MPI_COMM_SPAWN and
+ MPI_COMM_SPAWN_MULTIPLE.
+- Add missing C++ methods: MPI::Datatype::Create_indexed_block,
+ MPI::Datatype::Create_resized, MPI::Datatype::Get_true_extent.
+- Fix OSX linker issue with Fortran bindings.
+- Fixed MPI_COMM_SPAWN to start spawning new processes in slots that
+ (according to Open MPI) are not already in use.
+- Added capability to "mpirun a.out" (without specifying -np) that
+ will run on all currently-allocated resources (e.g., within a batch
+ job such as SLURM, Torque, etc.).
+- Fix a bug with one particular case of MPI_BCAST. Thanks to Doug
+ Gregor for identifying the problem.
+- Ensure that the shared memory mapped file is only created when there
+ is more than one process on a node.
+- Fixed problems with BProc stdin forwarding.
+- Fixed problem with MPI_TYPE_INDEXED datatypes. Thanks to Yven
+ Fournier for identifying this problem.
+- Fix some thread safety issues in MPI attributes and the openib BTL.
+- Fix the BProc allocator to not potentially use the same resources + across multiple ORTE universes. +- Fix gm resource leak. +- More latency reduction throughout the code base. +- Make the TM PLS (PBS Pro, Torque, Open PBS) more scalable, and fix + some latent bugs that crept in v1.1. Thanks to the Thunderbird crew + at Sandia National Laboratories and Martin Schaffoner for access to + testing facilities to make this happen. +- Added new command line options to mpirun: + + - ``--nolocal``: Do not run any MPI processes on the same node as mpirun + (compatibility with the OSC mpiexec launcher) + - ``--nooversubscribe``: Abort if the number of processes requested would + cause oversubscription + - ``--quiet / -q``: do not show spurious status messages + - ``--version / -V``: show the version of Open MPI + +- Fix bus error in XGrid process starter. Thanks to Frank from the + Open MPI user's list for identifying the problem. +- Fix data size mismatches that caused memory errors on PPC64 + platforms during the startup of the openib BTL. +- Allow propagation of SIGUSR1 and SIGUSR2 signals from mpirun to + back-end MPI processes. +- Add missing MPI::Is_finalized() function. + + +Open MPI version 1.1.0 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 23 Jun 2006 + +- Various MPI datatype fixes, optimizations. +- Fixed various problems on the SPARC architecture (e.g., not + correctly aligning addresses within structs). +- Improvements in various run-time error messages to be more clear + about what they mean and where the errors are occurring. +- Various fixes to mpirun's handling of ``--prefix``. +- Updates and fixes for Cray/Red Storm support. +- Major improvements to the Fortran 90 MPI bindings: + + - General improvements in compile/linking time and portability + between different F90 compilers. + - Addition of "trivial", "small" (the default), and "medium" + Fortran 90 MPI module sizes (v1.0.x's F90 module was + equivalent to "medium"). See the README file for more + explanation. 
+ - Fix various MPI F90 interface functions and constant types to + match. Thanks to Michael Kluskens for pointing out the problems + to us. + +- Allow short messages to use RDMA (vs. send/receive semantics) to a + limited number of peers in both the mvapi and openib BTL components. + This reduces communication latency over IB channels. +- Numerous performance improvements throughout the entire code base. +- Many minor threading fixes. +- Add a define OMPI_SKIP_CXX to allow the user to skip the mpicxx.h from + being included in mpi.h. It allows the user to compile C code with a CXX + compiler without including the CXX bindings. +- PERUSE support has been added. In order to activate it add + ``--enable-peruse`` to the configure options. All events described in + the PERUSE 2.0 draft are supported, plus one Open MPI + extension. PERUSE_COMM_REQ_XFER_CONTINUE allows one to see how the data + is segmented internally, using multiple interfaces or the pipeline + engine. However, this version only supports one event of each type + simultaneously attached to a communicator. +- Add support for running jobs in heterogeneous environments. + Currently supports environments with different endianness and + different representations of C++ bool and Fortran LOGICAL. + Mismatched sizes for other datatypes are not supported. +- Open MPI now includes an implementation of the MPI-2 One-Sided + Communications specification. +- Open MPI is now configurable in cross-compilation environments. + Several Fortran 77 and Fortran 90 tests need to be pre-seeded with + results from a config.cache-like file. +- Add ``--debug`` option to mpirun to generically invoke a parallel debugger. + + +Open MPI v1.0.x series +---------------------- + +Open MPI version 1.0.3 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: Not released (all fixes included in 1.1.0) + +.. important:: + v1.0.3 was not released + +- Fix a problem noted by Chris Hennes where MPI_INFO_SET incorrectly + disallowed long values. 
+- Fix a problem in the launch system that could cause inconsistent + launch behavior, particularly when launching large jobs. +- Require that the openib BTL find active ports. Thanks to Josh + Aune for the suggestion. +- Include updates to support the upcoming Autoconf 2.60 and Libtool + 2.0. Thanks to Ralf Wildenhues for all the work! +- Fix bug with infinite loop in the "round robin" process mapper. + Thanks to Paul Donohue for reporting the problem. +- Ensure that memory hooks are removed properly during MPI_FINALIZE. + Thanks to Neil Ludban for reporting the problem. +- Various fixes to the included support for ROMIO. +- Fix to ensure that MPI_LONG_LONG and MPI_LONG_LONG_INT are actually + synonyms, as defined by the MPI standard. Thanks to Martin Audet + for reporting this. +- Fix Fortran 90 configure tests to properly utilize LDFLAGS and LIBS. + Thanks to Terry Reeves for reporting the problem. +- Fix shared memory progression in asynchronous progress scenarios. + Thanks to Mykael Bouquey for reporting the problem. +- Fixed back-end operations for predefined MPI_PROD for some + datatypes. Thanks to Bert Wesarg for reporting this. +- Adapted configure to be able to handle Torque 2.1.0p0's (and above) + new library name. Thanks to Brock Palen for pointing this out and + providing access to a Torque 2.1.0p0 cluster to test with. +- Fixed situation where mpirun could set a shell pipeline's stdout + to non-blocking, causing the shell pipeline to prematurely fail. + Thanks to Darrell Kresge for figuring out what was happening. +- Fixed problems with leave_pinned that could cause Badness with the + mvapi BTL. +- Fixed problems with MPI_FILE_OPEN and non-blocking MPI-2 IO access. +- Fixed various InfiniBand port matching issues during startup. + Thanks to Scott Weitzenkamp for identifying these problems. +- Fixed various configure, build and run-time issues with ROMIO. + Thanks to Dries Kimpe for bringing them to our attention. 
+- Fixed error in MPI_COMM_SPLIT when dealing with intercommunicators. + Thanks to Bert Wesarg for identifying the problem. +- Fixed backwards handling of "high" parameter in MPI_INTERCOMM_MERGE. + Thanks to Michael Kluskens for pointing this out to us. +- Fixed improper handling of string arguments in Fortran bindings + for MPI-IO functionality +- Fixed segmentation fault with 64 bit applications on Solaris when + using the shared memory transports. +- Fixed MPI_COMM_SELF attributes to free properly at the beginning of + MPI_FINALIZE. Thanks to Martin Audet for bringing this to our + attention. +- Fixed alignment tests for cross-compiling to not cause errors with + recent versions of GCC. + + +Open MPI version 1.0.2 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 7 Apr 2006 + +- Fixed assembly race condition on AMD64 platforms. +- Fixed residual .TRUE. issue with copying MPI attributes set from + Fortran. +- Remove unnecessary logic from Solaris pty I/O forwarding. Thanks to + Francoise Roch for bringing this to our attention. +- Fixed error when count = 0 was given for multiple completion MPI + functions (MPI_TESTSOME, MPI_TESTANY, MPI_TESTALL, MPI_WAITSOME, + MPI_WAITANY, MPI_WAITALL). +- Better handling in MPI_ABORT for when peer processes have already + died, especially under some resource managers. +- Random updates to README file, to include notes about the Portland + compilers. +- Random, small threading fixes to prevent deadlock. +- Fixed a problem with handling long mpirun app files. Thanks to Ravi + Manumachu for identifying the problem. +- Fix handling of strings in several of the Fortran 77 bindings. +- Fix LinuxPPC assembly issues. Thanks to Julian Seward for reporting + the problem. +- Enable pty support for standard I/O forwarding on platforms that + have ptys but do not have openpty(). Thanks to Pierre Valiron for + bringing this to our attention. +- Disable inline assembly for PGI compilers to avoid compiler errors. 
+ Thanks to Troy Telford for bringing this to our attention. +- Added MPI_UNSIGNED_CHAR and MPI_SIGNED_CHAR to the allowed reduction + types. +- Fix a segv in variable-length message displays on Opterons running + Solaris. Thanks to Pierre Valiron for reporting the issue. +- Added MPI_BOOL to the intrinsic reduction operations MPI_LAND, + MPI_LOR, MPI_LXOR. Thanks to Andy Selle for pointing this out to us. +- Fixed TCP BTL network matching logic during MPI_INIT; in some cases + on multi-NIC nodes, a NIC could get paired with a NIC on another + network (typically resulting in deadlock). Thanks to Ken Mighell + for pointing this out to us. +- Change the behavior of orterun (mpirun, mpiexec) to search for + argv[0] and the cwd on the target node (i.e., the node where the + executable will be running in all systems except BProc, where the + searches are run on the node where orterun is invoked). +- Fix race condition in shared memory transport that could cause + crashes on machines with weak memory consistency models (including + POWER/PowerPC machines). +- Fix warnings about setting read-only MCA parameters on bproc systems. +- Change the exit status set by mpirun when an application process is + killed by a signal. The exit status is now set to signo + 128, which + conforms with the behavior of (almost) all shells. +- Correct a datatype problem with the convertor when partially + unpacking data. Now we can position the convertor to any position + not only on the predefined types boundaries. Thanks to Yvan Fournier + for reporting this to us. +- Fix a number of standard I/O forwarding issues, including the + ability to background mpirun and a loss of data issue when + redirecting mpirun's standard input from a file. +- Fixed bug in ompi_info where rcache and bml MCA parameters would not + be displayed. +- Fixed umask issues in the session directory. Thanks to Glenn Morris + for reporting this to us. +- Fixed tcsh-based LD_LIBRARY_PATH issues with ``--prefix``. 
Thanks to + Glen Morris for identifying the problem and suggesting the fix. +- Removed extraneous \n's when setting PATH and LD_LIBRARY_PATH in the + rsh startup. Thanks to Glen Morris for finding these typos. +- Fixed missing constants in MPI C++ bindings. +- Fixed some errors caused by threading issues. +- Fixed openib BTL flow control logic to not overrun the number of + send wqes available. +- Update to match newest OpenIB user-level library API. Thanks to + Roland Dreier for submitting this patch. +- Report errors properly when failing to register memory in the openib + BTL. +- Reduce memory footprint of openib BTL. +- Fix parsing problem with mpirun's "-tv" switch. Thanks to Chris + Gottbrath for supplying the fix. +- Fix Darwin net/if.h configure warning. +- The GNU assembler unbelievably defaults to making stacks executable. + So when using gas, add flags to explicitly tell it to not make + stacks executable (lame but necessary). +- Add missing MPI::Request::Get_status() methods. Thanks to Bill + Saphir for pointing this out to us. +- Improved error messages on memory registration errors (e.g., when + using high-speed networks). +- Open IB support now checks firmware for how many outstanding RDMA + requests are supported. Thanks to Mellanox for pointing this out to + us. +- Enable printing of stack traces in MPI processes upon SIGBUS, + SIGSEGV, and SIGFPE if the platform supports it. +- Fixed F90 compilation support for the Lahey compiler. +- Fixed issues with ROMIO shared library support. +- Fixed internal accounting problems with rsh support. +- Update to GNU Libtool 1.5.22. +- Fix error in configure script when setting CCAS to ias (the Intel + assembler). +- Added missing MPI::Intercomm collectives. +- Fixed MPI_IN_PLACE handling for Fortran collectives. +- Fixed some more C++ const_cast<> issues. Thanks for Martin Audet + (again) for bringing this to our attention. +- Updated ROMIO with the version from MPICH 1.2.7p1, marked as version + 2005-06-09. 
+- Fixes for some cases where the use of MPI_BOTTOM could cause + problems. +- Properly handle the case where an mVAPI does not have shared receive + queue support (such as the one shipped by SilverStorm / Infinicon + for OS X). + + +Open MPI version 1.0.1 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 12 Dec 2005 + +- Fixed assembly on Solaris AMD platforms. Thanks to Pierre Valiron + for bringing this to our attention. +- Fixed long messages in the send-to-self case. +- Ensure that when the "leave_pinned" option is used, the memory hooks + are also enabled. Thanks to Gleb Natapov for pointing this out. +- Fixed compile errors for IRIX. +- Allow hostfiles to have integer host names (for BProc clusters). +- Fixed a problem with message matching of out-of-order fragments in + multiple network device scenarios. +- Converted all the C++ MPI bindings to use proper const_cast<>'s + instead of old C-style casts to get rid of const-ness. Thanks to + Martin Audet for raising the issue with us. +- Converted MPI_Offset to be a typedef instead of a #define because it + causes problems for some C++ parsers. Thanks to Martin Audet for + bringing this to our attention. +- Improved latency of TCP BTL. +- Fixed index value in MPI_TESTANY to be MPI_UNDEFINED if some + requests were not MPI_REQUEST_NULL, but no requests finished. +- Fixed several Fortran MPI API implementations that incorrectly used + integers instead of logicals or address-sized integers. +- Fix so that Open MPI correctly handles the Fortran value for .TRUE., + regardless of what the Fortran compiler's value for .TRUE. is. +- Improved scalability of MX startup. +- Fix datatype offset handling in the coll basic component's + MPI_SCATTERV implementation. +- Fix EOF handling on stdin. +- Fix missing MPI_F_STATUS_IGNORE and MPI_F_STATUSES_IGNORE + instantiations. Thanks to Anthony Chan for pointing this out. +- Add a missing value for MPI_WIN_NULL in mpif.h. 
+- Bring over some fixes for the sm btl that somehow didn't make it + over from the trunk before v1.0. Thanks to Beth Tibbitts and Bill + Chung for helping identify this issue. +- Bring over some fixes for the iof that somehow didn't make it over + from the trunk before v1.0. +- Fix for ``--with-wrapper-ldflags`` handling. Thanks to Dries Kimpe for + pointing this out to us. + + +Open MPI version 1.0.0 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 17 Nov 2005 + +- Initial public release. diff --git a/docs/news/news-v2.x.rst b/docs/news/news-v2.x.rst new file mode 100644 index 00000000000..a534c3aa5e5 --- /dev/null +++ b/docs/news/news-v2.x.rst @@ -0,0 +1,836 @@ +Open MPI v2.x series +==================== + +This file contains all the NEWS updates for all the Open MPI v2.x +series, in reverse chronological order. + +Open MPI v2.1.x series +---------------------- + +Open MPI version 2.1.5 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: August 2018 + +- A subtle race condition bug was discovered in the "vader" BTL + (shared memory communications) that, in rare instances, can cause + MPI processes to crash or incorrectly classify (or effectively drop) + an MPI message sent via shared memory. If you are using the "ob1" + PML with "vader" for shared memory communication (note that vader is + the default for shared memory communication with ob1), you need to + upgrade to v2.1.5 to fix this issue. You may also upgrade to the + following versions to fix this issue: + + - Open MPI v3.0.1 (released March, 2018) or later in the v3.0.x + series + - Open MPI v3.1.2 (expected end of August, 2018) or later + +- A link issue was fixed when the UCX library was not located in the + linker-default search paths. + +Open MPI version 2.1.4 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: August, 2018 + +- Disable the POWER 7/BE block in configure. Note that POWER 7/BE is + still not a supported platform, but it is no longer automatically + disabled. 
See + https://github.com/open-mpi/ompi/issues/4349#issuecomment-374970982 + for more information. +- Fix bug with request-based one-sided MPI operations when using the + "rdma" component. +- Fix issue with large data structure in the TCP BTL causing problems + in some environments. Thanks to @lgarithm for reporting the issue. +- Minor Cygwin build fixes. +- Minor fixes for the openib BTL: + 1. Support for the QLogic RoCE HCA + 2. Support for the Boradcom Cumulus RoCE HCA + 3. Enable support for HDR link speeds +- Fix MPI_FINALIZED hang if invoked from an attribute destructor + during the MPI_COMM_SELF destruction in MPI_FINALIZE. Thanks to + @AndrewGaspar for reporting the issue. +- Java fixes: + + - Modernize Java framework detection, especially on OS X/MacOS. + Thanks to Bryce Glover for reporting and submitting the fixes. + - Prefer "javac -h" to "javah" to support newer Java frameworks. + +- Fortran fixes: + + - Use conformant dummy parameter names for Fortran bindings. Thanks + to Themos Tsikas for reporting and submitting the fixes. + - Build the MPI_SIZEOF() interfaces in the "TKR"-style "mpi" module + whenever possible. Thanks to Themos Tsikas for reporting the + issue. + - Fix array of argv handling for the Fortran bindings of + MPI_COMM_SPAWN_MULTIPLE (and its associated man page). + - Make NAG Fortran compiler support more robust in configure. + +- Disable the "pt2pt" one-sided MPI component when MPI_THREAD_MULTIPLE + is used. This component is simply not safe in MPI_THREAD_MULTIPLE + scenarios, and will not be fixed in the v2.1.x series. +- Make the "external" hwloc component fail gracefully if it is tries + to use an hwloc v2.x.y installation. hwloc v2.x.y will not be + supported in the Open MPI v2.1.x series. +- Fix "vader" shared memory support for messages larger than 2GB. + Thanks to Heiko Bauke for the bug report. +- Configure fixes for external PMI directory detection. Thanks to + Davide Vanzo for the report. 
+ + +Open MPI version 2.1.3 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: March, 2018 + +- Update internal PMIx version to 1.2.5. +- Fix a problem with ompi_info reporting using param option. + Thanks to Alexander Pozdneev for reporting. +- Correct PMPI_Aint_{add|diff} to be functions (not subroutines) + in the Fortran mpi_f08 module. +- Fix a problem when doing MPI I/O using data types with large + extents in conjunction with MPI_TYPE_CREATE_SUBARRAY. Thanks to + Christopher Brady for reporting. +- Fix a problem when opening many files using MPI_FILE_OPEN. + Thanks to William Dawson for reporting. +- Fix a problem with debuggers failing to attach to a running job. + Thanks to Dirk Schubert for reporting. +- Fix a problem when using madvise and the OpenIB BTL. Thanks to + Timo Bingmann for reporting. +- Fix a problem in the Vader BTL that resulted in failures of + IMB under certain circumstances. Thanks to Nicolas Morey- + Chaisemartin for reporting. +- Fix a problem preventing Open MPI from working under Cygwin. + Thanks to Marco Atzeri for reporting. +- Reduce some verbosity being emitted by the USNIC BTL under certain + circumstances. Thanks to Peter Forai for reporting. +- Fix a problem with misdirection of SIGKILL. Thanks to Michael Fern + for reporting. +- Replace use of posix_memalign with malloc for small allocations. Thanks + to Ben Menaude for reporting. +- Fix a problem with Open MPI's out of band TCP network for file descriptors + greater than 32767. Thanks to Wojtek Wasko for reporting and fixing. +- Plug a memory leak in MPI_Mem_free(). Thanks to Philip Blakely for reporting. + + +Open MPI version 2.1.2 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: September, 2017 + +- Update internal PMIx version to 1.2.3. +- Fix some problems when using the NAG Fortran compiler to build Open MPI + and when using the compiler wrappers. Thanks to Neil Carlson for reporting. +- Fix a compilation problem with the SM BTL. Thanks to Paul Hargrove for + reporting. 
+- Fix a problem with MPI_IALLTOALLW when using zero-length messages. + Thanks to Dahai Guo for reporting. +- Fix a problem with C11 generic type interface for SHMEM_G. Thanks + to Nick Park for reporting. +- Switch to using the lustreapi.h include file when building Open MPI + with Lustre support. +- Fix a problem in the OB1 PML that led to hangs with OSU collective tests. +- Fix a progression issue with MPI_WIN_FLUSH_LOCAL. Thanks to + Joseph Schuchart for reporting. +- Fix an issue with recent versions of PBSPro requiring libcrypto. + Thanks to Petr Hanousek for reporting. +- Fix a problem when using MPI_ANY_SOURCE with MPI_SENDRECV. +- Fix an issue that prevented signals from being propagated to ORTE + daemons. +- Ensure that signals are forwarded from ORTE daemons to all processes + in the process group created by the daemons. Thanks to Ted Sussman + for reporting. +- Fix a problem with launching a job under a debugger. Thanks to + Greg Lee for reporting. +- Fix a problem with Open MPI native I/O MPI_FILE_OPEN when using + a communicator having an associated topology. Thanks to + Wei-keng Liao for reporting. +- Fix an issue when using MPI_ACCUMULATE with derived datatypes. +- Fix a problem with Fortran bindings that led to compilation errors + for user defined reduction operations. Thanks to Nathan Weeks for + reporting. +- Fix ROMIO issues with large writes/reads when using NFS file systems. +- Fix definition of Fortran MPI_ARGV_NULL and MPI_ARGVS_NULL. +- Enable use of the head node of a SLURM allocation on Cray XC systems. +- Fix a problem with synchronous sends when using the UCX PML. +- Use default socket buffer size to improve TCP BTL performance. +- Add a mca parameter ras_base_launch_orted_on_hn to allow for launching + MPI processes on the same node where mpirun is executing using a separate + orte daemon, rather than the mpirun process. 
This may be useful to set to + true when using SLURM, as it improves interoperability with SLURM's signal + propagation tools. By default it is set to false, except for Cray XC systems. +- Fix ``--without-lsf`` when lsf is installed in the default search path. +- Remove support for big endian PowerPC. +- Remove support for XL compilers older than v13.1 +- Remove IB XRC support from the OpenIB BTL due to loss of maintainer. + + +Open MPI version 2.1.1 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: April, 2017 + +- Fix a problem with one of Open MPI's fifo data structures which led to + hangs in a make check test. Thanks to Nicolas Morey-Chaisemartin for + reporting. +- Add missing MPI_AINT_ADD/MPI_AINT_DIFF function definitions to mpif.h. + Thanks to Aboorva Devarajan for reporting. +- Fix the error return from MPI_WIN_LOCK when rank argument is invalid. + Thanks to Jeff Hammond for reporting and fixing this issue. +- Fix a problem with mpirun/orterun when started under a debugger. Thanks + to Gregory Leff for reporting. +- Add configury option to disable use of CMA by the vader BTL. Thanks + to Sascha Hunold for reporting. +- Add configury check for MPI_DOUBLE_COMPLEX datatype support. + Thanks to Alexander Klein for reporting. +- Fix memory allocated by MPI_WIN_ALLOCATE_SHARED to + be 64 bit aligned. Thanks to Joseph Schuchart for + reporting. +- Update MPI_WTICK man page to reflect possibly higher + resolution than 10e-6. Thanks to Mark Dixon for + reporting +- Add missing MPI_T_PVAR_SESSION_NULL definition to mpi.h + include file. Thanks to Omri Mor for this contribution. +- Enhance the Open MPI spec file to install modulefile in ``/opt`` + if installed in a non-default location. Thanks to Kevin + Buckley for reporting and supplying a fix. +- Fix a problem with conflicting PMI symbols when linking statically. + Thanks to Kilian Cavalotti for reporting. + +.. 
note:: Known issues (to be addressed in v2.1.2): + + - See the list of fixes slated for v2.1.2 here: + https://github.com/open-mpi/ompi/milestone/28?closed=1 + + +Open MPI version 2.1.0 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: March, 2017 + +.. important:: Major new features: + + - The main focus of the Open MPI v2.1.0 release was to update to PMIx + v1.2.1. When using PMIx (e.g., via mpirun-based launches, or via + direct launches with recent versions of popular resource managers), + launch time scalability is improved, and the run time memory + footprint is greatly decreased when launching large numbers of MPI / + OpenSHMEM processes. + - Update OpenSHMEM API conformance to v1.3. + - The usnic BTL now supports MPI_THREAD_MULTIPLE. + - General/overall performance improvements to MPI_THREAD_MULTIPLE. + - Add a summary message at the bottom of configure that tells you many + of the configuration options specified and/or discovered by Open MPI. + +.. attention:: Removed legacy support: + + - The ptmalloc2 hooks have been removed from the Open MPI code base. + This is not really a user-noticeable change; it is only mentioned + here because there was much rejoicing in the Open MPI developer + community. + +- New MCA parameters: + + - **iof_base_redirect_app_stderr_to_stdout**: as its name implies, it + combines MPI / OpenSHMEM applications' stderr into its stdout + stream. + - **opal_event_include**: allow the user to specify which FD selection + mechanism is used by the underlying event engine. + - **opal_stacktrace_output**: indicate where stacktraces should be sent + upon MPI / OpenSHMEM process crashes (``none``, ``stdout``, + ``stderr``, ``file:filename``). + - **orte_timeout_for_stack_trace**: number of seconds to wait for stack + traces to be reported (or ``<=0`` to wait forever). + - **mtl_ofi_control_prog_type**/**mtl_ofi_data_prog_type**: specify + libfabric progress model to be used for control and data. 
+ +- Fix MPI_WTICK regression where the time reported may be inaccurate + on systems with processor frequency scaling enabled. +- Fix regression that lowered the memory maximum message bandwidth for + large messages on some BTL network transports, such as openib, sm, + and vader. +- Fix a name collision in the shared file pointer MPI IO file locking + scheme. Thanks to Nicolas Joly for reporting the issue. +- Fix datatype extent/offset errors in MPI_PUT and MPI_RACCUMULATE + when using the Portals 4 one-sided component. +- Add support for non-contiguous datatypes to the Portals 4 one-sided + component. +- Various updates for the UCX PML. +- Updates to the following man pages: + + - mpirun(1) + - MPI_COMM_CONNECT(3) + - MPI_WIN_GET_NAME(3). Thanks to Nicolas Joly for reporting the + typo. + - MPI_INFO_GET_[NKEYS|NTHKEY](3). Thanks to Nicolas Joly for + reporting the typo. + +- Fixed a problem in the TCP BTL when using MPI_THREAD_MULTIPLE. + Thanks to Evgueni Petrov for reporting. +- Fixed external32 representation in the romio314 module. Note that + for now, external32 representation is not correctly supported by the + ompio module. Thanks to Thomas Gastine for bringing this to our + attention. +- Add note how to disable a warning message about when a high-speed + MPI transport is not found. Thanks to Susan Schwarz for reporting + the issue. +- Ensure that sending SIGINT when using the rsh/ssh launcher does not + orphan children nodes in the launch tree. +- Fix the help message when showing deprecated MCA param names to show + the correct (i.e., deprecated) name. +- Enable support for the openib BTL to use multiple different + InfiniBand subnets. +- Fix a minor error in MPI_AINT_DIFF. +- Fix bugs with MPI_IN_PLACE handling in: + + - MPI_ALLGATHER[V] + - MPI_[I][GATHER|SCATTER][V] + - MPI_IREDUCE[_SCATTER] + - Thanks to all the users who helped diagnose these issues. + +- Allow qrsh to tree spawn (if the back-end system supports it). 
+- Fix MPI_T_PVAR_GET_INDEX to return the correct index. +- Correctly position the shared file pointer in append mode in the + OMPIO component. +- Add some deprecated names into shmem.h for backwards compatibility + with legacy codes. +- Fix MPI_MODE_NOCHECK support. +- Fix a regression in PowerPC atomics support. Thanks to Orion + Poplawski for reporting the issue. +- Fixes for assembly code with aggressively-optimized compilers on + x86_64/AMD64 platforms. +- Fix one more place where configure was mangling custom CFLAGS. + Thanks to Phil Tooley (@Telemin) for reporting the issue. +- Better handle builds with external installations of hwloc. +- Fixed a hang with MPI_PUT and MPI_WIN_LOCK_ALL. +- Fixed a bug when using MPI_GET on non-contiguous datatypes and + MPI_LOCK/MPI_UNLOCK. +- Fixed a bug when using POST/START/COMPLETE/WAIT after a fence. +- Fix configure portability by cleaning up a few uses of "==" with + "test". Thanks to Kevin Buckley for pointing out the issue. +- Fix bug when using darrays with lb and extent of darray datatypes. +- Updates to make Open MPI binary builds more bit-for-bit + reproducible. Thanks to Alastair McKinstry for the suggestion. +- Fix issues regarding persistent request handling. +- Ensure that shmemx.h is a standalone OpenSHMEM header file. Thanks + to Nick Park (@nspark) for the report. +- Ensure that we always send SIGTERM prior to SIGKILL. Thanks to Noel + Rycroft for the report. +- Added ConnectX-5 and Chelsio T6 device defaults for the openib BTL. +- OpenSHMEM no longer supports MXM less than v2.0. +- Plug a memory leak in ompi_osc_sm_free. Thanks to Joseph Schuchart + for the report. +- The "self" BTL now uses less memory. +- The vader BTL is now more efficient in terms of memory usage when + using XPMEM. +- Removed the ``--enable-openib-failover`` configure option. This is not + considered backwards-incompatible because this option was stale and + had long-since stopped working, anyway. 
+- Allow jobs launched under Cray aprun to use hyperthreads if + opal_hwloc_base_hwthreads_as_cpus MCA parameter is set. +- Add support for 32-bit and floating point Cray Aries atomic + operations. +- Add support for network AMOs for MPI_ACCUMULATE, MPI_FETCH_AND_OP, + and MPI_COMPARE_AND_SWAP if the "ompi_single_intrinsic" info key is + set on the window or the "acc_single_intrinsic" MCA param is set. +- Automatically disqualify RDMA CM support in the openib BTL if + MPI_THREAD_MULTIPLE is used. +- Make configure smarter/better about auto-detecting Linux CMA + support. +- Improve the scalability of MPI_COMM_SPLIT_TYPE. +- Fix the mixing of C99 and C++ header files with the MPI C++ + bindings. Thanks to Alastair McKinstry for the bug report. +- Add support for ARM v8. +- Several MCA parameters now directly support MPI_T enumerator + semantics (i.e., they accept a limited set of values -- e.g., MCA + parameters that accept boolean values). +- Added ``--with-libmpi-name=`` configure option for vendor + releases of Open MPI. See the README for more detail. +- Fix a problem with Open MPI's internal memory checker. Thanks to Yvan + Fournier for reporting. +- Fix a multi-threaded issue with MPI_WAIT. Thanks to Pascal Deveze for + reporting. + +.. note:: Known issues (to be addressed in v2.1.1): + + - See the list of fixes slated for v2.1.1 here: + https://github.com/open-mpi/ompi/milestone/26?closed=1 + + +Open MPI v2.0.x series +---------------------- + +Open MPI version 2.0.4 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: November, 2017 + +- Fix an issue with visibility of functions defined in the built-in PMIx. + Thanks to Siegmar Gross for reporting this issue. +- Add configure check to prevent trying to build this release of + Open MPI with an external hwloc 2.0 or newer release. +- Add ability to specify layered providers for OFI MTL. +- Fix a correctness issue with Open MPI's memory manager code + that could result in corrupted message data. 
Thanks to + Valentin Petrov for reporting. +- Fix issues encountered when using newer versions of PBS Pro. + Thanks to Petr Hanousek for reporting. +- Fix a problem with MPI_GET when using the vader BTL. Thanks + to Dahai Guo for reporting. +- Fix a problem when using MPI_ANY_SOURCE with MPI_SENDRECV_REPLACE. + Thanks to Dahai Guo for reporting. +- Fix a problem using MPI_FILE_OPEN with a communicator with an + attached cartesian topology. Thanks to Wei-keng Liao for reporting. +- Remove IB XRC support from the OpenIB BTL due to lack of support. +- Remove support for big endian PowerPC. +- Remove support for XL compilers older than v13.1 + + +Open MPI version 2.0.3 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: June 2017 + +- Fix a problem with MPI_IALLTOALLW when zero size messages are present. + Thanks to @mathbird for reporting. +- Add missing MPI_USER_FUNCTION definition to the mpi_f08 module. + Thanks to Nathan Weeks for reporting this issue. +- Fix a problem with MPI_WIN_LOCK not returning an error code when + a negative rank is supplied. Thanks to Jeff Hammond for reporting and + providing a fix. +- Fix a problem with make check that could lead to hangs. Thanks to + Nicolas Morey-Chaisemartin for reporting. +- Resolve a symbol conflict problem with PMI-1 and PMI-2 PMIx components. + Thanks to Kilian Cavalotti for reporting this issue. +- Insure that memory allocations returned from MPI_WIN_ALLOCATE_SHARED are + 64 byte aligned. Thanks to Joseph Schuchart for reporting this issue. +- Make use of DOUBLE_COMPLEX, if available, for Fortran bindings. Thanks + to Alexander Klein for reporting this issue. +- Add missing MPI_T_PVAR_SESSION_NULL definition to Open MPI mpi.h include + file. Thanks to Omri Mor for reporting and fixing. +- Fix a problem with use of MPI shared file pointers when accessing + a file from independent jobs. Thanks to Nicolas Joly for reporting + this issue. +- Optimize zero size MPI_IALLTOALL{V,W} with MPI_IN_PLACE. 
Thanks to + Lisandro Dalcín for the report. +- Fix a ROMIO buffer overflow problem for large transfers when using NFS + filesystems. +- Fix type of MPI_ARGV[S]_NULL which prevented it from being used + properly with MPI_COMM_SPAWN[_MULTIPLE] in the mpi_f08 module. +- Ensure to add proper linker flags to the wrapper compilers for + dynamic libraries on platforms that need it (e.g., RHEL 7.3 and + later). +- Get better performance on TCP-based networks 10Gbps and higher by + using OS defaults for buffer sizing. +- Fix a bug with ``MPI_[R][GET_]ACCUMULATE`` when using DARRAY datatypes. +- Fix handling of ``--with-lustre`` configure command line argument. + Thanks to Prentice Bisbal and Tim Mattox for reporting the issue. +- Added MPI_AINT_ADD and MPI_AINT_DIFF declarations to mpif.h. Thanks + to Aboorva Devarajan (@AboorvaDevarajan) for the bug report. +- Fix a problem in the TCP BTL when Open MPI is initialized with + MPI_THREAD_MULTIPLE support. Thanks to Evgueni Petrov for analyzing and + reporting this issue. +- Fix yalla PML to properly handle underflow errors, and fixed a + memory leak with blocking non-contiguous sends. +- Restored ability to run autogen.pl on official distribution tarballs + (although this is still not recommended for most users!). +- Fix accuracy problems with MPI_WTIME on some systems by always using + either clock_gettime(3) or gettimeofday(3). +- Fix a problem where MPI_WTICK was not returning a higher time resolution + when available. Thanks to Mark Dixon for reporting this issue. +- Restore SGE functionality. Thanks to Kevin Buckley for the initial + report. +- Fix external hwloc compilation issues, and extend support to allow + using external hwloc installations as far back as v1.5.0. Thanks to + Orion Poplawski for raising the issue. +- Added latest Mellanox Connect-X and Chelsio T-6 adapter part IDs to + the openib list of default values. +- Do a better job of cleaning up session directories (e.g., in ``/tmp``). 
+- Update a help message to indicate how to suppress a warning about + no high performance networks being detected by Open MPI. Thanks to + Susan Schwarz for reporting this issue. +- Fix a problem with mangling of custom CFLAGS when configuring Open MPI. + Thanks to Phil Tooley for reporting. +- Fix some minor memory leaks and remove some unused variables. + Thanks to Joshua Gerrard for reporting. +- Fix MPI_ALLGATHERV bug with MPI_IN_PLACE. + +.. note:: Known issues (to be addressed in v2.0.4): + + - See the list of fixes slated for v2.0.4 here: + https://github.com/open-mpi/ompi/milestone/29?closed=1 + + +Open MPI version 2.0.2 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 26 Jan 2017 + +- Fix a problem with MPI_FILE_WRITE_SHARED when using MPI_MODE_APPEND and + Open MPI's native MPI-IO implementation. Thanks to Nicolas Joly for + reporting. +- Fix a typo in the MPI_WIN_GET_NAME man page. Thanks to Nicolas Joly + for reporting. +- Fix a race condition with ORTE's session directory setup. Thanks to + @tbj900 for reporting this issue. +- Fix a deadlock issue arising from Open MPI's approach to catching calls to + munmap. Thanks to Paul Hargrove for reporting and helping to analyze this + problem. +- Fix a problem with PPC atomics which caused make check to fail unless builtin + atomics configure option was enabled. Thanks to Orion Poplawski for reporting. +- Fix a problem with use of x86_64 cpuid instruction which led to segmentation + faults when Open MPI was configured with -O3 optimization. Thanks to Mark + Santcroos for reporting this problem. +- Fix a problem when using built in atomics configure options on PPC platforms + when building 32 bit applications. Thanks to Paul Hargrove for reporting. +- Fix a problem with building Open MPI against an external hwloc installation. + Thanks to Orion Poplawski for reporting this issue. +- Remove use of DATE in the message queue version string reported to debuggers to + insure bit-wise reproducibility of binaries. 
Thanks to Alastair McKinstry + for help in fixing this problem. +- Fix a problem with early exit of an MPI process without calling MPI_FINALIZE + or MPI_ABORT that could lead to job hangs. Thanks to Christof Koehler for + reporting. +- Fix a problem with forwarding of SIGTERM signal from mpirun to MPI processes + in a job. Thanks to Noel Rycroft for reporting this problem. +- Plug some memory leaks in MPI_WIN_FREE discovered using Valgrind. Thanks + to Joseph Schuchart for reporting. +- Fix a problem with MPI_NEIGHBOR_ALLTOALL when using a communicator with an empty topology + graph. Thanks to Daniel Ibanez for reporting. +- Fix a typo in a PMIx component help file. Thanks to @njoly for reporting this. +- Fix a problem with Valgrind false positives when using Open MPI's internal memchecker. + Thanks to Yvan Fournier for reporting. +- Fix a problem with MPI_FILE_DELETE returning MPI_SUCCESS when + deleting a non-existent file. Thanks to Wei-keng Liao for reporting. +- Fix a problem with MPI_IMPROBE that could lead to hangs in subsequent MPI + point to point or collective calls. Thanks to Chris Pattison for reporting. +- Fix a problem when configuring Open MPI for powerpc with ``--enable-mpi-cxx`` + enabled. Thanks to Alastair McKinstry for reporting. +- Fix a problem using MPI_IALLTOALL with MPI_IN_PLACE argument. Thanks to + Chris Ward for reporting. +- Fix a problem using MPI_RACCUMULATE with the Portals4 transport. Thanks to + @PDeveze for reporting. +- Fix an issue with static linking and duplicate symbols arising from PMIx + Slurm components. Thanks to Limin Gu for reporting. +- Fix a problem when using MPI dynamic memory windows. Thanks to + Christoph Niethammer for reporting. +- Fix a problem with Open MPI's pkgconfig files. Thanks to Alastair McKinstry + for reporting. +- Fix a problem with MPI_IREDUCE when the same buffer is supplied for the + send and recv buffer arguments. Thanks to Valentin Petrov for reporting. +- Fix a problem with atomic operations on PowerPC. 
Thanks to Paul + Hargrove for reporting. + +.. note:: Known issues (to be addressed in v2.0.3): + + - See the list of fixes slated for v2.0.3 here: + https://github.com/open-mpi/ompi/milestone/23?closed=1 + + +Open MPI version 2.0.1 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 2 Sep 2016 + +- Short message latency and message rate performance improvements for + all transports. +- Fix shared memory performance when using RDMA-capable networks. + Thanks to Tetsuya Mishima and Christoph Niethammer for reporting. +- Fix bandwidth performance degradation in the yalla (MXM) PML. Thanks + to Andreas Kempf for reporting the issue. +- Fix OpenSHMEM crash when running on non-Mellanox MXM-based networks. + Thanks to Debendra Das for reporting the issue. +- Fix a crash occurring after repeated calls to MPI_FILE_SET_VIEW with + predefined datatypes. Thanks to Eric Chamberland and Matthew + Knepley for reporting and helping chase down this issue. +- Fix stdin propagation to MPI processes. Thanks to Jingchao Zhang + for reporting the issue. +- Fix various runtime and portability issues by updating the PMIx + internal component to v1.1.5. +- Fix process startup failures on Intel MIC platforms due to very + large entries in ``/proc/mounts``. +- Fix a problem with use of relative path for specifying executables to + mpirun / oshrun. Thanks to David Schneider for reporting. +- Various improvements when running over portals-based networks. +- Fix thread-based race conditions with GNI-based networks. +- Fix a problem with MPI_FILE_CLOSE and MPI_FILE_SET_SIZE. Thanks + to Cihan Altinay for reporting. +- Remove all use of rand(3) from within Open MPI so as not to perturb + applications' use of it. Thanks to Matias Cabral and Noel Rycroft + for reporting. +- Fix crash in MPI_COMM_SPAWN. +- Fix types for MPI_UNWEIGHTED and MPI_WEIGHTS_EMPTY. Thanks to + Lisandro Dalcín for reporting. +- Correctly report the name of MPI_INTEGER16. +- Add some missing MPI constants to the Fortran bindings. 
+- Fixed compile error when configuring Open MPI with ``--enable-timing``. +- Correctly set the shared library version of libompitrace.so. Thanks + to Alastair McKinstry for reporting. +- Fix errors in the MPI_RPUT, MPI_RGET, MPI_RACCUMULATE, and + MPI_RGET_ACCUMULATE Fortran bindings. Thanks to Alfio Lazzaro and + Joost VandeVondele for tracking this down. +- Fix problems with use of derived datatypes in non-blocking + collectives. Thanks to Yuki Matsumoto for reporting. +- Fix problems with OpenSHMEM header files when using CMake. Thanks to + Paul Kapinos for reporting the issue. +- Fix problem with use of non-zero lower bound datatypes in + collectives. Thanks to Hristo Iliev for reporting. +- Fix a problem with memory allocation within MPI_GROUP_INTERSECTION. + Thanks to Lisandro Dalcín for reporting. +- Fix an issue with MPI_ALLGATHER for communicators that don't consist + of two ranks. Thanks to David Love for reporting. +- Various fixes for collectives when used with esoteric MPI datatypes. +- Fixed corner cases of handling DARRAY and HINDEXED_BLOCK datatypes. +- Fix a problem with filesystem type check for OpenBSD. + Thanks to Paul Hargrove for reporting. +- Fix some debug input within Open MPI internal functions. Thanks to + Durga Choudhury for reporting. +- Fix a typo in a configury help message. Thanks to Paul Hargrove for + reporting. +- Correctly support MPI_IN_PLACE in MPI_[I]ALLTOALL[V|W] and + MPI_[I]EXSCAN. +- Fix alignment issues on SPARC platforms. + +.. note:: Known issues (to be addressed in v2.0.2): + + - See the list of fixes slated for v2.0.2 here: + https://github.com/open-mpi/ompi/milestone/20?closed=1, and + https://github.com/open-mpi/ompi-release/milestone/19?closed=1 + (note that the "ompi-release" Github repo will be folded/absorbed + into the "ompi" Github repo at some point in the future) + + +Open MPI version 2.0.0 +^^^^^^^^^^^^^^^^^^^^^^ +:Date: 12 Jul 2016 + +.. 
attention:: Open MPI is now fully MPI-3.1 compliant + +.. important:: Major new features: + + - Many enhancements to MPI RMA. Open MPI now maps MPI RMA operations + on to native RMA operations for those networks which support this + capability. + - Greatly improved support for MPI_THREAD_MULTIPLE (when configured + with ``--enable-mpi-thread-multiple``). + - Enhancements to reduce the memory footprint for jobs at scale. A + new MCA parameter, "mpi_add_procs_cutoff", is available to set the + threshold for using this feature. + - Completely revamped support for memory registration hooks when using + OS-bypass network transports. + - Significant OMPIO performance improvements and many bug fixes. + - Add support for PMIx - Process Management Interface for Exascale. + Version 1.1.2 of PMIx is included internally in this release. + - Add support for PLFS file systems in Open MPI I/O. + - Add support for UCX transport. + - Simplify build process for Cray XC systems. Add support for + using native SLURM. + - Add a ``--tune`` mpirun command line option to simplify setting many + environment variables and MCA parameters. + - Add a new MCA parameter "orte_default_dash_host" to offer an analogue + to the existing "orte_default_hostfile" MCA parameter. + - Add the ability to specify the number of desired slots in the mpirun + ``--host`` option. + +.. 
note:: Known issues (to be addressed in v2.0.1): + + - See the list of fixes slated for v2.0.1 here: + https://github.com/open-mpi/ompi/milestone/16?closed=1, and + https://github.com/open-mpi/ompi-release/milestone/16?closed=1 + (note that the "ompi-release" Github repo will be folded/absorbed + into the "ompi" Github repo at some point in the future) + + - ompi-release#986: Fix data size counter for large ops with fcoll/static + - ompi-release#987: Fix OMPIO performance on Lustre + - ompi-release#1013: Fix potential inconsistency in btl/openib default settings + - ompi-release#1014: Do not return MPI_ERR_PENDING from collectives + - ompi-release#1056: Remove dead profile code from oshmem + - ompi-release#1081: Fix MPI_IN_PLACE checking for IALLTOALL{V|W} + - ompi-release#1081: Fix memchecker in MPI_IALLTOALLW + - ompi-release#1081: Support MPI_IN_PLACE in MPI_(I)ALLTOALLW and MPI_(I)EXSCAN + - ompi-release#1107: Allow future PMIx support for RM spawn limits + - ompi-release#1108: Fix sparse group process reference counting + - ompi-release#1109: If specified to be oversubscribed, disable binding + - ompi-release#1122: Allow NULL arrays for empty datatypes + - ompi-release#1123: Fix signed vs. 
unsigned compiler warnings + - ompi-release#1123: Make max hostname length uniform across code base + - ompi-release#1127: Fix MPI_Compare_and_swap + - ompi-release#1127: Fix MPI_Win_lock when used with MPI_Win_fence + - ompi-release#1132: Fix typo in help message for ``--enable-mca-no-build`` + - ompi-release#1154: Ensure pairwise coll algorithms disqualify themselves properly + - ompi-release#1165: Fix typos in debugging/verbose message output + - ompi-release#1178: Fix ROMIO filesystem check on OpenBSD 5.7 + - ompi-release#1197: Fix Fortran pthread configure check + - ompi-release#1205: Allow using external PMIx 1.1.4 and 2.0 + - ompi-release#1215: Fix configure to support the NAG Fortran compiler + - ompi-release#1220: Fix combiner args for MPI_HINDEXED_BLOCK + - ompi-release#1225: Fix combiner args for MPI_DARRAY + - ompi-release#1226: Disable old memory hooks with recent gcc versions + - ompi-release#1231: Fix new "patcher" support for some XLC platforms + - ompi-release#1244: Fix Java error handling + - ompi-release#1250: Ensure TCP is not selected for RDMA operations + - ompi-release#1252: Fix verbose output in coll selection + - ompi-release#1253: Set a default name for user-defined MPI_Op + - ompi-release#1254: Add count==0 checks in some non-blocking colls + - ompi-release#1258: Fix ``make distclean`` when using external pmix/hwloc/libevent + - ompi-release#1260: Clean up/uniform mca/coll/base memory management + - ompi-release#1261: Remove "patcher" warning message for static builds + - ompi-release#1263: Fix IO MPI_Request for 0-size read/write + - ompi-release#1264: Add blocking fence for SLURM operations + +- In environments where mpirun cannot automatically determine the + number of slots available (e.g., when using a hostfile that does not + specify "slots", or when using ``--host`` without specifying a ":N" + suffix to hostnames), mpirun now requires the use of "-np N" to + specify how many MPI processes to launch. 
+- The MPI C++ bindings (which were removed from the MPI standard in + v3.0) are no longer built by default and will be removed in some + future version of Open MPI. Use the ``--enable-mpi-cxx-bindings`` + configure option to build the deprecated/removed MPI C++ bindings. +- ompi_info now shows all components, even if they do not have MCA + parameters. The prettyprint output now separates groups with a + dashed line. +- OMPIO is now the default implementation of parallel I/O, with the + exception for Lustre parallel filesystems (where ROMIO is still the + default). The default selection of OMPI vs. ROMIO can be controlled + via the "--mca io ompi|romio" command line switch to mpirun. +- Per Open MPI's versioning scheme (see the README), increasing the + major version number to 2 indicates that this version is not + ABI-compatible with prior versions of Open MPI. You will need to + recompile MPI and OpenSHMEM applications to work with this version + of Open MPI. +- Removed checkpoint/restart code due to loss of maintainer. :-( +- Change the behavior for handling certain signals when using PSM and + PSM2 libraries. Previously, the PSM and PSM2 libraries would trap + certain signals in order to generate tracebacks. The mechanism was + found to cause issues with Open MPI's own error reporting mechanism. + If not already set, Open MPI now sets the IPATH_NO_BACKTRACE and + HFI_NO_BACKTRACE environment variables to disable PSM/PSM2's + handling these signals. + +.. attention:: Removed legacy support: + + - Removed support for OS X Leopard. + - Removed support for Cray XT systems. + - Removed VampirTrace. + - Removed support for Myrinet/MX. + - Removed legacy collective module:ML. + - Removed support for Alpha processors. + - Removed ``--enable-mpi-profiling`` configure option. + +- Updated internal/embedded copies of third-party software: + + - Update the internal copy of ROMIO to that which shipped in MPICH + 3.1.4. + - Update internal copy of libevent to v2.0.22. 
+ - Update internal copy of hwloc to v1.11.2. + +- Notable new MCA parameters: + + - opal_progress_lp_call_ratio: Control how often low-priority + callbacks are made during Open MPI's main progress loop. + - opal_common_verbs_want_fork_support: This replaces the + btl_openib_want_fork_support parameter. + +- Add ``--with-platform-patches-dir`` configure option. +- Add ``--with-pmi-libdir`` configure option for environments that install + PMI libs in a non-default location. +- Various configure-related compatibility updates for newer versions + of libibverbs and OFED. +- Numerous fixes/improvements to orte-dvm. Special thanks to Mark + Santcroos for his help. +- Fix a problem with timer code on ia32 platforms. Thanks to + Paul Hargrove for reporting this and providing a patch. +- Fix a problem with use of a 64 bit atomic counter. Thanks to + Paul Hargrove for reporting. +- Fix a problem with singleton job launching. Thanks to Lisandro + Dalcín for reporting. +- Fix a problem with use of MPI_UNDEFINED with MPI_COMM_SPLIT_TYPE. + Thanks to Lisandro Dalcín for reporting. +- Silence a compiler warning in PSM MTL. Thanks to Adrian Reber for + reporting this. +- Properly detect Intel TrueScale and OmniPath devices in the ACTIVE + state. Thanks to Durga Choudhury for reporting the issue. +- Fix detection and use of Solaris Studio 12.5 (beta) compilers. + Thanks to Paul Hargrove for reporting and debugging. +- Fix various small memory leaks. +- Allow NULL arrays when creating empty MPI datatypes. +- Replace use of alloca with malloc for certain datatype creation + functions. Thanks to Bogdan Sataric for reporting this. +- Fix use of MPI_LB and MPI_UB in creation of certain MPI datatypes. + Thanks to Gus Correa for helping to fix this. +- Implement a workaround for a GNU Libtool problem. Thanks to Eric + Schnetter for reporting and fixing. +- Improve hcoll library detection in configure. Thanks to David + Shrader and Åke Sandgren for reporting this. 
+- Miscellaneous minor bug fixes in the hcoll component. +- Miscellaneous minor bug fixes in the ugni component. +- Fix problems with XRC detection in OFED 3.12 and older releases. + Thanks to Paul Hargrove for his analysis of this problem. +- Update (non-standard/experimental) Java MPI interfaces to support + MPI-3.1 functionality. +- Fix an issue with MCA parameters for Java bindings. Thanks to + Takahiro Kawashima and Siegmar Gross for reporting this issue. +- Fix a problem when using persistent requests in the Java bindings. + Thanks to Nate Chambers for reporting. +- Fix problem with Java bindings on OS X 10.11. Thanks to Alexander + Daryin for reporting this issue. +- Fix a performance problem for large messages for Cray XC systems. + Thanks to Jerome Vienne for reporting this. +- Fix an issue with MPI_WIN_LOCK_ALL. Thanks to Thomas Jahns for + reporting. +- Fix an issue with passing a parameter to configure multiple times. + Thanks to QuesarVII for reporting and supplying a fix. +- Add support for ALPS resource allocation system on Cray CLE 5.2 and + later. Thanks to Mark Santcroos. +- Corrections to the HACKING file. Thanks to Maximilien Levesque. +- Fix an issue with user supplied reduction operator functions. + Thanks to Rupert Nash for reporting this. +- Fix an issue with an internal list management function. Thanks to + Adrian Reber for reporting this. +- Fix a problem with MPI-RMA PSCW epochs. Thanks to Berk Hess for + reporting this. +- Fix a problem in neighborhood collectives. Thanks to Lisandro + Dalcín for reporting. +- Fix MPI_IREDUCE_SCATTER_BLOCK for a one-process communicator. Thanks + to Lisandro Dalcín for reporting. +- Add (Open MPI-specific) additional flavors to MPI_COMM_SPLIT_TYPE. + See MPI_Comm_split_type(3) for details. Thanks to Nick Andersen for + supplying this enhancement. +- Improve closing of file descriptors during the job launch phase. + Thanks to Piotr Lesnicki for reporting and providing this + enhancement. 
+- Fix a problem in MPI_GET_ACCUMULATE and MPI_RGET_ACCUMULATE when + using Portals4. Thanks to Nicolas Chevalier for reporting. +- Use correct include file for lstat prototype in ROMIO. Thanks to + William Throwe for finding and providing a fix. +- Add missing Fortran bindings for MPI_WIN_ALLOCATE. Thanks to Christoph + Niethammer for reporting and fixing. +- Fortran related fixes to handle Intel 2016 compiler. Thanks to + Fabrice Roy for reporting this. +- Fix a Fortran linkage issue. Thanks to Marco Atzeri for finding and + suggesting a fix. +- Fix problem with using BIND(C) for Fortran bindings with logical + parameters. Thanks to Paul Romano for reporting. +- Fix an issue with use of DL-related macros in opal library. Thanks to + Scott Atchley for finding this. +- Fix an issue with parsing mpirun command line options which contain + colons. Thanks to Lev Given for reporting. +- Fix a problem with Open MPI's package configury files. Thanks to + Christoph Junghans for reporting. +- Fix a typo in the MPI_INTERCOMM_MERGE man page. Thanks to Harald + Servat for reporting and correcting. +- Update man pages for non-blocking sends per MPI 3.1 standard. + Thanks to Alexander Pozdneev for reporting. +- Fix problem when compiling against PVFS2. Thanks to Dave Love for + reporting. +- Fix problems with MPI_NEIGHBOR_ALLTOALL{V,W}. Thanks to Willem + Vermin for reporting this issue. +- Fix various compilation problems on Cygwin. Thanks to Marco Atzeri + for supplying these fixes. +- Fix problem with resizing of subarray and darray data types. Thanks + to Keith Bennett and Dan Garmann for reporting. +- Fix a problem with MPI_COMBINER_RESIZED. Thanks to James Ramsey for + the report. +- Fix an hwloc binding issue. Thanks to Ben Menadue for reporting. +- Fix a problem with the shared memory (sm) BTL. Thanks to Peter Wind + for the report. +- Fixes for heterogeneous support. Thanks to Siegmar Gross for reporting. +- Fix a problem with memchecker. 
Thanks to Clinton Simpson for reporting. +- Fix a problem with MPI_UNWEIGHTED in topology functions. Thanks to + Jun Kudo for reporting. +- Fix problem with a MCA parameter base filesystem types. Thanks to + Siegmar Gross for reporting. +- Fix a problem with some windows info argument types. Thanks to + Alastair McKinstry for reporting. diff --git a/docs/news/news-v3.0.x.rst b/docs/news/news-v3.0.x.rst new file mode 100644 index 00000000000..27578071121 --- /dev/null +++ b/docs/news/news-v3.0.x.rst @@ -0,0 +1,252 @@ +Open MPI v3.0.x series +====================== + +This file contains all the NEWS updates for the Open MPI v3.0.x +series, in reverse chronological order. + +Open MPI version 3.0.6 +---------------------- +:Date: March, 2020 + +- Fix one-sided shared memory window configuration bug. +- Fix support for PGI'18 compiler. +- Fix run-time linker issues with OMPIO on newer Linux distros. +- Allow the user to override modulefile_path in the Open MPI SRPM, + even if ``install_in_opt`` is set to 1. +- Properly detect ConnectX-6 HCAs in the openib BTL. +- Fix segfault in the MTL/OFI initialization for large jobs. +- Fix various portals4 control flow bugs. +- Fix communications ordering for alltoall and Cartesian neighborhood + collectives. +- Fix an infinite recursion crash in the memory patcher on systems + with glibc v2.26 or later (e.g., Ubuntu 18.04) when using certain + OS-bypass interconnects. + + +Open MPI version 3.0.5 +---------------------- +:Date: November, 2019 + +- Fix OMPIO issue limiting file reads/writes to 2GB. Thanks to + Richard Warren for reporting the issue. +- At run time, automatically disable Linux cross-memory attach (CMA) + for vader BTL (shared memory) copies when running in user namespaces + (i.e., containers). Many thanks to Adrian Reber for raising the + issue and providing the fix. 
+- Sending very large MPI messages using the ofi MTL will fail with + some of the underlying Libfabric transports (e.g., PSM2 with + messages >=4GB, verbs with messages >=2GB). Prior version of Open + MPI failed silently; this version of Open MPI invokes the + appropriate MPI error handler upon failure. See + https://github.com/open-mpi/ompi/issues/7058 for more details. + Thanks to Emmanuel Thomé for raising the issue. +- Fix case where 0-extent datatypes might be eliminated during + optimization. Thanks to Github user @tjahns for raising the issue. +- Ensure that the MPIR_Breakpoint symbol is not optimized out on + problematic platforms. +- Fix OMPIO offset calculations with ``SEEK_END`` and ``SEEK_CUR`` in + ``MPI_FILE_GET_POSITION``. Thanks to Wei-keng Liao for raising the + issue. +- Fix corner case for datatype extent computations. Thanks to David + Dickenson for raising the issue. +- Fix MPI buffered sends with the "cm" PML. +- Update to PMIx v2.2.3. +- Fix ssh-based tree-based spawning at scale. Many thanks to Github + user @zrss for the report and diagnosis. +- Fix the Open MPI RPM spec file to not abort when grep fails. Thanks + to Daniel Letai for bringing this to our attention. +- Handle new SLURM CLI options (SLURM 19 deprecated some options that + Open MPI was using). Thanks to Jordan Hayes for the report and the + initial fix. +- OMPI: fix division by zero with an empty file view. +- Also handle ``shmat()``/``shmdt()`` memory patching with OS-bypass networks. +- Add support for unwinding info to all files that are present in the + stack starting from ``MPI_Init``, which is helpful with parallel + debuggers. Thanks to James Clark for the report and initial fix. +- Fixed inadvertant use of bitwise operators in the MPI C++ bindings + header files. Thanks to Bert Wesarg for the report and the fix. 
+- Added configure option ``--disable-wrappers-runpath`` (alongside the + already-existing ``--disable-wrappers-rpath`` option) to prevent Open + MPI's configure script from automatically adding runpath CLI options + to the wrapper compilers. + + +Open MPI version 3.0.4 +---------------------- +:Date: April, 2019 + +- Fix compile error when configured with ``--enable-mpi-java`` and + ``--with-devel-headers``. Thanks to @g-raffy for reporting the issue. +- Fix possible floating point rounding and division issues in OMPIO + which led to crashes and/or data corruption with very large data. + Thanks to Axel Huebl and René Widera for identifing the issue, + supplying and testing the fix (** also appeared: v3.0.4). +- Use ``static_cast<>`` in ``mpi.h`` where appropriate. Thanks to @shadow-fx + for identifying the issue. +- Fix datatype issue with RMA accumulate. Thanks to Jeff Hammond for + raising the issue. +- Fix RMA accumulate of non-predefined datatypes with predefined + operators. Thanks to Jeff Hammond for raising the issue. +- Fix race condition when closing open file descriptors when launching + MPI processes. Thanks to Jason Williams for identifying the issue and + supplying the fix. +- Fix Valgrind warnings for some ``MPI_TYPE_CREATE_*`` functions. Thanks + to Risto Toijala for identifying the issue and supplying the fix. +- Fix ``MPI_TYPE_CREATE_F90_{REAL,COMPLEX}`` for r=38 and r=308. +- Fix assembly issues with old versions of gcc (<6.0.0) that affected + the stability of shared memory communications (e.g., with the vader + BTL). +- Fix the OFI MTL handling of ``MPI_ANY_SOURCE``. +- Fix noisy errors in the openib BTL with regards to + ``ibv_exp_query_device()``. Thanks to Angel Beltre and others who + reported the issue. + + +Open MPI version 3.0.3 +---------------------- +:Date: October, 2018 + +- Fix race condition in ``MPI_THREAD_MULTIPLE`` support of non-blocking + send/receive path. +- Fix error handling ``SIGCHLD`` forwarding. 
+- Add support for ``CHARACTER`` and ``LOGICAL`` Fortran datatypes for ``MPI_SIZEOF``. +- Fix compile error when using OpenJDK 11 to compile the Java bindings. +- Fix crash when using a hostfile with a 'user@host' line. +- Numerous Fortran '08 interface fixes. +- TCP BTL error message fixes. +- OFI MTL now will use any provider other than shm, sockets, tcp, udp, or + rstream, rather than only supporting gni, psm, and psm2. +- Disable async receive of CUDA buffers by default, fixing a hang + on large transfers. +- Support the BCM57XXX and BCM58XXX Broadcomm adapters. +- Fix minmax datatype support in ROMIO. +- Bug fixes in vader shared memory transport. +- Support very large buffers with ``MPI_TYPE_VECTOR``. +- Fix hang when launching with mpirun on Cray systems. +- Bug fixes in OFI MTL. +- Assorted Portals 4.0 bug fixes. +- Fix for possible data corruption in ``MPI_BSEND``. +- Move shared memory file for vader btl into ``/dev/shm`` on Linux. +- Fix for ``MPI_ISCATTER``/``MPI_ISCATTERV`` Fortran interfaces with ``MPI_IN_PLACE``. +- Upgrade PMIx to v2.1.4. +- Fix for Power9 built-in atomics. +- Numerous One-sided bug fixes. +- Fix for race condition in uGNI BTL. +- Improve handling of large number of interfaces with TCP BTL. +- Numerous UCX bug fixes. +- Add support for QLogic and Broadcom Cumulus RoCE HCAs to Open IB BTL. +- Add patcher support for aarch64. +- Fix hang on Power and ARM when Open MPI was built with low compiler + optimization settings. + + +Open MPI version 3.0.2 +---------------------- +:Date: June, 2018 + +- Disable osc/pt2pt when using ``MPI_THREAD_MULTIPLE`` due to numerous + race conditions in the component. +- Fix dummy variable names for the mpi and mpi_f08 Fortran bindings to + match the MPI standard. This may break applications which use + name-based parameters in Fortran which used our internal names + rather than those documented in the MPI standard. +- Fixed ``MPI_SIZEOF`` in the "mpi" Fortran module for the NAG compiler. 
+- Fix RMA function signatures for ``use-mpi-f08`` bindings to have the + asynchronous property on all buffers. +- Fix Fortran ``MPI_COMM_SPAWN_MULTIPLE`` to properly follow the count + length argument when parsing the array_of_commands variable. +- Revamp Java detection to properly handle new Java versions which do + not provide a javah wrapper. +- Improved configure logic for finding the UCX library. +- Add support for HDR InfiniBand link speeds. +- Disable the POWER 7/BE block in configure. Note that POWER 7/BE is + still not a supported platform, but it is no longer automatically + disabled. See + https://github.com/open-mpi/ompi/issues/4349#issuecomment-374970982 + for more information. + + +Open MPI version 3.0.1 +---------------------- +:Date: March, 2018 + +- Fix ability to attach parallel debuggers to MPI processes. +- Fix a number of issues in MPI I/O found by the HDF5 test suite. +- Fix (extremely) large message transfers with shared memory. +- Fix out of sequence bug in multi-NIC configurations. +- Fix stdin redirection bug that could result in lost input. +- Disable the LSF launcher if CSM is detected. +- Plug a memory leak in ``MPI_Mem_free()``. Thanks to Philip Blakely for reporting. +- Fix the tree spawn operation when the number of nodes is larger than the radix. + Thanks to Carlos Eduardo de Andrade for reporting. +- Fix Fortran 2008 macro in MPI extensions. Thanks to Nathan T. Weeks for + reporting. +- Add UCX to list of interfaces that OpenSHMEM will use by default. +- Add ``--{enable|disable}-show-load-errors-by-default`` to control + default behavior of the load errors option. +- OFI MTL improvements: handle empty completion queues properly, fix + incorrect error message around ``fi_getinfo()``, use default progress + option for provider by default, add support for reading multiple + CQ events in ofi_progress. +- PSM2 MTL improvements: Allow use of GPU buffers, thread fixes. +- Numerous corrections to memchecker behavior. 
+- Add an mca parameter ``ras_base_launch_orted_on_hn`` to allow for launching + MPI processes on the same node where mpirun is executing using a separate + orte daemon, rather than the mpirun process. This may be useful to set to + true when using SLURM, as it improves interoperability with SLURM's signal + propagation tools. By default it is set to false, except for Cray XC systems. +- Fix a problem reported on the mailing list separately by Kevin McGrattan and Stephen + Guzik about consistency issues on NFS file systems when using OMPIO. This fix + also introduces a new mca parameter ``fs_ufs_lock_algorithm`` which allows the user to + control the locking algorithm used by ompio for read/write operations. By + default, ompio does not perform locking on local UNIX file systems, locks the + entire file per operation on NFS file systems, and selective byte-range + locking on other distributed file systems. +- Add an mca parameter ``pmix_server_usock_connections`` to allow mpirun to + support applications statically built against the Open MPI v2.x release, + or installed in a container along with the Open MPI v2.x libraries. It is + set to false by default. + + +Open MPI version 3.0.0 +---------------------- +:Date: September, 2017 + +.. important:: Major new features: + + - Use UCX allocator for OSHMEM symmetric heap allocations to optimize intra-node + data transfers. UCX SPML only. + - Use UCX multi-threaded API in the UCX PML. Requires UCX 1.0 or later. + - Added support for Flux PMI + - Update embedded PMIx to version 2.1.0 + - Update embedded hwloc to version 1.11.7 + +- Per Open MPI's versioning scheme (see the README), increasing the + major version number to 3 indicates that this version is not + ABI-compatible with prior versions of Open MPI. In addition, there may + be differences in MCA parameter names and defaults from previous releases. + Command line options for mpirun and other commands may also differ from + previous versions. 
You will need to recompile MPI and OpenSHMEM applications + to work with this version of Open MPI. +- With this release, Open MPI supports ``MPI_THREAD_MULTIPLE`` by default. +- New configure options have been added to specify the locations of libnl + and zlib. +- A new configure option has been added to request Flux PMI support. +- The help menu for mpirun and related commands is now context based. + ``mpirun --help compatibility`` generates the help menu in the same format + as previous releases. + +.. attention:: Removed legacy support: + + - AIX is no longer supported. + - Loadleveler is no longer supported. + - OpenSHMEM currently supports the UCX and MXM transports via the ucx and ikrit + SPMLs respectively. + - Remove IB XRC support from the OpenIB BTL due to lack of support. + - Remove support for big endian PowerPC. + - Remove support for XL compilers older than v13.1 + +.. note:: Known issues: + + - MPI_Connect/accept between applications started by different mpirun + commands will fail, even if ompi-server is running. diff --git a/docs/news/news-v3.1.x.rst b/docs/news/news-v3.1.x.rst new file mode 100644 index 00000000000..de265cf19ac --- /dev/null +++ b/docs/news/news-v3.1.x.rst @@ -0,0 +1,258 @@ +Open MPI v3.1.x series +====================== + +This file contains all the NEWS updates for the Open MPI v3.1.x +series, in reverse chronological order. + +Open MPI version 3.1.6 +---------------------- +:Date: March 2020 + +- Fix one-sided shared memory window configuration bug. +- Fix support for PGI'18 compiler. +- Fix issue with zero-length blockLength in ``MPI_TYPE_INDEXED``. +- Fix run-time linker issues with OMPIO on newer Linux distros. +- Fix PMIX dstore locking compilation issue. Thanks to Marco Atzeri + for reporting the issue. +- Allow the user to override modulefile_path in the Open MPI SRPM, + even if ``install_in_opt`` is set to 1. +- Properly detect ConnectX-6 HCAs in the openib BTL. +- Fix segfault in the MTL/OFI initialization for large jobs. 
+- Fix issue to guarantee to properly release MPI one-sided lock when + using UCX transports to avoid a deadlock. +- Fix potential deadlock when processing outstanding transfers with + uGNI transports. +- Fix various portals4 control flow bugs. +- Fix communications ordering for alltoall and Cartesian neighborhood + collectives. +- Fix an infinite recursion crash in the memory patcher on systems + with glibc v2.26 or later (e.g., Ubuntu 18.04) when using certain + OS-bypass interconnects. + + +Open MPI version 3.1.5 +---------------------- +:Date: November, 2019 + +- Fix OMPIO issue limiting file reads/writes to 2GB. Thanks to + Richard Warren for reporting the issue. +- At run time, automatically disable Linux cross-memory attach (CMA) + for vader BTL (shared memory) copies when running in user namespaces + (i.e., containers). Many thanks to Adrian Reber for raising the + issue and providing the fix. +- Sending very large MPI messages using the ofi MTL will fail with + some of the underlying Libfabric transports (e.g., PSM2 with + messages >=4GB, verbs with messages >=2GB). Prior version of Open + MPI failed silently; this version of Open MPI invokes the + appropriate MPI error handler upon failure. See + https://github.com/open-mpi/ompi/issues/7058 for more details. + Thanks to Emmanuel Thomé for raising the issue. +- Fix case where 0-extent datatypes might be eliminated during + optimization. Thanks to Github user @tjahns for raising the issue. +- Ensure that the ``MPIR_Breakpoint`` symbol is not optimized out on + problematic platforms. +- Fix MPI one-sided 32 bit atomic support. +- Fix OMPIO offset calculations with ``SEEK_END`` and ``SEEK_CUR`` in + ``MPI_FILE_GET_POSITION``. Thanks to Wei-keng Liao for raising the + issue. +- Add "naive" regx component that will never fail, no matter how + esoteric the hostnames are. +- Fix corner case for datatype extent computations. Thanks to David + Dickenson for raising the issue. 
+- Allow individual jobs to set their map/rank/bind policies when + running LSF. Thanks to Nick R. Papior for assistance in solving the + issue. +- Fix MPI buffered sends with the "cm" PML. +- Properly propagate errors to avoid deadlocks in MPI one-sided operations. +- Update to PMIx v2.2.3. +- Fix data corruption in non-contiguous MPI accumulates over UCX. +- Fix ssh-based tree-based spawning at scale. Many thanks to Github + user @zrss for the report and diagnosis. +- Fix the Open MPI RPM spec file to not abort when grep fails. Thanks + to Daniel Letai for bringing this to our attention. +- Handle new SLURM CLI options (SLURM 19 deprecated some options that + Open MPI was using). Thanks to Jordan Hayes for the report and the + initial fix. +- OMPI: fix division by zero with an empty file view. +- Also handle ``shmat()``/``shmdt()`` memory patching with OS-bypass networks. +- Add support for unwinding info to all files that are present in the + stack starting from MPI_Init, which is helpful with parallel + debuggers. Thanks to James Clark for the report and initial fix. +- Fixed inadvertent use of bitwise operators in the MPI C++ bindings + header files. Thanks to Bert Wesarg for the report and the fix. + + +Open MPI version 3.1.4 +---------------------- +:Date: April, 2019 + +- Fix compile error when configured with ``--enable-mpi-java`` and + ``--with-devel-headers``. Thanks to @g-raffy for reporting the issue + (** also appeared: v3.0.4). +- Only use hugepages with appropriate permissions. Thanks to Hunter + Easterday for the fix. +- Fix possible floating point rounding and division issues in OMPIO + which led to crashes and/or data corruption with very large data. + Thanks to Axel Huebl and René Widera for identifying the issue, + supplying and testing the fix (** also appeared: v3.0.4). +- Use ``static_cast<>`` in ``mpi.h`` where appropriate. Thanks to @shadow-fx + for identifying the issue (** also appeared: v3.0.4).
+- Fix RMA accumulate of non-predefined datatypes with predefined + operators. Thanks to Jeff Hammond for raising the issue (** also + appeared: v3.0.4). +- Fix race condition when closing open file descriptors when launching + MPI processes. Thanks to Jason Williams for identifying the issue and + supplying the fix (** also appeared: v3.0.4). +- Fix support for external PMIx v3.1.x. +- Fix Valgrind warnings for some ``MPI_TYPE_CREATE_*`` functions. Thanks + to Risto Toijala for identifying the issue and supplying the fix (** + also appeared: v3.0.4). +- Fix ``MPI_TYPE_CREATE_F90_{REAL,COMPLEX}`` for r=38 and r=308 (** also + appeared: v3.0.4). +- Fix assembly issues with old versions of gcc (<6.0.0) that affected + the stability of shared memory communications (e.g., with the vader + BTL) (** also appeared: v3.0.4). +- Fix ``MPI_Allreduce`` crashes with some cases in the coll/spacc module. +- Fix the OFI MTL handling of ``MPI_ANY_SOURCE`` (** also appeared: + v3.0.4). +- Fix noisy errors in the openib BTL with regards to + ``ibv_exp_query_device()``. Thanks to Angel Beltre and others who + reported the issue (** also appeared: v3.0.4). +- Fix zero-size MPI one-sided windows with UCX. + + +Open MPI version 3.1.3 +---------------------- +:Date: October, 2018 + +- Fix race condition in ``MPI_THREAD_MULTIPLE`` support of non-blocking + send/receive path. +- Fix error handling ``SIGCHLD`` forwarding. +- Add support for ``CHARACTER`` and ``LOGICAL`` Fortran datatypes for ``MPI_SIZEOF``. +- Fix compile error when using OpenJDK 11 to compile the Java bindings. +- Fix crash when using a hostfile with a 'user@host' line. +- Numerous Fortran '08 interface fixes. +- TCP BTL error message fixes. +- OFI MTL now will use any provider other than shm, sockets, tcp, udp, or + rstream, rather than only supporting gni, psm, and psm2. +- Disable async receive of CUDA buffers by default, fixing a hang + on large transfers. +- Support the BCM57XXX and BCM58XXX Broadcomm adapters. 
+- Fix minmax datatype support in ROMIO. +- Bug fixes in vader shared memory transport. +- Support very large buffers with ``MPI_TYPE_VECTOR``. +- Fix hang when launching with mpirun on Cray systems. + + +Open MPI version 3.1.2 +---------------------- +:Date: August, 2018 + +- A subtle race condition bug was discovered in the "vader" BTL + (shared memory communications) that, in rare instances, can cause + MPI processes to crash or incorrectly classify (or effectively drop) + an MPI message sent via shared memory. If you are using the "ob1" + PML with "vader" for shared memory communication (note that vader is + the default for shared memory communication with ob1), you need to + upgrade to v3.1.2 or later to fix this issue. You may also upgrade + to the following versions to fix this issue: + + - Open MPI v2.1.5 (expected end of August, 2018) or later in the + v2.1.x series + - Open MPI v3.0.1 (released March, 2018) or later in the v3.0.x + series + +- Assorted Portals 4.0 bug fixes. +- Fix for possible data corruption in MPI_BSEND. +- Move shared memory file for vader btl into /dev/shm on Linux. +- Fix for ``MPI_ISCATTER``/``MPI_ISCATTERV`` Fortran interfaces with ``MPI_IN_PLACE``. +- Upgrade PMIx to v2.1.3. +- Numerous One-sided bug fixes. +- Fix for race condition in uGNI BTL. +- Improve handling of large number of interfaces with TCP BTL. +- Numerous UCX bug fixes. + + +Open MPI version 3.1.1 +---------------------- +:Date: June, 2018 + +- Fix potential hang in UCX PML during ``MPI_Finalize()`` +- Update internal PMIx to v2.1.2rc2 to fix forward version compatibility. +- Add new MCA parameter ``osc_sm_backing_store`` to allow users to specify + where in the filesystem the backing file for the shared memory + one-sided component should live. Defaults to ``/dev/shm`` on Linux. +- Fix potential hang on non-x86 platforms when using builds with + optimization flags turned off. 
+- Disable osc/pt2pt when using ``MPI_THREAD_MULTIPLE`` due to numerous + race conditions in the component. +- Fix dummy variable names for the mpi and mpi_f08 Fortran bindings to + match the MPI standard. This may break applications which use + name-based parameters in Fortran which used our internal names + rather than those documented in the MPI standard. +- Revamp Java detection to properly handle new Java versions which do + not provide a javah wrapper. +- Fix RMA function signatures for use-mpi-f08 bindings to have the + asynchronous property on all buffers. +- Improved configure logic for finding the UCX library. + + +Open MPI version 3.1.0 +---------------------- +:Date: May, 2018 + +- Various OpenSHMEM bug fixes. +- Properly handle array_of_commands argument to Fortran version of + ``MPI_COMM_SPAWN_MULTIPLE``. +- Fix bug with ``MODE_SEQUENTIAL`` and the sharedfp MPI-IO component. +- Use ``javac -h`` instead of ``javah`` when building the Java bindings + with a recent version of Java. +- Fix mis-handling of jostepid under SLURM that could cause problems + with PathScale/OmniPath NICs. +- Disable the POWER 7/BE block in configure. Note that POWER 7/BE is + still not a supported platform, but it is no longer automatically + disabled. See + https://github.com/open-mpi/ompi/issues/4349#issuecomment-374970982 + for more information. +- The output-filename option for ``mpirun`` is now converted to an + absolute path before being passed to other nodes. +- Add monitoring component for PML, OSC, and COLL to track data + movement of MPI applications. See + ompi/mca/common/monitoring/HowTo_pml_monitoring.tex for more + information about the monitoring framework. +- Add support for communicator assertions: ``mpi_assert_no_any_tag``, + ``mpi_assert_no_any_source``, ``mpi_assert_exact_length``, and + ``mpi_assert_allow_overtaking``. +- Update PMIx to version 2.1.1. +- Update hwloc to 1.11.7. +- Many one-sided behavior fixes.
+- Improved performance for Reduce and Allreduce using Rabenseifner's algorithm. +- Revamped mpirun ``--help`` output to make it a bit more manageable. +- Portals4 MTL improvements: Fix race condition in rendezvous protocol and + retry logic. +- UCX OSC: initial implementation. +- UCX PML improvements: add multi-threading support. +- Yalla PML improvements: Fix error with irregular contiguous datatypes. +- Openib BTL: disable XRC support by default. +- TCP BTL: Add check to detect and ignore connections from processes + that aren't MPI (such as IDS probes) and verify that source and + destination are using the same version of Open MPI, fix issue with very + large message transfer. +- ompi_info parsable output now escapes double quotes in values, and + also quotes values that contain colons. Thanks to Lev Givon for the + suggestion. +- CUDA-aware support can now handle GPUs within a node that do not + support CUDA IPC. Earlier versions would get an error and abort. +- Add a mca parameter ras_base_launch_orted_on_hn to allow for launching + MPI processes on the same node where mpirun is executing using a separate + orte daemon, rather than the mpirun process. This may be useful to set to + true when using SLURM, as it improves interoperability with SLURM's signal + propagation tools. By default it is set to false, except for Cray XC systems. +- Remove LoadLeveler RAS support. +- Remove IB XRC support from the OpenIB BTL due to lack of support. +- Add functionality for IBM s390 platforms. Note that regular + regression testing does not occur on the s390 and it is not + considered a supported platform. +- Remove support for big endian PowerPC. +- Remove support for XL compilers older than v13.1. +- Remove support for atomic operations using MacOS atomics library.
diff --git a/docs/news/news-v4.0.x.rst b/docs/news/news-v4.0.x.rst new file mode 100644 index 00000000000..70d452c0619 --- /dev/null +++ b/docs/news/news-v4.0.x.rst @@ -0,0 +1,286 @@ +Open MPI v4.0.x series +====================== + +This file contains all the NEWS updates for the Open MPI v4.0.x +series, in reverse chronological order. + +Open MPI version 4.0.6 +---------------------- +:Date: March, 2021 + +- Update embedded PMIx to 3.2.2. This update addresses several + ``MPI_COMM_SPAWN`` problems. +- Fix a problem when using Flux PMI and UCX. Thanks to Sami Ilvonen + for reporting and supplying a fix. +- Fix a problem with MPIR breakpoint being compiled out using PGI + compilers. Thanks to @louisespellacy-arm for reporting. +- Fix some ROMIO issues when using Lustre. Thanks to Mark Dixon for + reporting. +- Fix a problem using an external PMIx 4 to build Open MPI 4.0.x. +- Fix a compile problem when using the enable-timing configure option + and UCX. Thanks to Jan Bierbaum for reporting. +- Fix a symbol name collision when using the Cray compiler to build + Open SHMEM. Thanks to Pak Lui for reporting and fixing. +- Correct an issue encountered when building Open MPI under OSX Big Sur. + Thanks to FX Coudert for reporting. +- Various fixes to the OFI MTL. +- Fix an issue with allocation of sufficient memory for parsing long + environment variable values. Thanks to @zrss for reporting. +- Improve reproducibility of builds to assist Open MPI packages. + Thanks to Bernhard Wiedmann for bringing this to our attention. + + +Open MPI version 4.0.5 +---------------------- +:Date: August, 2020 + +- Fix a problem with MPI RMA compare and swap operations. Thanks + to Wojciech Chlapek for reporting. +- Disable binding of MPI processes to system resources by Open MPI + if an application is launched using SLURM's srun command. +- Disable building of the Fortran mpi_f08 module when configuring + Open MPI with default 8 byte Fortran integer size. 
Thanks to + @ahcien for reporting. +- Fix a problem with mpirun when the ``--map-by`` option is used. + Thanks to Wenbin Lyu for reporting. +- Fix some issues with MPI one-sided operations uncovered using Global + Arrays regression test-suite. Thanks to @bjpalmer for reporting. +- Fix a problem with make check when using the PGI compiler. Thanks to + Carl Ponder for reporting. +- Fix a problem with ``MPI_FILE_READ_AT_ALL`` that could lead to application + hangs under certain circumstances. Thanks to Scot Breitenfeld for + reporting. +- Fix a problem building C++ applications with newer versions of GCC. + Thanks to Constantine Khrulev for reporting. + + +Open MPI version 4.0.4 +---------------------- +:Date: June, 2020 + +- Fix a memory patcher issue intercepting shmat and shmdt. This was + observed on RHEL 8.x ppc64le (see README for more info). +- Fix an illegal access issue caught using gcc's address sanitizer. + Thanks to Georg Geiser for reporting. +- Add checks to avoid conflicts with a libevent library shipped with LSF. +- Switch to linking against libevent_core rather than libevent, if present. +- Add improved support for UCX 1.9 and later. +- Fix an ABI compatibility issue with the Fortran 2008 bindings. + Thanks to Alastair McKinstry for reporting. +- Fix an issue with rpath of ``/usr/lib64`` when building OMPI on + systems with Lustre. Thanks to David Shrader for reporting. +- Fix a memory leak occurring with certain MPI RMA operations. +- Fix an issue with ORTE's mapping of MPI processes to resources. + Thanks to Alex Margolin for reporting and providing a fix. +- Correct a problem with incorrect error codes being returned + by OMPI MPI_T functions. +- Fix an issue with debugger tools not being able to attach + to mpirun more than once. Thanks to Gregory Lee for reporting. +- Fix an issue with the Fortran compiler wrappers when using + NAG compilers. Thanks to Peter Brady for reporting. +- Fix an issue with the ORTE ssh based process launcher at scale. 
+ Thanks to Benjamín Hernández for reporting. +- Address an issue when using shared MPI I/O operations. OMPIO will + now successfully return from the file open statement but will + raise an error if the file system does not support shared I/O + operations. Thanks to Romain Hild for reporting. +- Fix an issue with ``MPI_WIN_DETACH``. Thanks to Thomas Naughton for reporting. + + +Open MPI version 4.0.3 +---------------------- +:Date: March, 2020 + +- Update embedded PMIx to 3.1.5 +- Add support for Mellanox ConnectX-6. +- Fix an issue in OpenMPI IO when using shared file pointers. + Thanks to Romain Hild for reporting. +- Fix a problem with Open MPI using a previously installed + Fortran mpi module during compilation. Thanks to Marcin + Mielniczuk for reporting. +- Fix a problem with Fortran compiler wrappers ignoring use of + disable-wrapper-runpath configure option. Thanks to David + Shrader for reporting. +- Fixed an issue with trying to use mpirun on systems where neither + ssh nor rsh is installed. +- Address some problems found when using XPMEM for intra-node message + transport. +- Improve dimensions returned by MPI_Dims_create for certain + cases. Thanks to @aw32 for reporting. +- Fix an issue when sending messages larger than 4GB. Thanks to + Philip Salzmann for reporting this issue. +- Add ability to specify alternative module file path using + Open MPI's RPM spec file. Thanks to @jschwartz-cray for reporting. +- Clarify use of ``--with-hwloc`` configuration option in the README. + Thanks to Marcin Mielniczuk for raising this documentation issue. +- Fix an issue with shmem_atomic_set. Thanks to Sameh Sharkawi for reporting. +- Fix a problem with ``MPI_Neighbor_alltoall(v,w)`` for cartesian communicators + with cyclic boundary conditions. Thanks to Ralph Rabenseifner and + Tony Skjellum for reporting. +- Fix an issue using Open MPIO on 32 bit systems. Thanks to + Orion Poplawski for reporting.
+- Fix an issue with NetCDF test deadlocking when using the vulcan + Open MPIO component. Thanks to Orion Poplawski for reporting. +- Fix an issue with the ``mpi_yield_when_idle`` parameter being ignored + when set in the Open MPI MCA parameter configuration file. + Thanks to @iassiour for reporting. +- Address an issue with Open MPIO when writing/reading more than 2GB + in an operation. Thanks to Richard Warren for reporting. + + +Open MPI version 4.0.2 +---------------------- +:Date: September, 2019 + +- Update embedded PMIx to 3.1.4 +- Enhance Open MPI to detect when processes are running in + different name spaces on the same node, in which case the + vader CMA single copy mechanism is disabled. Thanks + to Adrian Reber for reporting and providing a fix. +- Fix an issue with ORTE job tree launch mechanism. Thanks + to @lanyangyang for reporting. +- Fix an issue with env processing when running as root. + Thanks to Simon Byrne for reporting and providing a fix. +- Fix Fortran ``MPI_FILE_GET_POSITION`` return code bug. + Thanks to Wei-Keng Liao for reporting. +- Fix user defined datatypes/ops leak in nonblocking base collective + component. Thanks to Andrey Maslennikov for verifying fix. +- Fixed shared memory not working with spawned processes. + Thanks to @rodarima for reporting. +- Fix data corruption of overlapping datatypes on sends. + Thanks to DKRZ for reporting. +- Fix segfault in oob_tcp component on close with active listeners. + Thanks to Orivej Desh for reporting and providing a fix. +- Fix divide by zero segfault in ompio. + Thanks to @haraldkl for reporting and providing a fix. +- Fix finalize of flux components. + Thanks to Stephen Herbein and Jim Garlick for providing a fix. +- Fix osc_rdma_acc_single_intrinsic regression. + Thanks to Joseph Schuchart for reporting and providing a fix. +- Fix hostnames with large integers. + Thanks to @perrynzhou for reporting and providing a fix.
+- Fix Deadlock in ``MPI_Fetch_and_op`` when using UCX + Thanks to Joseph Schuchart for reporting. +- Fix the SLURM plm for mpirun-based launching. + Thanks to Jordon Hayes for reporting and providing a fix. +- Prevent grep failure in rpmbuild from aborting. + Thanks to Daniel Letai for reporting. +- Fix btl/vader finalize sequence. + Thanks to Daniel Vollmer for reporting. +- Fix pml/ob1 local handle sent during PUT control message. + Thanks to @EmmanuelBRELLE for reporting and providing a fix. +- Fix Memory leak with persistent MPI sends and the ob1 "get" protocol. + Thanks to @s-kuberski for reporting. +- v4.0.x: mpi: mark ``MPI_COMBINER_{HVECTOR,HINDEXED,STRUCT}_INTEGER`` + removed unless configured with ``--enable-mpi1-compatibility`` +- Fix make-authors.pl when run in a git submodule. + Thanks to Michael Heinz for reporting and providing a fix. +- Fix deadlock with ``mpi_assert_allow_overtaking`` in MPI_Issend. + Thanks to Joseph Schuchart and George Bosilca for reporting. +- Add compilation flag to allow unwinding through files that are + present in the stack when attaching with MPIR. + Thanks to James A Clark for reporting and providing a fix. + +.. admonition:: Known Issue: + :class: knownissue + + There is a known issue with the OFI libfabric and PSM2 MTLs when trying to send + very long (> 4 GBytes) messages. In this release, these MTLs will catch + this case and abort the transfer. A future release will provide a + better solution to this issue. + + +Open MPI version 4.0.1 +---------------------- +:Date: March, 2019 + +- Update embedded PMIx to 3.1.2. +- Fix an issue with Vader (shared-memory) transport on OS-X. Thanks + to Daniel Vollmer for reporting. +- Fix a problem with the usNIC BTL Makefile. Thanks to George Marselis + for reporting. +- Fix an issue when using ``--enable-visibility`` configure option + and older versions of hwloc. Thanks to Ben Menadue for reporting + and providing a fix. 
+- Fix an issue with ``MPI_WIN_CREATE_DYNAMIC`` and ``MPI_GET`` from self. + Thanks to Bart Janssens for reporting. +- Fix an issue of excessive compiler warning messages from mpi.h + when using newer C++ compilers. Thanks to @Shadow-fax for + reporting. +- Fix a problem when building Open MPI using clang 5.0. +- Fix a problem with ``MPI_WIN_CREATE`` when using UCX. Thanks + to Adam Simpson for reporting. +- Fix a memory leak encountered for certain MPI datatype + destructor operations. Thanks to Axel Huebl for reporting. +- Fix several problems with MPI RMA accumulate operations. + Thanks to Jeff Hammond for reporting. +- Fix possible race condition in closing some file descriptors + during job launch using mpirun. Thanks to Jason Williams + for reporting and providing a fix. +- Fix a problem in OMPIO for large individual write operations. + Thanks to Axel Huebl for reporting. +- Fix a problem with parsing of map-by ppr options to mpirun. + Thanks to David Rich for reporting. +- Fix a problem observed when using the mpool hugepage component. Thanks + to Hunter Easterday for reporting and fixing. +- Fix valgrind warning generated when invoking certain MPI Fortran + data type creation functions. Thanks to @rtoijala for reporting. +- Fix a problem when trying to build with a PMIX 3.1 or newer + release. Thanks to Alastair McKinstry for reporting. +- Fix a problem encountered with building MPI F08 module files. + Thanks to Igor Andriyash and Axel Huebl for reporting. +- Fix two memory leaks encountered for certain MPI-RMA usage patterns. + Thanks to Joseph Schuchart for reporting and fixing. +- Fix a problem with the ORTE ``rmaps_base_oversubscribe`` MCA parameter. + Thanks to @iassiour for reporting. +- Fix a problem with UCX PML default error handler for MPI communicators. + Thanks to Marcin Krotkiewski for reporting. +- Fix various issues with OMPIO uncovered by the testmpio test suite.
+ + +Open MPI version 4.0.0 +---------------------- +:Date: September, 2018 + +- OSHMEM updated to the OpenSHMEM 1.4 API. +- Do not build OpenSHMEM layer when there are no SPMLs available. + Currently, this means the OpenSHMEM layer will only build if + a MXM or UCX library is found. +- A UCX BTL was added for enhanced MPI RMA support using UCX +- With this release, OpenIB BTL now only supports iWarp and RoCE by default. +- Updated internal HWLOC to 2.0.2 +- Updated internal PMIx to 3.0.2 +- Change the priority for selecting external versus internal HWLOC + and PMIx packages to build. Starting with this release, configure + by default selects available external HWLOC and PMIx packages over + the internal ones. +- Updated internal ROMIO to 3.2.1. +- Removed support for the MXM MTL. +- Removed support for SCIF. +- Improved CUDA support when using UCX. +- Enable use of CUDA allocated buffers for OMPIO. +- Improved support for two phase MPI I/O operations when using OMPIO. +- Added support for Software-based Performance Counters, see + https://github.com/davideberius/ompi/wiki/How-to-Use-Software-Based-Performance-Counters-(SPCs)-in-Open-MPI +- Change MTL OFI from opting-IN on "psm,psm2,gni" to opting-OUT on + "shm,sockets,tcp,udp,rstream" +- Various improvements to MPI RMA performance when using RDMA + capable interconnects. +- Update memkind component to use the memkind 1.6 public API. +- Fix a problem with javadoc builds using OpenJDK 11. Thanks to + Siegmar Gross for reporting. +- Fix a memory leak using UCX. Thanks to Charles Taylor for reporting. +- Fix hangs in ``MPI_Finalize`` when using UCX. +- Fix a problem with building Open MPI using an external PMIx 2.1.2 + library. Thanks to Marcin Krotkiewski for reporting. +- Fix race conditions in Vader (shared memory) transport. +- Fix problems with use of newer map-by mpirun options. Thanks to + Tony Reina for reporting.
+- Fix rank-by algorithms to properly rank by object and span +- Allow for running as root if two environment variables are set. + Requested by Axel Huebl. +- Fix a problem with building the Java bindings when using Java 10. + Thanks to Bryce Glover for reporting. +- Fix a problem with ORTE not reporting error messages if an application + terminated normally but exited with non-zero error code. Thanks to + Emre Brookes for reporting. diff --git a/docs/news/news-v4.1.x.rst b/docs/news/news-v4.1.x.rst new file mode 100644 index 00000000000..6364968e6e4 --- /dev/null +++ b/docs/news/news-v4.1.x.rst @@ -0,0 +1,213 @@ +Open MPI v4.1.x series +====================== + +This file contains all the NEWS updates for the Open MPI v4.1.x +series, in reverse chronological order. + +Open MPI version 4.1.2 +---------------------- +:Date: November, 2021 + +- ROMIO portability fix for OpenBSD +- Fix handling of ``MPI_IN_PLACE`` with ``MPI_ALLTOALLW`` and improve performance + of ``MPI_ALLTOALL`` and ``MPI_ALLTOALLV`` for ``MPI_IN_PLACE``. +- Fix one-sided issue with empty groups in Post-Start-Wait-Complete + synchronization mode. +- Fix Fortran status returns in certain use cases involving + Generalized Requests +- Romio datatype bug fixes. +- Fix ``oshmem_shmem_finalize()`` when ``main()`` returns non-zero value. +- Fix wrong affinity under LSF with the membind option. +- Fix ``count==0`` cases in ``MPI_REDUCE`` and ``MPI_IREDUCE``. +- Fix ssh launching on Bourne-flavored shells when the user has ``set -u`` + set in their shell startup files. +- Correctly process 0 slots with the ``mpirun --host`` option. +- Ensure to unlink and rebind socket when the Open MPI session + directory already exists. +- Fix a segv in ``mpirun --disable-dissable-map``. +- Fix a potential hang in the memory hook handling. +- Slight performance improvement in ``MPI_WAITALL`` when running in + ``MPI_THREAD_MULTIPLE``. +- Fix hcoll datatype mapping and rooted operation behavior.
+- Correct some operations modifying ``MPI_Status.MPI_ERROR`` when it is + disallowed by the MPI standard. +- UCX updates: + + - Fix datatype reference count issues. + - Detach dynamic window memory when freeing a window. + - Fix memory leak in datatype handling. + +- Fix various atomic operations issues. +- mpirun: try to set the curses winsize to the pty of the spawned + task. Thanks to Stack Overflow user @Seriously for reporting the + issue. +- PMIx updates: + + - Fix compatibility with external PMIx v4.x installations. + - Fix handling of PMIx v3.x compiler/linker flags. Thanks to Erik + Schnetter for reporting the issue. + - Skip SLURM-provided PMIx detection when appropriate. Thanks to + Alexander Grund for reporting the issue. + +- Fix handling by C++ compilers when they #include the STL "<version>" + header file, which ends up including Open MPI's text VERSION file + (which is not C code). Thanks to @srpgilles for reporting the + issue. +- Fix ``MPI_Op`` support for ``MPI_LONG``. +- Make the MPI C++ bindings library (libmpi_cxx) explicitly depend on + the OPAL internal library (libopen-pal). Thanks to Ye Luo for + reporting the issue. +- Fix configure handling of ``--with-libevent=/usr``. +- Fix memory leak when opening Lustre files. Thanks to Bert Wesarg + for submitting the fix. +- Fix ``MPI_SENDRECV_REPLACE`` to correctly process datatype errors. + Thanks to Lisandro Dalcin for reporting the issue. +- Fix ``MPI_SENDRECV_REPLACE`` to correctly handle large data. Thanks + Jakub Benda for reporting this issue and suggesting a fix. +- Add workaround for TCP "dropped connection" errors to drastically + reduce the possibility of this happening. +- OMPIO updates: + + - Fix handling when AMODE is not set. Thanks to Rainer Keller for + reporting the issue and supplying the fix. + - Fix FBTL "posix" component linking issue. Thanks to Honggang Li + for reporting the issue. + - Fixed segv with ``MPI_FILE_GET_BYTE_OFFSET`` on 0-sized file view.
+ - Thanks to GitHub user @shanedsnyder for submitting the issue. + +- OFI updates: + + - Multi-plane / Multi-Nic nic selection cleanups + - Add support for exporting Open MPI memory monitors into + Libfabric. + - Ensure that Cisco usNIC devices are never selected by the OFI + MTL. + - Fix buffer overflow in OFI networking setup. Thanks to Alexander + Grund for reporting the issue and supplying the fix. + +- Fix SSEND on tag matching networks. +- Fix error handling in several MPI collectives. +- Fix the ordering of ``MPI_COMM_SPLIT_TYPE``. Thanks to Wolfgang + Bangerth for raising the issue. +- No longer install the orted-mpir library (it's an internal / Libtool + convenience library). Thanks to Andrew Hesford for the fix. +- PSM2 updates: + + - Allow advanced users to disable PSM2 version checking. + - Fix to allow non-default installation locations of psm2.h. + +Open MPI version 4.1.1 +---------------------- +:Date: April, 2021 + +- Fix a number of datatype issues, including an issue with + improper handling of partial datatypes that could lead to + an unexpected application failure. +- Change UCX PML to not warn about MPI_Request leaks during + ``MPI_Finalize()`` by default. The old behavior can be restored with + the mca_pml_ucx_request_leak_check MCA parameter. +- Reverted temporary solution that worked around launch issues in + SLURM v20.11.{0,1,2}. SchedMD encourages users to avoid these + versions and to upgrade to v20.11.3 or newer. +- Updated PMIx to v3.2.2. +- Fixed configuration issue on Apple Silicon observed with + Homebrew. Thanks to François-Xavier Coudert for reporting the issue. +- Disabled gcc built-in atomics by default on aarch64 platforms. +- Disabled UCX PML when UCX v1.8.0 is detected. UCX version 1.8.0 has a bug that + may cause data corruption when its TCP transport is used in conjunction with + the shared memory transport. UCX versions prior to v1.8.0 are not affected by + this issue. Thanks to @ksiazekm for reporting the issue. 
+- Fixed detection of available UCX transports/devices to better inform PML + prioritization. +- Fixed SLURM support to mark ORTE daemons as non-MPI tasks. +- Improved AVX detection to more accurately detect supported + platforms. Also improved the generated AVX code, and switched to + using word-based MCA params for the op/avx component (vs. numeric + big flags). +- Improved OFI compatibility support and fixed memory leaks in error + handling paths. +- Improved HAN collectives with support for Barrier and Scatter. Thanks + to @EmmanuelBRELLE for these changes and the relevant bug fixes. +- Fixed MPI debugger support (i.e., the ``MPIR_Breakpoint()`` symbol). + Thanks to @louisespellacy-arm for reporting the issue. +- Fixed ORTE bug that prevented debuggers from reading MPIR_Proctable. +- Removed PML uniformity check from the UCX PML to address performance + regression. +- Fixed ``MPI_Init_thread(3)`` statement about C++ binding and update + references about ``MPI_THREAD_MULTIPLE``. Thanks to Andreas Lösel for + bringing the outdated docs to our attention. +- Added ``fence_nb`` to Flux PMIx support to address segmentation faults. +- Ensured progress of AIO requests in the POSIX FBTL component to + prevent exceeding maximum number of pending requests on MacOS. +- Used OPAL's multi-thread support in the orted to leverage atomic + operations for object refcounting. +- Fixed segv when launching with static TCP ports. +- Fixed ``--debug-daemons`` mpirun CLI option. +- Fixed bug where mpirun did not honor ``--host`` in a managed job + allocation. +- Made a managed allocation filter a hostfile/hostlist. +- Fixed bug to mark a generalized request as pending once initiated. +- Fixed external PMIx v4.x check. +- Fixed OSHMEM build with ``--enable-mem-debug``. +- Fixed a performance regression observed with older versions of GCC when + ``__ATOMIC_SEQ_CST`` is used. Thanks to @BiplabRaut for reporting the issue.
+- Fixed buffer allocation bug in the binomial tree scatter algorithm when + non-contiguous datatypes are used. Thanks to @sadcat11 for reporting the issue. +- Fixed bugs related to the accumulate and atomics functionality in the + osc/rdma component. +- Fixed race condition in MPI group operations observed with + ``MPI_THREAD_MULTIPLE`` threading level. +- Fixed a deadlock in the TCP BTL's connection matching logic. +- Fixed pml/ob1 compilation error when CUDA support is enabled. +- Fixed a build issue with Lustre caused by unnecessary header includes. +- Fixed a build issue with IMB LSF workload manager. +- Fixed linker error with UCX SPML. + + +Open MPI version 4.1.0 +---------------------- +:Date: December, 2020 + +- collectives: Add HAN and ADAPT adaptive collectives components. + Both components are off by default and can be enabled by specifying + ``mpirun --mca coll_adapt_priority 100 --mca coll_han_priority 100 ...``. + We intend to enable both by default in Open MPI 5.0. +- OMPIO is now the default for MPI-IO on all filesystems, including + Lustre (prior to this, ROMIO was the default for Lustre). Many + thanks to Mark Dixon for identifying MPI I/O issues and providing + access to Lustre systems for testing. +- Updates for macOS Big Sur. Thanks to FX Coudert for reporting this + issue and pointing to a solution. +- Minor MPI one-sided RDMA performance improvements. +- Fix hcoll ``MPI_SCATTERV`` with ``MPI_IN_PLACE``. +- Add AVX support for MPI collectives. +- Updates to mpirun(1) about "slots" and PE=x values. +- Fix buffer allocation for large environment variables. Thanks to + @zrss for reporting the issue. +- Upgrade the embedded OpenPMIx to v3.2.2. +- Take more steps towards creating fully Reproducible builds (see + https://reproducible-builds.org/). Thanks Bernhard M. Wiedemann for + bringing this to our attention. +- Fix issue with extra-long values in MCA files. Thanks to GitHub + user @zrss for bringing the issue to our attention. 
+- UCX: Fix zero-sized datatype transfers. +- Fix ``--cpu-list`` for non-uniform modes. +- Fix issue in PMIx callback caused by missing memory barrier on Arm platforms. +- OFI MTL: Various bug fixes. +- Fixed issue where ``MPI_TYPE_CREATE_RESIZED`` would create a datatype + with unexpected extent on oddly-aligned datatypes. +- collectives: Adjust default tuning thresholds for many collective + algorithms +- runtime: fix situation where rank-by argument does not work +- Portals4: Clean up error handling corner cases +- runtime: Remove ``--enable-install-libpmix`` option, which has not + worked since it was added +- opal: Disable memory patcher component on MacOS +- UCX: Allow UCX 1.8 to be used with the btl uct +- UCX: Replace usage of the deprecated NB API of UCX with NBX +- OMPIO: Add support for the IME file system +- OFI/libfabric: Added support for multiple NICs +- OFI/libfabric: Added support for Scalable Endpoints +- OFI/libfabric: Added btl for one-sided support +- OFI/libfabric: Multiple small bugfixes +- libnbc: Adding numerous performance-improving algorithms diff --git a/docs/openmpi_logo.png b/docs/openmpi_logo.png new file mode 100644 index 00000000000..16a2e0cbab2 Binary files /dev/null and b/docs/openmpi_logo.png differ diff --git a/docs/quickstart.rst b/docs/quickstart.rst new file mode 100644 index 00000000000..ccecb9907e5 --- /dev/null +++ b/docs/quickstart.rst @@ -0,0 +1,20 @@ +.. _label-quickstart: + +Quick start +=========== + +There are three general phases of using Open MPI: installing Open MPI, +building MPI applications, and running MPI applications. + +The links below take you to "quick start" sections at the beginning of +each chapter. These "quick start" sections provide a good starting +point, and may be all that many users need to read. However, if the +"quick start" sections are not enough, the remainder of each chapter +provides significantly more detailed information. + +#. :doc:`Quick start: Installing Open MPI + ` +#. 
:doc:`Quick start: Building MPI applications + ` +#. :doc:`Quick start: Running MPI applications + ` diff --git a/docs/release-notes/compilers.rst b/docs/release-notes/compilers.rst new file mode 100644 index 00000000000..23f5061fa9e --- /dev/null +++ b/docs/release-notes/compilers.rst @@ -0,0 +1,324 @@ +.. _compiler-notes-section-label: + +Compiler Notes +============== + +* Open MPI requires a C99-capable compiler to build. + +* On platforms other than x86-64, AArc64 (64-bit ARM), and PPC, Open + MPI requires a compiler that either supports C11 atomics or the GCC + ``__atomic`` atomics (e.g., GCC >= v4.8.x). + +* 32-bit platforms are only supported with a recent compiler that + supports C11 atomics. This includes GCC 4.9.x+ (although GCC 6.x or + newer is recommened), the Intel compiler suite 16, and clang 3.1. + +* Mixing compilers from different vendors when building Open MPI + (e.g., using the C/C++ compiler from one vendor and the Fortran + compiler from a different vendor) has been successfully employed by + some Open MPI users (discussed on the Open MPI user's mailing list), + but such configurations are not tested and not documented. For + example, such configurations may require additional compiler / + linker flags to make Open MPI build properly. + + A not-uncommon case for this is when building on MacOS with the + system-default GCC compiler (i.e., ``/usr/bin/gcc``), but a 3rd party + gfortran (e.g., provided by Homebrew, in ``/usr/local/bin/gfortran``). + Since these compilers are provided by different organizations, they + have different default search paths. For example, if Homebrew has + also installed a local copy of Libevent (a 3rd party package that + Open MPI requires), the MacOS-default ``gcc`` linker will find it + without any additional command line flags, but the Homebrew-provided + gfortran linker will not. In this case, it may be necessary to + provide the following on the configure command line: + + .. 
code-block:: sh + + shell$ ./configure FCFLAGS=-L/usr/local/lib ... + + This ``-L`` flag will then be passed to the Fortran linker when + creating Open MPI's Fortran libraries, and it will therefore be able + to find the installed Libevent. + +* In general, the latest versions of compilers of a given vendor's + series have the least bugs. We have seen cases where Vendor XYZ's + compiler version A.B fails to compile Open MPI, but version A.C + (where C>B) works just fine. If you run into a compile failure, you + might want to double check that you have the latest bug fixes and + patches for your compiler. + +* Users have reported issues with older versions of the Fortran PGI + compiler suite when using Open MPI's (non-default) ``--enable-debug`` + configure option. Per the above advice of using the most recent + version of a compiler series, the Open MPI team recommends using the + latest version of the PGI suite, and/or not using the ``--enable-debug`` + configure option. If it helps, here's what we have found with some + (not comprehensive) testing of various versions of the PGI compiler + suite: + + * pgi-8 : NO known good version with ``--enable-debug`` + * pgi-9 : 9.0-4 known GOOD + * pgi-10: 10.0-0 known GOOD + * pgi-11: NO known good version with ``--enable-debug`` + * pgi-12: 12.10 known BAD with ``-m32``, but known GOOD without ``-m32`` + (and 12.8 and 12.9 both known BAD with ``--enable-debug``) + * pgi-13: 13.9 known BAD with ``-m32``, 13.10 known GOOD without ``-m32`` + * pgi-15: 15.10 known BAD with ``-m32`` + +* Similarly, there is a known Fortran PGI compiler issue with long + source directory path names that was resolved in 9.0-4 (9.0-3 is + known to be broken in this regard). + +* Open MPI does not support the PGI compiler suite on OS X or MacOS. 
+ See issues below for more details: + + * https://github.com/open-mpi/ompi/issues/2604 + * https://github.com/open-mpi/ompi/issues/2605 + +* OpenSHMEM Fortran bindings do not support the "no underscore" + Fortran symbol convention. IBM's ``xlf`` compilers build in that mode + by default. As such, IBM's ``xlf`` compilers cannot build/link the + OpenSHMEM Fortran bindings by default. A workaround is to pass + ``FC="xlf -qextname"`` at configure time to force a trailing + underscore. See https://github.com/open-mpi/ompi/issues/3612 for + more details. + +* MPI applications that use the ``mpi_f08`` module on PowerPC platforms + (tested ppc64le) will likely experience runtime failures if: + + * they are using a GNU linker (ld) version after v2.25.1 and before + v2.28, + *and* + * they compiled with PGI (tested 17.5) or XL (tested v15.1.5) + compilers. This was noticed on Ubuntu 16.04 which uses the + 2.26.1 version of ``ld`` by default. However, this issue impacts + any OS using a version of ``ld`` noted above. This GNU linker + regression will be fixed in version 2.28. `Here is a link to the + GNU bug on this issue + `_. The + XL compiler will include a fix for this issue in a future + release. + +* On NetBSD-6 (at least AMD64 and i386), and possibly on OpenBSD, + Libtool misidentifies properties of f95/g95, leading to obscure + compile-time failures if used to build Open MPI. You can work + around this issue by ensuring that libtool will not use f95/g95 + (e.g., by specifying ``FC=``, or otherwise ensuring + a different Fortran compiler will be found earlier in the path than + ``f95``/``g95``), or by disabling the Fortran MPI bindings with + ``--disable-mpi-fortran``. + +* On OpenBSD/i386, if you configure with + ``--enable-mca-no-build=patcher``, you will also need to add + ``--disable-dlopen``. Otherwise, odd crashes can occur + nondeterministically. 
+ +* Absoft 11.5.2 plus a service pack from September 2012 (which Absoft + says is available upon request), or a version later than 11.5.2 + (e.g., 11.5.3), is required to compile the Fortran ``mpi_f08`` + module. + +* Open MPI does not support the Sparc v8 CPU target. However, + as of Solaris Studio 12.1, and later compilers, one should not + specify ``-xarch=v8plus`` or ``-xarch=v9``. The use of the options + ``-m32`` and ``-m64`` for producing 32 and 64 bit targets, respectively, + are now preferred by the Solaris Studio compilers. GCC may + require either ``-m32`` or ``-mcpu=v9 -m32``, depending on GCC version. + +* If one tries to build OMPI on Ubuntu with Solaris Studio using the C++ + compiler and the ``-m32`` option, you might see a warning: + + .. code-block:: + + CC: Warning: failed to detect system linker version, falling back to custom linker usage + + And the build will fail. One can overcome this error by either + setting ``LD_LIBRARY_PATH`` to the location of the 32 bit libraries + (most likely ``/lib32``), or giving ``LDFLAGS="-L/lib32 -R/lib32"`` + to the ``configure`` command. Officially, Solaris Studio is not + supported on Ubuntu Linux distributions, so additional problems + might occur. + +* Open MPI does not support the ``gccfss`` compiler (GCC For SPARC + Systems; a now-defunct compiler project from Sun). + +* At least some versions of the Intel 8.1 compiler seg fault while + compiling certain Open MPI source code files. As such, it is not + supported. + +* It has been reported that the Intel 9.1 and 10.0 compilers fail to + compile Open MPI on IA64 platforms. As of 12 Sep 2012, there is + very little (if any) testing performed on IA64 platforms (with any + compiler). Support is "best effort" for these platforms, but it is + doubtful that any effort will be expended to fix the Intel 9.1 / + 10.0 compiler issuers on this platform. 
+ +* Early versions of the Intel 12.1 Linux compiler suite on x86_64 seem + to have a bug that prevents Open MPI from working. Symptoms + including immediate segv of the wrapper compilers (e.g., ``mpicc``) and + MPI applications. As of 1 Feb 2012, if you upgrade to the latest + version of the Intel 12.1 Linux compiler suite, the problem will go + away. + +* `Users have reported + `_ that the Intel + Fortran compiler will fail to link Fortran-based MPI applications on + macOS with linker errors similar to this: + + .. code-block:: text + + Undefined symbols for architecture x86_64: + "_ompi_buffer_detach_f08", referenced from: + import-atom in libmpi_usempif08.dylib + ld: symbol(s) not found for architecture x86_64 + + It appears that setting the environment variable + ``lt_cx_ld_force_load=no`` before invoking Open MPI's ``configure`` + script works around the issue. For example: + + .. code-block:: sh + + shell$ lt_cv_ld_force_load=no ./configure ... + +* The Portland Group compilers prior to version 7.0 require the + ``-Msignextend`` compiler flag to extend the sign bit when converting + from a shorter to longer integer. This is is different than other + compilers (such as GNU). When compiling Open MPI with the Portland + compiler suite, the following flags should be passed to Open MPI's + ``configure`` script: + + .. code-block:: sh + + shell$ ./configure CFLAGS=-Msignextend CXXFLAGS=-Msignextend \ + --with-wrapper-cflags=-Msignextend \ + --with-wrapper-cxxflags=-Msignextend ... + + This will both compile Open MPI with the proper compile flags and + also automatically add ``-Msignextend`` when the C and C++ MPI wrapper + compilers are used to compile user MPI applications. + +* It has been reported that Pathscale 5.0.5 and 6.0.527 compilers + give an internal compiler error when trying to build Open MPI. + +* As of July 2017, the Pathscale compiler suite apparently has no + further commercial support, and it does not look like there will be + further releases. 
Any issues discovered regarding building / + running Open MPI with the Pathscale compiler suite therefore may not + be able to be resolved. + +* Using the Absoft compiler to build the MPI Fortran bindings on Suse + 9.3 is known to fail due to a Libtool compatibility issue. + +* There is now only a single Fortran MPI wrapper compiler and a + single Fortran OpenSHMEM wrapper compiler: ``mpifort`` and ``oshfort``, + respectively. + + .. caution:: The legacy executable names ``mpif77`` and ``mpif90`` + still exist, but they are symbolic links to + ``mpifort``. Users should immediately stop using the + legacy names, and should always use ``mpifort``. + + Similarly, Open MPI's ``configure`` script only recognizes the ``FC`` + and ``FCFLAGS`` environment variables (to specify the Fortran + compiler and compiler flags, respectively). The ``F77`` and ``FFLAGS`` + environment variables are **IGNORED**. + + .. important:: As a direct result, it is **STRONGLY** recommended + that you specify a Fortran compiler that uses file suffixes to + determine Fortran code layout (e.g., free form vs. fixed). For + example, with some versions of the IBM XLF compiler, it is + preferable to use ``FC=xlf`` instead of ``FC=xlf90``, because + ``xlf`` will automatically determine the difference between free + form and fixed Fortran source code. + + However, many Fortran compilers allow specifying additional + command-line arguments to indicate which Fortran dialect to use. + For example, if ``FC=xlf90``, you may need to use ``mpifort --qfixed ...`` + to compile fixed format Fortran source files. + + You can use either ``ompi_info`` or ``oshmem_info`` to see with which + Fortran compiler Open MPI was configured and compiled. + + There are up to three sets of Fortran MPI bindings that may be + provided (depending on your Fortran compiler): + + #. ``mpif.h``: This is the first MPI Fortran interface that was + defined in MPI-1. It is a file that is included in Fortran + source code. 
The only interface declared in Open MPI's + ``mpif.h`` is ``MPI_SIZEOF`` (because of its polymorphism). All + other interfaces are implicit. + + #. ``mpi`` module: The ``mpi`` module file was added in MPI-2. It + provides strong compile-time parameter type checking for MPI all + interfaces. + + #. ``mpi_f08`` module: The ``mpi_f08`` module was added in MPI-3. + It provides many advantages over the ``mpif.h`` file and ``mpi`` + module. For example, MPI handles have distinct types (vs. all + being integers). See the `MPI-3.0 (or later) standard + `_ for more details. + + .. important:: The ``mpi_f08`` module is **STRONGLY** recommended + for all new MPI Fortran subroutines and applications. Note that + the ``mpi_f08`` module can be used in conjunction with the other + two Fortran MPI bindings in the same application (only one + binding can be used per subroutine/function, however). Full + interoperability between ``mpif.h``/``mpi`` module and + ``mpi_f08`` module MPI handle types is provided, allowing + ``mpi_f08`` to be used in new subroutines in legacy MPI + applications. + + Per the OpenSHMEM specification, there is only one Fortran OpenSHMEM + binding provided: + + * ``shmem.fh``: All Fortran OpenSHMEM programs should include + ``shmem.f``, and Fortran OpenSHMEM programs that use constants + defined by OpenSHMEM **MUST** include ``shmem.fh``. + + The following notes apply to the above-listed Fortran bindings: + + * All Fortran compilers support the ``mpif.h``/``shmem.fh``-based + bindings, with one exception: the ``MPI_SIZEOF`` interfaces will + only be present when Open MPI is built with a Fortran compiler + that supports the ``INTERFACE`` keyword and ``ISO_FORTRAN_ENV``. Most + notably, this excludes the GNU Fortran compiler suite before + version 4.9. + + * The level of support provided by the ``mpi`` module is based on your + Fortran compiler. 
+ + If Open MPI is built with a non-GNU Fortran compiler, or if Open + MPI is built with the GNU Fortran compiler >= v4.9, all MPI + subroutines will be prototyped in the ``mpi`` module. All calls to + MPI subroutines will therefore have their parameter types checked + at compile time. + + If Open MPI is built with an old ``gfortran`` (i.e., < v4.9), a + limited ``mpi`` module will be built. Due to the limitations of + these compilers, and per guidance from the MPI-3.0 (and later) + specification, all MPI subroutines with "choice" buffers are + specifically *not* included in the ``mpi`` module, and their + parameters will not be checked at compile time. Specifically, all + MPI subroutines with no "choice" buffers are prototyped and will + receive strong parameter type checking at run-time (e.g., + ``MPI_INIT``, ``MPI_COMM_RANK``, etc.). + + Similar to the ``mpif.h`` interface, ``MPI_SIZEOF`` is only + supported on Fortran compilers that support ``INTERFACE`` and + ``ISO_FORTRAN_ENV``. + + * The ``mpi_f08`` module has been tested with the Intel Fortran + compiler and gfortran >= 4.9. Other modern Fortran compilers + likely also work. + + Many older Fortran compilers do not provide enough modern Fortran + features to support the ``mpi_f08`` module. For example, ``gfortran`` + < v4.9 does provide enough support for the ``mpi_f08`` module. + + You can examine the output of the following command to see all + the Fortran features that are/are not enabled in your Open MPI + installation: + + .. code-block:: sh + + shell$ ompi_info | grep -i fort diff --git a/docs/release-notes/extensions.rst b/docs/release-notes/extensions.rst new file mode 100644 index 00000000000..36021c427d3 --- /dev/null +++ b/docs/release-notes/extensions.rst @@ -0,0 +1,8 @@ +Open MPI Extensions +=================== + +An MPI "extensions" framework is included in Open MPI, providing +non-standardized / Open MPI-specific extensions for MPI applications. 
+ +:doc:`See the Open MPI-specific features ` section +for more information on compiling and using MPI extensions. diff --git a/docs/release-notes/general.rst b/docs/release-notes/general.rst new file mode 100644 index 00000000000..bbb6090d7de --- /dev/null +++ b/docs/release-notes/general.rst @@ -0,0 +1,59 @@ +General notes +============= + +The following list of release notes applies to this code base as of +this writing: + +* Open MPI now includes two public software layers: MPI and OpenSHMEM. + Throughout this document, references to Open MPI implicitly include + both of these layers. When distinction between these two layers is + necessary, we will reference them as the "MPI" and "OpenSHMEM" + layers respectively. + +* OpenSHMEM is a collaborative effort between academia, industry, and + the U.S. Government to create a specification for a standardized API + for parallel programming in the Partitioned Global Address Space + (PGAS). For more information about the OpenSHMEM project, including + access to the current OpenSHMEM specification, please visit + http://openshmem.org/. + + .. note:: This OpenSHMEM implementation will only work in Linux + environments with a restricted set of supported networks. + +* Open MPI includes support for a wide variety of supplemental + hardware and software package. When configuring Open MPI, you may + need to supply additional flags to the ``configure`` script in order + to tell Open MPI where the header files, libraries, and any other + required files are located. As such, running ``configure`` by itself + may not include support for all the devices (etc.) that you expect, + especially if their support headers / libraries are installed in + non-standard locations. Network interconnects are an easy example + to discuss -- Libfabric and OpenFabrics networks, for example, both + have supplemental headers and libraries that must be found before + Open MPI can build support for them. 
You must specify where these + files are with the appropriate options to configure. See the + listing of configure command-line switches, below, for more details. + +* The majority of Open MPI's documentation is here in this document. + The man pages are also installed by default. + +* Note that Open MPI documentation uses the word "component" + frequently; the word "plugin" is probably more familiar to most + users. As such, end users can probably completely substitute the + word "plugin" wherever you see "component" in our documentation. + For what it's worth, we use the word "component" for historical + reasons, mainly because it is part of our acronyms and internal API + function calls. + +* Open MPI has taken some steps towards `Reproducible Builds + `_. Specifically, Open MPI's + ``configure`` and ``make`` process, by default, records the build date + and some system-specific information such as the hostname where Open + MPI was built and the username who built it. If you desire a + Reproducible Build, set the ``$SOURCE_DATE_EPOCH``, ``$USER`` and + ``$HOSTNAME`` environment variables before invoking ``configure`` and + ``make``, and Open MPI will use those values instead of invoking + ``whoami`` and/or ``hostname``, respectively. See + https://reproducible-builds.org/docs/source-date-epoch/ for + information on the expected format and content of the + ``$SOURCE_DATE_EPOCH`` variable. diff --git a/docs/release-notes/index.rst b/docs/release-notes/index.rst new file mode 100644 index 00000000000..11c002b39b0 --- /dev/null +++ b/docs/release-notes/index.rst @@ -0,0 +1,16 @@ +Release notes +============= + +.. 
toctree:: + :maxdepth: 1 + + general + platform + compilers + run-time + mpi + openshmem + mpi-collectives + openshmem-collectives + networks + extensions diff --git a/docs/release-notes/mpi-collectives.rst b/docs/release-notes/mpi-collectives.rst new file mode 100644 index 00000000000..4a723a74f58 --- /dev/null +++ b/docs/release-notes/mpi-collectives.rst @@ -0,0 +1,9 @@ +MPI Collectives +=============== + +* The ``cuda`` coll component provides CUDA-aware support for the + reduction type collectives with GPU buffers. This component is only + compiled into the library when the library has been configured with + CUDA-aware support. It intercepts calls to the reduction + collectives, copies the data to staging buffers if GPU buffers, then + calls underlying collectives to do the work. diff --git a/docs/release-notes/mpi.rst b/docs/release-notes/mpi.rst new file mode 100644 index 00000000000..1fd834c7595 --- /dev/null +++ b/docs/release-notes/mpi.rst @@ -0,0 +1,162 @@ +MPI Functionality and Features +============================== + +MPI Standard conformance +------------------------ + +In the Open MPI |ompi_series| series, all MPI-3.1 functionality is +supported. Some MPI-4.0 functionality is supported, but not all of +it. + +As such, ``MPI_VERSION`` is set to 3 and ``MPI_SUBVERSION`` is set +to 1. + +For historical reference: + +.. list-table:: + :header-rows: 1 + + * - MPI standards conformance + - Introduced in Open MPI version + + * - MPI-2.0 + - Open MPI v1.2 + + * - MPI-2.1 + - Open MPI v1.3 + + * - MPI-3.0 + - Open MPI v1.8 + + * - MPI-3.1 + - Open MPI v2.0 + +Removed MPI APIs +---------------- + +Note that starting with Open MPI v4.0.0, prototypes for several +legacy MPI-1 symbols that were deleted in the MPI-3.0 specification +are no longer available by default in ``mpi.h``. Specifically, +several MPI-1 symbols were deprecated in the 1996 publishing of the +MPI-2.0 specification. 
These deprecated symbols were eventually +removed from the MPI-3.0 specification in +2012. + +The symbols that now no longer appear by default in Open MPI's +``mpi.h`` are: + +* ``MPI_Address`` (replaced by ``MPI_Get_address``) +* ``MPI_Errhandler_create`` (replaced by ``MPI_Comm_create_errhandler``) +* ``MPI_Errhandler_get`` (replaced by ``MPI_Comm_get_errhandler``) +* ``MPI_Errhandler_set`` (replaced by ``MPI_Comm_set_errhandler``) +* ``MPI_Type_extent`` (replaced by ``MPI_Type_get_extent``) +* ``MPI_Type_hindexed`` (replaced by ``MPI_Type_create_hindexed``) +* ``MPI_Type_hvector`` (replaced by ``MPI_Type_create_hvector``) +* ``MPI_Type_lb`` (replaced by ``MPI_Type_get_extent``) +* ``MPI_Type_struct`` (replaced by ``MPI_Type_create_struct``) +* ``MPI_Type_ub`` (replaced by ``MPI_Type_get_extent``) +* ``MPI_LB`` (replaced by ``MPI_Type_create_resized``) +* ``MPI_UB`` (replaced by ``MPI_Type_create_resized``) +* ``MPI_COMBINER_HINDEXED_INTEGER`` +* ``MPI_COMBINER_HVECTOR_INTEGER`` +* ``MPI_COMBINER_STRUCT_INTEGER`` +* ``MPI_Handler_function`` (replaced by ``MPI_Comm_errhandler_function``) + +Although these symbols are no longer prototyped in ``mpi.h``, they +are still present in the MPI library in Open MPI |ompi_series|. This +enables legacy MPI applications to link and run successfully with +Open MPI |ompi_series|, even though they will fail to compile. + +.. warning:: Future releases of Open MPI beyond the |ompi_series| + series may remove these symbols altogether. + +.. warning:: The Open MPI team **STRONGLY** encourages all MPI + application developers to stop using these constructs that were + first deprecated over 20 years ago, and finally removed from the + MPI specification in MPI-3.0 (in 2012). + +.. important:: :ref:`The "Removed MPI constructs" section + ` contains examples of how to update + legacy MPI applications using these deleted symbols to use the + "new" symbols. 
+ +All that being said, if you are unable to immediately update your +application to stop using these legacy MPI-1 symbols, you can +re-enable them in ``mpi.h`` by configuring Open MPI with the +``--enable-mpi1-compatibility`` flag. + +Other MPI features +------------------ + +* Rank reordering support is available using the TreeMatch library. It + is activated for the graph and ``dist_graph`` communicator topologies. + +* When using MPI deprecated functions, some compilers will emit + warnings. For example: + + .. code-block:: + + shell$ cat deprecated_example.c + #include + void foo(void) { + MPI_Datatype type; + MPI_Type_struct(1, NULL, NULL, NULL, &type); + } + shell$ mpicc -c deprecated_example.c + deprecated_example.c: In function 'foo': + deprecated_example.c:4: warning: 'MPI_Type_struct' is deprecated (declared at /opt/openmpi/include/mpi.h:1522) + shell$ + +* ``MPI_THREAD_MULTIPLE`` is supported with some exceptions. + + The following PMLs support ``MPI_THREAD_MULTIPLE``: + + #. ``cm``, when used with the following MTLs: + + #. ``ofi`` (Libfabric) + #. ``portals4`` + + #. ``ob1``, when used with the following BTLs: + + #. ``self`` + #. ``sm`` + #. ``smcuda`` + #. ``tcp`` + #. ``ugni`` + #. ``usnic`` + + #. ``ucx`` + + Currently, MPI File operations are not thread safe even if MPI is + initialized for ``MPI_THREAD_MULTIPLE`` support. + +* ``MPI_REAL16`` and ``MPI_COMPLEX32`` are only supported on platforms + where a portable C datatype can be found that matches the Fortran + type ``REAL*16``, both in size and bit representation. + +* The "libompitrace" library is bundled in Open MPI and is installed + by default (it can be disabled via the ``--disable-libompitrace`` + flag). This library provides a simplistic tracing of select MPI + function calls via the MPI profiling interface. Linking it in to + your application via (e.g., via ``-lompitrace``) will automatically + output to stderr when some MPI functions are invoked: + + .. 
code-block:: + + shell$ cd examples/ + shell$ mpicc hello_c.c -o hello_c -lompitrace + shell$ mpirun -np 1 hello_c + MPI_INIT: argc 1 + Hello, world, I am 0 of 1 + MPI_BARRIER[0]: comm MPI_COMM_WORLD + MPI_FINALIZE[0] + shell$ + + Keep in mind that the output from the trace library is going to + ``stderr``, so it may output in a slightly different order than the + ``stdout`` from your application. + + This library is being offered as a "proof of concept" / convenience + from Open MPI. If there is interest, it is trivially easy to extend + it to printf for other MPI functions. Pull requests on github.com + would be greatly appreciated. diff --git a/docs/release-notes/networks.rst b/docs/release-notes/networks.rst new file mode 100644 index 00000000000..15a108f8b46 --- /dev/null +++ b/docs/release-notes/networks.rst @@ -0,0 +1,128 @@ +Network Support +=============== + +Main network support models +--------------------------- + +There are multiple MPI network models available in this release: + +* ``ob1`` supports a variety of networks using BTL ("Byte Transfer + Layer") plugins that can be used in + combination with each other: + + * ``self``: Loopback (send-to-self) + * ``sm``: Shared memory, including single-copy technologies: + XPMEM, Linux CMA, as Linux KNEM, as well as traditional + copy-in/copy-out shared memory. + * ``tcp``: TCP + * ``smcuda``: CUDA-enabled shared memory + * ``usnic``: Cisco usNIC + * ``ugni``: uGNI (Cray Gemini, Aries) + +* ``cm`` supports a smaller number of networks (and they cannot be + used together), but may provide better overall MPI performance by + utilizing MTL ("Matching Transport Layer") plugins: + + * OpenFabrics Interfaces ("libfabric" tag matching) + * Intel Omni-Path PSM2 (version 11.2.173 or later) + * Intel True Scale PSM (QLogic InfiniPath) + * Portals 4 + +* ``ucx`` uses the `Unified Communication X (UCX) communication + library `_. 
This is an open-source + project developed in collaboration between industry, laboratories, + and academia to create an open-source production grade + communication framework for data centric and high-performance + applications. The UCX library can be downloaded from repositories + (e.g., Fedora/RedHat yum repositories). The UCX library is also + part of Mellanox OFED and Mellanox HPC-X binary distributions. + + UCX currently supports: + + * OpenFabrics Verbs (including InfiniBand and RoCE) + * Cray's uGNI + * TCP + * Shared memory + * NVIDIA CUDA drivers + +While users can manually select any of the above transports at run +time, Open MPI will select a default transport as follows: + +#. If InfiniBand devices are available, use the UCX PML. +#. If PSM, PSM2, or other tag-matching-supporting Libfabric + transport devices are available (e.g., Cray uGNI), use the ``cm`` + PML and a single appropriate corresponding ``mtl`` module. +#. Otherwise, use the ``ob1`` PML and one or more appropriate ``btl`` + modules. + +Users can override Open MPI's default selection algorithms and force +the use of a specific transport if desired by setting the ``pml`` MCA +parameter (and potentially the ``btl`` and/or ``mtl`` MCA parameters) at +run-time: + +.. code-block:: sh + + shell$ mpirun --mca pml ob1 --mca btl [comma-delimted-BTLs] ... + # or + shell$ mpirun --mca pml cm --mca mtl [MTL] ... + # or + shell$ mpirun --mca pml ucx ... + +There is a known issue when using UCX with very old Mellanox +Infiniband HCAs, in particular HCAs preceding the introduction of +the ConnectX product line, which can result in Open MPI crashing in +MPI_Finalize. This issue is addressed by UCX release 1.9.0 and +newer. + +Miscellaneous network notes +--------------------------- + +* The main OpenSHMEM network model is ``ucx``; it interfaces directly + with UCX. + +* In prior versions of Open MPI, InfiniBand and RoCE support was + provided through the ``openib`` BTL and ``ob1`` PML plugins. 
Starting + with Open MPI 4.0.0, InfiniBand support through the ``openib`` plugin + is both deprecated and superseded by the ``ucx`` PML component. The + ``openib`` BTL was removed in Open MPI v5.0.0. + + While the ``openib`` BTL depended on ``libibverbs``, the UCX PML depends + on the UCX library. + + Once installed, Open MPI can be built with UCX support by adding + ``--with-ucx`` to the Open MPI configure command. Once Open MPI is + configured to use UCX, the runtime will automatically select the + ``ucx`` PML if one of the supported networks is detected (e.g., + InfiniBand). It's possible to force using UCX in the ``mpirun`` or + ``oshrun`` command lines by specifying any or all of the following mca + parameters: ``--mca pml ucx`` for MPI point-to-point operations, + ``--mca spml ucx`` for OpenSHMEM support, and ``--mca osc ucx`` for MPI + RMA (one-sided) operations. + +* The ``usnic`` BTL is support for Cisco's usNIC device ("userspace NIC") + on Cisco UCS servers with the Virtualized Interface Card (VIC). + Although the usNIC is accessed via the OpenFabrics Libfabric API + stack, this BTL is specific to Cisco usNIC devices. + +* uGNI is a Cray library for communicating over the Gemini and Aries + interconnects. + +* Linux ``knem`` support is used when the ``sm`` (shared memory) BTL is + compiled with knem support (see the ``--with-knem`` configure option) + and the ``knem`` Linux module is loaded in the running kernel. If the + ``knem`` Linux kernel module is not loaded, the ``knem`` support is (by + default) silently deactivated during Open MPI jobs. + + See https://knem.gforge.inria.fr/ for details on Knem. + +* Linux Cross-Memory Attach (CMA) or XPMEM is used by the ``sm`` shared + memory BTL when the CMA/XPMEM libraries are installed, + respectively. Linux CMA and XPMEM are similar (but different) + mechanisms for Open MPI to utilize single-copy semantics for shared + memory. 
+ +* The OFI MTL does not support sending messages larger than the active + Libfabric provider's ``max_msg_size``. If you receive an error + message about sending too large of a message when using the OFI MTL, + please reach out to your networking vendor to ask them to support a + larger ``max_msg_size`` for tagged messages. diff --git a/docs/release-notes/openshmem-collectives.rst b/docs/release-notes/openshmem-collectives.rst new file mode 100644 index 00000000000..38e491cdcb2 --- /dev/null +++ b/docs/release-notes/openshmem-collectives.rst @@ -0,0 +1,10 @@ +OpenSHMEM Collectives +===================== + +* The ``fca`` scoll component: the Mellanox Fabric Collective + Accelerator (FCA) is a solution for offloading collective operations + from the MPI process onto Mellanox QDR InfiniBand switch CPUs and + HCAs. + +* The ``basic`` scoll component: Reference implementation of all + OpenSHMEM collective operations. diff --git a/docs/release-notes/openshmem.rst b/docs/release-notes/openshmem.rst new file mode 100644 index 00000000000..46ace7d48e8 --- /dev/null +++ b/docs/release-notes/openshmem.rst @@ -0,0 +1,4 @@ +OpenSHMEM Functionality and Features +==================================== + +All OpenSHMEM-1.3 functionality is supported. diff --git a/docs/release-notes/platform.rst b/docs/release-notes/platform.rst new file mode 100644 index 00000000000..18b48b24eb0 --- /dev/null +++ b/docs/release-notes/platform.rst @@ -0,0 +1,42 @@ +.. _platform-notes-section-label: + +Platform Notes +============== + +.. error:: **TODO We should have a canonical list of:** + + * *required* 3rd-party package versions supported (PRRTE, hwloc, + libevent) + * back-end run-time systems supported (behind PRRTE) + * OS's and compilers supported + * network interconnects supported. 
+ +* Systems that have been tested are: + + * Linux (various flavors/distros), 64 bit (x86, ppc, aarch64), + with gcc (>=4.8.x+), clang (>=3.6.0), Absoft (fortran), Intel, + and Portland (be sure to also see :ref:`the Compiler Notes + section `) + * macOS (10.14-10.15, 11.x, 12.x), 64 bit (x86_64) with XCode + compilers + +* Other systems have been lightly (but not fully) tested: + + * Linux (various flavors/distros), 32 bit, with gcc + * Cygwin 32 & 64 bit with gcc + * ARMv6, ARMv7, ARMv9 + * Other 64 bit platforms. + * OpenBSD. Requires configure options ``--enable-mca-no-build=patcher`` + and ``--disable-dlopen`` with this release. + * Problems have been reported when building Open MPI on FreeBSD 11.1 + using the clang-4.0 system compiler. A workaround is to build + Open MPI using the GNU compiler. + +* The run-time systems that are currently supported are: + + * ssh / rsh + * PBS Pro, Torque + * Platform LSF (tested with v9.1.1 and later) + * Slurm + * Cray XE, XC, and XK + * Oracle Grid Engine (OGE) 6.1, 6.2 and open source Grid Engine diff --git a/docs/release-notes/run-time.rst b/docs/release-notes/run-time.rst new file mode 100644 index 00000000000..a35f6273221 --- /dev/null +++ b/docs/release-notes/run-time.rst @@ -0,0 +1,34 @@ +General Run-Time Support Notes +============================== + +* The Open MPI installation must be in your ``PATH`` on all nodes (and + potentially ``LD_LIBRARY_PATH`` or ``DYLD_LIBRARY_PATH``, if + ``libmpi``/``libshmem`` is a shared library), unless using the + ``--prefix`` or ``--enable-mpirun-prefix-by-default`` functionality (see + below). + +* Open MPI's run-time behavior can be customized via Modular Component + Architecture (MCA) parameters (see below for more information on how + to get/set MCA parameter values). Some MCA parameters can be set in + a way that renders Open MPI inoperable (see notes about MCA + parameters later in this file). In particular, some parameters have + required options that must be included. 
+ + .. error:: TODO Need a link in the above paragraph for how to set MCA + params. This might be in FAQ content that has not yet + been converted. + + * If specified, the ``btl`` parameter must include the ``self`` + component, or Open MPI will not be able to deliver messages to the + same rank as the sender. For example: ``mpirun --mca btl tcp,self + ...`` + * If specified, the ``btl_tcp_if_exclude`` parameter must include the + loopback device (``lo`` on many Linux platforms), or Open MPI will + not be able to route MPI messages using the TCP BTL. For example: + ``mpirun --mca btl_tcp_if_exclude lo,eth1 ...`` + +* Running on nodes with different endian and/or different datatype + sizes within a single parallel job is supported in this release. + However, Open MPI does not resize data when datatypes differ in size + (for example, sending a 4 byte ``MPI_DOUBLE`` and receiving an 8 byte + ``MPI_DOUBLE`` will fail). diff --git a/docs/requirements.txt b/docs/requirements.txt new file mode 100644 index 00000000000..80798525f3d --- /dev/null +++ b/docs/requirements.txt @@ -0,0 +1,4 @@ +sphinx>=4.2.0 +recommonmark +docutils +sphinx-rtd-theme diff --git a/docs/running-apps/gridengine.rst b/docs/running-apps/gridengine.rst new file mode 100644 index 00000000000..1412f472a67 --- /dev/null +++ b/docs/running-apps/gridengine.rst @@ -0,0 +1,290 @@ +Launching with Grid Engine +========================== + +Open MPI supports the family of run-time schedulers including the Sun +Grid Engine (SGE), Oracle Grid Engine (OGE), Grid Engine (GE), Son of +Grid Engine, and others. + +This documentation will collectively refer to all of them as "Grid +Engine", unless a referring to a specific flavor of the Grid Engine +family. + +Verify Grid Engine support +-------------------------- + +.. important:: To build Grid Engine support in Open MPI, you will need + to explicitly request the SGE support with the ``--with-sge`` + command line switch to Open MPI's ``configure`` script. 
+ +To verify if support for Grid Engine is configured into your Open MPI +installation, run ``prte_info`` as shown below and look for +``gridengine``. + +.. code-block:: + + shell$ prte_info | grep gridengine + MCA ras: gridengine (MCA v2.0, API v2.0, Component v1.3) + +.. note:: PRRTE is the software layer that provides run-time + environment support to Open MPI. Open MPI typically hides most + PMIx and PRRTE details from the end user, but this is one place + that Open MPI is unable to hide the fact that PRRTE provides this + functionality, not Open MPI. Hence, users need to use the + ``prte_info`` command to check for Grid Engine support (not + ``ompi_info``). + +Launching +--------- + +When Grid Engine support is included, Open MPI will automatically +detect when it is running inside SGE and will just "do the Right +Thing." + +Specifically, if you execute an ``mpirun`` command in a Grid Engine +job, it will automatically use the Grid Engine mechanisms to launch +and kill processes. There is no need to specify what nodes to run on +|mdash| Open MPI will obtain this information directly from Grid +Engine and default to a number of processes equal to the slot count +specified. For example, this will run 4 MPI processes on the nodes +that were allocated by Grid Engine: + +.. code-block:: sh + + # Get the environment variables for Grid Engine + + # (Assuming Grid Engine is installed at /opt/sge and $Grid + # Engine_CELL is 'default' in your environment) + shell$ . /opt/sge/default/common/settings.sh + + # Allocate an Grid Engine interactive job with 4 slots from a + # parallel environment (PE) named 'ompi' and run a 4-process Open + # MPI job + shell$ qrsh -pe ompi 4 -b y mpirun -np 4 mpi-hello-world + +There are also other ways to submit jobs under Grid Engine: + +.. 
code-block:: sh + + # Submit a batch job with the 'mpirun' command embedded in a script + shell$ qsub -pe ompi 4 my_mpirun_job.csh + + # Submit a Grid Engine and OMPI job and mpirun in one line + shell$ qrsh -V -pe ompi 4 mpirun hostname + + # Use qstat(1) to show the status of Grid Engine jobs and queues + shell$ qstat -f + +In reference to the setup, be sure you have a Parallel Environment +(PE) defined for submitting parallel jobs. You don't have to name your +PE "ompi". The following example shows a PE named "ompi" that would +look like: + +.. code-block:: + + shell$ qconf -sp ompi + pe_name ompi + slots 99999 + user_lists NONE + xuser_lists NONE + start_proc_args NONE + stop_proc_args NONE + allocation_rule $fill_up + control_slaves TRUE + job_is_first_task FALSE + urgency_slots min + accounting_summary FALSE + qsort_args NONE + +.. note:: ``qsort_args`` is necessary with the Son of Grid Engine + distribution, version 8.1.1 and later, and probably only applicable + to it. + +.. note:: For very old versions of Sun Grid Engine, omit + ``accounting_summary`` too. + +You may want to alter other parameters, but the important one is +``control_slaves``, specifying that the environment has "tight +integration". Note also the lack of a start or stop procedure. The +tight integration means that mpirun automatically picks up the slot +count to use as a default in place of the ``-np`` argument, picks up a +host file, spawns remote processes via ``qrsh`` so that Grid Engine +can control and monitor them, and creates and destroys a per-job +temporary directory (``$TMPDIR``), in which Open MPI's directory will +be created (by default). + +Be sure the queue will make use of the PE that you specified: + +.. code-block:: + + shell$ qconf -sq all.q + [...snipped...] + pe_list make cre ompi + [...snipped...] 
+ +To determine whether the Grid Engine parallel job is successfully +launched to the remote nodes, you can pass in the MCA parameter +``--mca plm_base_verbose 1`` to ``mpirun``. + +This will add in a ``-verbose`` flag to the ``qrsh -inherit`` command +that is used to send parallel tasks to the remote Grid Engine +execution hosts. It will show whether the connections to the remote +hosts are established successfully or not. + +.. error:: TODO is this site still live? Doesn't look like it.. Jeff + emailed Dave Love on 31 Dec 2021 to ask if this is still the + correct URL. + +Various Grid Engine documentation with pointers to more is available +at `the Son of GridEngine site `_, and +configuration instructions can be found at `the Son of GridEngine +configuration how-to site +`_. + +Grid Engine tight integration support of the ``qsub -notify`` flag +------------------------------------------------------------------ + +If you are running SGE 6.2 Update 3 or later, then the ``-notify`` +flag is supported. If you are running earlier versions, then the +``-notify`` flag will not work and using it will cause the job to be +killed. + +To use ``-notify``, one has to be careful. First, let us review what +``-notify`` does. Here is an excerpt from the qsub man page for the +``-notify`` flag. + + The ``-notify`` flag, when set causes Sun Grid Engine to send + warning signals to a running job prior to sending the signals + themselves. If a SIGSTOP is pending, the job will receive a SIGUSR1 + several seconds before the SIGSTOP. If a SIGKILL is pending, the + job will receive a SIGUSR2 several seconds before the SIGKILL. The + amount of time delay is controlled by the notify parameter in each + queue configuration. + +Let us assume the reason you want to use the ``-notify`` flag is to +get the SIGUSR1 signal prior to getting the SIGTSTP signal. Something +like this batch script can be used: + +.. code-block:: sh + + #! 
/bin/bash + #$ -S /bin/bash + #$ -V + #$ -cwd + #$ -N Job1 + #$ -pe ompi 16 + #$ -j y + #$ -l h_rt=00:20:00 + mpirun -np 16 -mca orte_forward_job_control 1 mpi-hello-world + +.. error:: Ralph: Does ``orte_forward_job_control`` still exist? + +However, one has to make one of two changes to this script for things +to work properly. By default, a SIGUSR1 signal will kill a shell +script. So we have to make sure that does not happen. Here is one way +to handle it: + +.. code-block:: sh + + #! /bin/bash + #$ -S /bin/bash + #$ -V + #$ -cwd + #$ -N Job1 + #$ -pe ompi 16 + #$ -j y + #$ -l h_rt=00:20:00 + exec mpirun -np 16 -mca orte_forward_job_control 1 mpi-hello-world + +Alternatively, one can catch the signals in the script instead of doing +an exec on the mpirun: + +.. code-block:: sh + + #! /bin/bash + #$ -S /bin/bash + #$ -V + #$ -cwd + #$ -N Job1 + #$ -pe ompi 16 + #$ -j y + #$ -l h_rt=00:20:00 + + function sigusr1handler() + { + echo "SIGUSR1 caught by shell script" 1>&2 + } + + function sigusr2handler() + { + echo "SIGUSR2 caught by shell script" 1>&2 + } + + trap sigusr1handler SIGUSR1 + trap sigusr2handler SIGUSR2 + + mpirun -np 16 -mca orte_forward_job_control 1 mpi-hello-world + +Grid Engine job suspend / resume support +---------------------------------------- + +To suspend the job, you send a SIGTSTP (not SIGSTOP) signal to +``mpirun``. ``mpirun`` will catch this signal and forward it to the +``mpi-hello-world`` as a SIGSTOP signal. To resume the job, you send +a SIGCONT signal to ``mpirun`` which will be caught and forwarded to +the ``mpi-hello-world``. + +By default, this feature is not enabled. This means that both the +SIGTSTP and SIGCONT signals will simply be consumed by the ``mpirun`` +process. To have them forwarded, you have to run the job with ``--mca +orte_forward_job_control 1``. Here is an example on Solaris: + +.. error:: TODO Ralph: does ``orte_forward_job_control`` still exist? + +.. 
code-block:: sh + + shell$ mpirun -mca orte_forward_job_control 1 -np 2 mpi-hello-world + +In another window, we suspend and continue the job: + +.. code-block:: sh + + shell$ prstat -p 15301,15303,15305 + PID USERNAME SIZE RSS STATE PRI NICE TIME CPU PROCESS/NLWP + 15305 rolfv 158M 22M cpu1 0 0 0:00:21 5.9% mpi-hello-world/1 + 15303 rolfv 158M 22M cpu2 0 0 0:00:21 5.9% mpi-hello-world/1 + 15301 rolfv 8128K 5144K sleep 59 0 0:00:00 0.0% mpirun/1 + + shell$ kill -TSTP 15301 + shell$ prstat -p 15301,15303,15305 + PID USERNAME SIZE RSS STATE PRI NICE TIME CPU PROCESS/NLWP + 15303 rolfv 158M 22M stop 30 0 0:01:44 21% mpi-hello-world/1 + 15305 rolfv 158M 22M stop 20 0 0:01:44 21% mpi-hello-world/1 + 15301 rolfv 8128K 5144K sleep 59 0 0:00:00 0.0% mpirun/1 + + shell$ kill -CONT 15301 + shell$ prstat -p 15301,15303,15305 + PID USERNAME SIZE RSS STATE PRI NICE TIME CPU PROCESS/NLWP + 15305 rolfv 158M 22M cpu1 0 0 0:02:06 17% mpi-hello-world/1 + 15303 rolfv 158M 22M cpu3 0 0 0:02:06 17% mpi-hello-world/1 + 15301 rolfv 8128K 5144K sleep 59 0 0:00:00 0.0% mpirun/1 + +Note that all this does is stop the ``mpi-hello-world`` processes. It +does not, for example, free any pinned memory when the job is in the +suspended state. + +To get this to work under the Grid Engine environment, you have to +change the ``suspend_method`` entry in the queue. It has to be set to +SIGTSTP. Here is an example of what a queue should look like. + +.. code-block:: sh + + shell$ qconf -sq all.q + qname all.q + [...snipped...] + starter_method NONE + suspend_method SIGTSTP + resume_method NONE + +Note that if you need to suspend other types of jobs with SIGSTOP +(instead of SIGTSTP) in this queue then you need to provide a script +that can implement the correct signals for each job type. diff --git a/docs/running-apps/index.rst b/docs/running-apps/index.rst new file mode 100644 index 00000000000..dca8473b54b --- /dev/null +++ b/docs/running-apps/index.rst @@ -0,0 +1,28 @@ +.. 
_label-running-mpi-applications: + +Running MPI applications +======================== + +Open MPI can launch MPI processes in a wide variety of environments, +but they can generally be broken down into two categories: + +#. Scheduled environments: these are systems where a resource manager + and/or scheduler are used to control access to the compute nodes. + Popular resource managers include Slurm, PBS/Pro/Torque, and LSF. +#. Non-scheduled environments: these are systems where resource + managers are not used. Launches are typically local (e.g., on a + single laptop or workstation) or via ``ssh`` (e.g., across a small + number of nodes). + +.. toctree:: + :maxdepth: 1 + + quickstart + pmix-and-prrte + + localhost + ssh + slurm + lsf + tm + gridengine diff --git a/docs/running-apps/localhost.rst b/docs/running-apps/localhost.rst new file mode 100644 index 00000000000..4d2820e0332 --- /dev/null +++ b/docs/running-apps/localhost.rst @@ -0,0 +1,115 @@ +Launching only on the local node +================================ + +It is common to develop MPI applications on a single workstation or +laptop, and then move to a larger parallel / HPC environment once the +MPI application is ready. + +Open MPI supports running multi-process MPI jobs on a single machine. +In such cases, you can simply avoid listing a hostfile or remote +hosts, and simply list a number of MPI processes to launch. For +example: + +.. code-block:: sh + + shell$ mpirun -np 6 mpi-hello-world + Hello world, I am 0 of 6 (running on my-laptop)) + Hello world, I am 1 of 6 (running on my-laptop) + ... + Hello world, I am 5 of 6 (running on my-laptop) + +If you do not specify the ``-np`` option, ``mpirun`` will default to +launching as many MPI processes as there are processor cores (not +hyperthreads) on the machine. 
+ +MPI communication +----------------- + +When running on a single machine, Open MPI will most likely use the +``ob1`` PML and the following BTLs for MPI communication between +peers: + +* ``self``: used for sending and receiving loopback MPI messages + |mdash| where the source and destination MPI process are the same. +* ``sm``: used for sending and receiving MPI messages where the source + and destination MPI processes can share memory (e.g., via SYSV or + POSIX shared memory mechanisms). + +Shared memory MPI communication +------------------------------- + +.. error:: TODO This should really be moved to the networking section. + +The ``sm`` BTL supports two modes of shared memory communication: + +#. **Two-copy:** Otherwise known as "copy-in / copy-out", this mode is + where the sender copies data into shared memory and the receiver + copies the data out. + + This mechanism is always available. + +#. **Sinlge copy:** In this mode, the sender or receiver makes a + single copy of the message data from the source buffer in one + process to the destination buffer in another process. Open MPI + supports three flavors of shared memory single-copy transfers: + + * `Linux KNEM `_. This is a + standalone Linux kernel module, made specifically for HPC and MPI + libraries to enable high-performance single-copy message + transfers. + + Open MPI must be able to find the KNEM header files in order to + build support for KNEM. + + * `Linux XPMEM `_. Similar to + KNEM, this is a standalone Linux kernel module, made specifically + for HPC and MPI libraries to enable high-performance single-copy + message transfers. It is derived from the Cray XPMEM system. + + Open MPI must be able to find the XPMEM header files in order to + build support for XPMEM. + + * Linux Cross-Memory Attach (CMA). This mechanism is built-in to + modern versions of the Linux kernel. 
Although more performance + than the two-copy shared memory transfer mechanism, CMA is the + lowest performance of the single-copy mechanisms. However, CMA + is likely the most widely available because it is enabled by + default in several modern Linux distributions. + + Open MPI must be built on a Linux system with a recent enough + Glibc and kernel version in order to build support for Linux CMA. + +Which mechanism is used at run time depends both on how Open MPI was +built and how your system is configured. You can check to see which +single-copy mechanisms Open MPI was built with via two mechanisms: + +#. At the end of running ``configure``, Open MPI emits a list of + transports for which it found relevant header files and libraries + such that it will be able to build support for them. You might see + lines like this, for example: + + .. code-block:: text + + Shared memory/copy in+copy out: yes + Shared memory/Linux CMA: yes + Shared memory/Linux KNEM: no + Shared memory/XPMEM: no + + The above output indicates that Open MPI will be built with 2-copy + (as mentioned above, 2-copy is *always* available) and with Linux + CMA support. KNEM and XPMEM support will *not* be built. + +#. After Open MPI is installed, the ``ompi_info`` command can show + which ``smsc`` (shared memory single copy) components are + available: + + .. code-block:: text + + shell$ ompi_info | grep smsc + MCA smsc: cma (MCA v2.1.0, API v1.0.0, Component v5.1.0) + + This Open MPI installation only supports the Linux CMA single-copy + mechanism. + +.. note:: As implied by the SMSC component names, none of them are + supported on macOS. macOS users will use the two-copy mechanism. diff --git a/docs/running-apps/lsf.rst b/docs/running-apps/lsf.rst new file mode 100644 index 00000000000..52fa9a64e39 --- /dev/null +++ b/docs/running-apps/lsf.rst @@ -0,0 +1,64 @@ +Launching with LSF +================== + +Open MPI supports the LSF resource manager. 
+ +Verify LSF support +------------------ + +The ``prte_info`` command can be used to determine whether or not an +installed Open MPI includes LSF support: + +.. code-block:: + + shell$ prte_info | grep lsf + +If the Open MPI installation includes support for LSF, you +should see a line similar to that below. Note the MCA version +information varies depending on which version of Open MPI is +installed. + +.. code-block:: + + MCA ras: lsf (MCA v2.1.0, API v2.0.0, Component v3.0.0) + +.. note:: PRRTE is the software layer that provides run-time + environment support to Open MPI. Open MPI typically hides most + PMIx and PRRTE details from the end user, but this is one place + that Open MPI is unable to hide the fact that PRRTE provides this + functionality, not Open MPI. Hence, users need to use the + ``prte_info`` command to check for LSF support (not + ``ompi_info``). + +Launching +--------- + +When properly configured, Open MPI obtains both the list of hosts and +how many processes to start on each host from LSF directly. Hence, it +is unnecessary to specify the ``--hostfile``, ``--host``, or ``-np`` +options to ``mpirun``. Open MPI will use PBS/Torque-native mechanisms +to launch and kill processes (``ssh`` is not required). + +For example: + +.. error:: TODO Need LSF specific content here + +.. code-block:: sh + + # Allocate a PBS job with 4 nodes + shell$ qsub -I -lnodes=4 + + # Now run an Open MPI job on all the nodes allocated by PBS/Torque + shell$ mpirun mpi-hello-world + +This will run the MPI processes on the nodes that were allocated by +LSF. Or, if submitting a script: + +.. error:: TODO Need LSF specific content here + +.. 
code-block:: sh + + shell$ cat my_script.sh + #!/bin/sh + mpirun mpi-hello-world + shell$ qsub -l nodes=4 my_script.sh diff --git a/docs/running-apps/pmix-and-prrte.rst b/docs/running-apps/pmix-and-prrte.rst new file mode 100644 index 00000000000..31329815001 --- /dev/null +++ b/docs/running-apps/pmix-and-prrte.rst @@ -0,0 +1,63 @@ +The role of PMIx and PRRTE +========================== + +Open MPI uses two external packages for its run-time system support: +`PMIx `_ and `PRRTE +`_. + +.. note:: Both of these packages were originally developed as an + internal part of Open MPI. Over time, they split off into + independent packages so that they could be used outside of an Open + MPI-specific environment. + +Both PMIx and PRRTE have many configure- and run-time options. Open +MPI attempts to hide most of these details from the end user, and +instead present a unified "everything is Open MPI" interface. Open +MPI will translate configuration directives to PMIx and PRRTE as +relevant, hiding such minutia from the end-user. + +This is an intentional design decision on the part of the Open MPI +developer community: HPC and MPI are complicated enough. We do not +want to burden the average end user with needing to understand which +abstractions and configuration options belong to Open MPI vs. PMIx +vs. PRRTE. + +Advanced users can peek into the PMIx and PRRTE internals and tweak +additional configuration settings if necessary, but we hope that that +will rarely be necessary. + +PMIx +---- + +The `Process Management Interface for Exascale (PMIx) +`_ package is used by Open MPI for the management +communication and coordination of MPI processes with a back-end +run-time system. + +The "back-end run-time system" may range from a low-infrastructure +system that simply uses ``ssh`` to remotely execute commands (with no +other infrastructure) to an environment with a full-featured resource +manager and scheduler such as Slurm, PBS/Pro/Torque, or LSF. 
+ +PMIx presents a unified API that hides many of the complexities of +communication with these back-end run-time environments. Open MPI +uses the PMIx API to discover, communicate, and coordinate with any +supported back-end run-time system without needing to know the +intimiate details of that system. + +PRRTE +----- + +The `PMIx Reference Runtime Environment +`_ is, as its name implies, a +reference run-time environment that utilizes the PMIx API. It mainly +provides run-time environment infrastructure for environments that do +not natively have them. In practical terms, this typically means +providing infrastructure for non-scheduled environments that have no +concept of distributed scheduling, file staging, remote stdout/stderr +redirection, and only have ``ssh`` to execute commands on remote +nodes. + +Open MPI uses PRRTE to deal with the practical issues of the back-end +run-time environment such as launching, monitoring, killing, and +reaping remote processes. diff --git a/docs/running-apps/quickstart.rst b/docs/running-apps/quickstart.rst new file mode 100644 index 00000000000..799280fa827 --- /dev/null +++ b/docs/running-apps/quickstart.rst @@ -0,0 +1,207 @@ +.. _label-quickstart-running-apps: + +Quick start: Running MPI applications +===================================== + +Although this section skips many details, it offers examples that will +probably work in many environments. + +.. caution:: Note that this section is a "Quick start" |mdash| it does + not attempt to be comprehensive or describe how to build Open MPI + in all supported environments. The examples below may therefore + not work exactly as shown in your environment. + + Please consult the other sections in this chapter for more details, + if necessary. + +Open MPI supports both ``mpirun`` and ``mpiexec`` (they are exactly +equivalent) to launch MPI applications. For example: + +.. 
code-block:: sh + + shell$ mpirun -np 2 mpi-hello-world + # or + shell$ mpiexec -np 2 mpi-hello-world + # or + shell$ mpiexec -np 1 mpi-hello-world : -np 1 mpi-hello-world + +are all equivalent. For simplicity, the rest of this documentation +will simply refer to ``mpirun``. + +.. error:: TODO Link to the mpirun(1) page here. + +Note that the ``mpirun`` command supports a *large* number of options. +Be sure to see the ``mpirun`` man page for much more information. + +Launching on a single host +-------------------------- + +It is common to develop MPI applications on a single laptop or +workstation. In such cases, use ``mpirun`` and specify how many MPI +processes you want to launch via the ``-np`` option: + +.. code-block:: sh + + shell$ mpirun -np 6 mpi-hello-world + Hello world, I am 0 of 6 (running on my-laptop)) + Hello world, I am 1 of 6 (running on my-laptop) + ... + Hello world, I am 5 of 6 (running on my-laptop) + +If you do not specify the ``-np`` option, ``mpirun`` will default to +launching as many MPI processes as there are processor cores (not +hyperthreads) on the machine. + +Launching in a non-scheduled environments (via ``ssh``) +------------------------------------------------------- + +In general, Open MPI requires the following to launch and run MPI +applications: + +#. You must be able to login to remote nodes non-interactively (e.g., + without entering a password or passphrase). +#. Open MPI's executables must be findable (e.g., in your ``PATH``). +#. Open MPI's libraries must be findable (e.g., in your + ``LD_LIBRARY_PATH``). + +``mpirun`` accepts a ``--hostfile`` parameter to specify a hostfile +containing one hostname per line: + +.. code-block:: sh + + shell$ cat my-hostfile.txt + node1.example.com + node2.example.com + node3.example.com slots=2 + node4.example.com slots=10 + +The optional ``slots`` attribute tells Open MPI the *maximum* number +of processes that can be allocated to that node. 
If ``slots`` is not +provided, Open MPI |mdash| by default |mdash| uses the number of +processor cores (not hyperthreads) on that node. + +Assuming that each of the 4 nodes in `my-hostfile.txt` have 16 cores: + +.. code-block:: sh + + shell$ mpirun --hostfile my-hostfile.txt mpi-hello-world + Hello world, I am 0 of 44 (running on node1.example.com) + Hello world, I am 1 of 44 (running on node1.example.com) + ... + Hello world, I am 15 of 44 (running on node1.example.com) + Hello world, I am 16 of 44 (running on node2.example.com) + Hello world, I am 17 of 44 (running on node2.example.com) + ... + Hello world, I am 31 of 44 (running on node2.example.com) + Hello world, I am 32 of 44 (running on node3.example.com) + Hello world, I am 33 of 44 (running on node3.example.com) + Hello world, I am 34 of 44 (running on node4.example.com) + ... + Hello world, I am 43 of 44 (running on node4.example.com) + +You can see the breakdown of how many processes Open MPI launched on +each node: + +* node1: 16, because no ``slots`` was specified +* node2: 16, because no ``slots`` was specified +* node3: 2, because ``slots=2`` was specified +* node2: 10, because ``slots=10`` was specified + +Launching in scheduled environments +----------------------------------- + +In scheduled environments (e.g., in a Slurm job, or PBS/Pro, or LSF, +or any other schedule), the user tells the scheduler how many MPI +processes to launch, and the scheduler decides which hosts to use. +The scheduler then passes both pieces of information (the number of +processes and the hosts to use) to Open MPI. + +There are two ways to launch in a scheduled environment. Nominally, +they both achieve the same thing: they launch MPI processes. Them +main user-observable difference between the two methods is that +``mpirun`` has many more features than scheduler direct launchers. + +Using Open MPI's ``mpirun`` +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. 
note:: Technically, Open MPI's ``mpirun`` is a thin layer around + the PRRTE ``prun``. Hence, most of the functionality + described here is really about ``prun``. For simplicity, + however, this docmentation will describe everything in terms + of ``mpirun``. + +.. error:: TODO Link to mpirun(1) here. + +When using the full-featured ``mpirun`` in a scheduled environment, +there is no need to specify a hostfile or number of MPI processes to +launch. ``mpirun`` will receive this information directly from the +scheduler. Hence, if you want to launch an MPI job that completely +"fills" your scheduled allocation (i.e., one MPI process for each slot +in the scheduled allocation), you can simply: + +.. code-block:: sh + + # Write a script that runs your MPI application + shell$ cat my-slurm-script.sh + #!/bin/sh + # There is no need to specify -np or --hostfile because that + # information will automatically be provided by Slurm. + mpirun mpi-hello-world + +You then submit the ``my-slurm-script.sh`` script to Slurm for +execution: + +.. code-block:: sh + + # Use -n to indicate how many MPI processes you want to run. + # Slurm will pick the specific hosts which will be used. + shell$ sbatch -n 40 my-slurm-script.sh + Submitted batch job 1234 + shell$ + +After Slurm job 1234 completes, you can look at the output file to see +what happened: + +.. code-block:: sh + + shell$ cat slurm-1234.out + Hello world, I am 0 of 40 (running on node37.example.com) + Hello world, I am 1 of 40 (running on node37.example.com) + Hello world, I am 2 of 40 (running on node37.example.com) + ... + Hello world, I am 39 of 40 (running on node19.example.com) + +Note that the Slurm scheduler picked the hosts on which the processes +ran. + +The above example shows that simply invoking ``mpirun +mpi-hello-world`` |mdash| with no other CLI options |mdash| obtains +the number of processes to run and hosts to use from the scheduler. + +.. error:: TODO Link to mpirun(1) here. 
+ +``mpirun`` has many more features not described in this Quick Start +section. For example, while uncommon in scheduled environments, you +can use ``-np`` and/or ``--hostfile`` to launch in subsets of the +overall scheduler allocation. See the mpirun man page for more +details. + +Using the scheduler to "direct launch" (without ``mpirun``) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Some schedulers (such as Slurm) have the ability to "direct launch" +MPI processes without using Open MPI's ``mpirun``. For example: + +.. code-block:: sh + + shell$ srun -n 40 mpi-hello-world + Hello world, I am 0 of 40 (running on node14.example.com) + Hello world, I am 1 of 40 (running on node14.example.com) + Hello world, I am 2 of 40 (running on node14.example.com) + ... + Hello world, I am 39 of 40 (running on node203.example.com) + shell$ + +Similar to the prior example, this example launches 40 copies of +``mpi-hello-world``, but it does so via the Slurm ``srun`` command +without using ``mpirun``. diff --git a/docs/running-apps/slurm.rst b/docs/running-apps/slurm.rst new file mode 100644 index 00000000000..2b576100a4f --- /dev/null +++ b/docs/running-apps/slurm.rst @@ -0,0 +1,121 @@ +Launching with Slurm +==================== + +Open MPI supports two modes of launching parallel MPI jobs under +Slurm: + +#. Using Open MPI's full-features ``mpirun`` launcher. +#. Using Slurm's "direct launch" capability. + +Unless there is a strong reason to use ``srun`` for direct launch, the +Open MPI team recommends using ``mpirun`` for launching under Slurm jobs. + +.. note:: In versions of Open MPI prior to 5.0.x, using ``srun`` for + direct launch could be faster than using ``mpirun``. **This is no + longer true.** + +Using ``mpirun`` +---------------- + +When ``mpirun`` is launched in a Slurm job, ``mpirun`` will +automatically utilize the Slurm infrastructure for launching and +controlling the individual MPI processes. 
+Hence, it is unnecessary to specify the ``--hostfile``, +``--host``, or ``-np`` options to ``mpirun``. + +.. note:: Using ``mpirun`` is the recomended method for launching Open + MPI jobs in Slurm jobs. + + ``mpirun``'s Slurm support should always be available, regardless + of how Open MPI or Slurm was installed. + +For example: + +.. code-block:: sh + + # Allocate a Slurm job with 4 slots + shell$ salloc -n 4 + salloc: Granted job allocation 1234 + + # Now run an Open MPI job on all the slots allocated by Slurm + shell$ mpirun mpi-hello-world + +This will run the 4 MPI processes on the node(s) that were allocated +by Slurm. + +Or, if submitting a script: + +.. code-block:: sh + + shell$ cat my_script.sh + #!/bin/sh + mpirun mpi-hello-world + shell$ sbatch -n 4 my_script.sh + srun: jobid 1235 submitted + shell$ + +Similar to the ``salloc`` case, no command line options specifing +number of MPI processes were necessary, since Open MPI will obtain +that information directly from Slurm at run time. + +Using Slurm's "direct launch" functionality +------------------------------------------- + +Assuming that Slurm installed its Open MPI plugin, you can use +``srun`` to "direct launch" Open MPI applications without the use of +Open MPI's ``mpirun`` command. + +.. note:: Using direct launch can be *slightly* faster when launching + very, very large MPI processes (i.e., thousands or millions of MPI + processes in a single job). But it has significantly fewer + features than Open MPI's ``mpirun``. + +First, you must ensure that Slurm was built and installed with PMI-2 +support. + +.. note:: Please ask your friendly neighborhood Slurm developer to + support PMIx. PMIx is the current generation of run-time + support API; PMI-2 is the legacy / antiquated API. Open MPI + *only* supports PMI-2 for Slurm. + +Next, ensure that Open MPI was configured ``--with-pmi=DIR``, where +``DIR`` is the path to the directory where Slurm's ``pmi2.h`` is +located. 
+ +Open MPI applications can then be launched directly via the ``srun`` +command. For example: + +.. code-block:: sh + + shell$ srun -N 4 mpi-hello-world + +Or you can use ``sbatch`` with a script: + +.. code-block:: sh + + shell$ cat my_script.sh + #!/bin/sh + srun mpi-hello-world + shell$ sbatch -N 4 my_script.sh + srun: jobid 1235 submitted + shell$ + +Similar using ``mpirun`` inside of an ``sbatch`` batch script, no +``srun`` command line options specifing number of processes were +necessary, because ``sbatch`` set all the relevant Slurm-level +parameters about number of processes, cores, partition, etc. + +Slurm 20.11 +----------- + +There were some changes in Slurm behavior that were introduced in +Slurm 20.11.0 and subsequently reverted out in Slurm 20.11.3. + +SchedMD (the makers of Slurm) strongly suggest that all Open MPI users +avoid using Slurm versions 20.11.0 through 20.11.2. + +Indeed, you will likely run into problems using just about any version +of Open MPI these problematic Slurm releases. + +.. important:: Please either downgrade to an older version or upgrade + to a newer version of Slurm. diff --git a/docs/running-apps/ssh.rst b/docs/running-apps/ssh.rst new file mode 100644 index 00000000000..55137dd36e7 --- /dev/null +++ b/docs/running-apps/ssh.rst @@ -0,0 +1,203 @@ +Launching with SSH +================== + +When launching Open MPI jobs in a non-scheduled environment, ``ssh`` +is typically used to launch commands on remote nodes. As listed in +the :doc:`quick start section `, +successfully launching MPI applications with ``ssh`` requires the +following: + +#. You must be able to non-interactively login |mdash| without + entering a password or passphrase |mdash| to all remote nodes from + all remotes nodes. +#. Open MPI's executables must be findable (e.g., in your ``PATH``). +#. Open MPI's libraries must be findable (e.g., in your + ``LD_LIBRARY_PATH``). 
+ +Non-interactive ``ssh`` logins +------------------------------ + +SSH keys must be setup such that the following can be executed without +being prompted for password or passphrase: + +.. code-block:: sh + + shell$ ssh othernode echo hello + hello + shell$ + +Consult instructions and tutorials from around the internet to learn +how to setup SSH keys. Try Google search terms like "passwordless +SSH" or "SSH key authentication". + +For simplicity, it may be desireable to configure your SSH keys +without passphrases. This adds some risk, however (e.g., if your SSH +keys are compromised). But it simplifies your SSH setup because you +will not need to use ``ssh-agent``. Evaluate the risk level you are +comfortable with. + +.. important:: Open MPI uses a tree-based pattern to launch processes + on remote nodes. This means that Open MPI must be able to + non-interactively login |mdash| without being prompted for password + or passphrase |mdash| *to any node* in the host list *from any + node* in the host list. + + It may *not* be sufficient to only setup an SSH key from the node + where you are invoking ``mpirun`` to all other nodes. + +If you have a shared ``$HOME`` filesystem between your nodes, you can +setup a single SSH key that is used to login to all nodes. + +Finding Open MPI executables and libraries +------------------------------------------ + +Once Open MPI is able to use ``ssh`` to invoke executables on a remote +node, it must be able to find its helper executables and shared +libraries on that remote node. + +If Open MPI is installed in a system-level folder (e.g., in +``/usr/bin``), Open MPI will likely be able to find its executables +and libraries on the remote node with no additional assistance. + +If, however, Open MPI is installed into a path that is not searched by +default, you will need to provide assistance so that Open MPI can find +its executables and libraries. + +.. 
important:: For simplicity, it is *strongly* recommended that you
code-block:: sh + + shell$ ./configure --prefix=$HOME/my-openmpi ... + +In this cause, Open MPI will be installed into ``$HOME/my-openmpi``. +This path is almost certainly not in any system-default search paths, +so it must be added to the ``$PATH`` and ``$LD_LIBRARY_PATH`` +environment variables. + +Specifically: the goal is that the following non-interactive commands +must be able to execute without error: + +.. code-block:: sh + + # First, ensure that this command returns the correct ompi_info + # instance (i.e., $HOME/my-openmpi/bin/ompi_info). + shell$ ssh remotenode which ompi_info + /home/myusername/my-openmpi/bin/ompi_info + + # Next, ensure that you can run that ompi_info command without + # error + shell$ ssh remotenode ompi_info + + # ... lots of output ... + +Ensure that you do not see any errors about libraries that cannot be +found. + +All shells have some kind of script file that is executed at login +time perform environmental setup tasks. This startup file is the one +that needs to be edited to: + +#. Add Open MPI's executable path (which is likely ``$prefix/bin``, or + ``$HOME/my-openmpi/bin`` in this example) to the ``$PATH`` + environment variable. +#. Add Open MPI's library path (which is likely ``$prefix/lib``, or + ``$HOME/my-openmpi/lib`` in this example) to the + ``$LD_LIBRARY_PATH`` environment variable. + +You probably want to add Open MPI's libraries to the *front* of +``$PATH`` and ``$LD_LIBRARY_PATH`` to ensure that this Open MPI +installation's files are found *first*. + +Consult the manual page for your shell for specific details (some +shells are picky about the permissions of the startup file, for +example). The list below contains some common shells and the startup +files that they read/execute upon login: + +.. error:: TODO This rendering sucks, but I couldn't make it play nice + with list-table, either. :-( + +* ``bash`` or ``zsh``: + + * **Non-interactive login:** ``$HOME/.bashrc`` if it exists. 
+ * **Interactive login**: ``$HOME/.bash_profile`` if it exists, or + ``$HOME/.bash_login`` if it exists, or ``$HOME/.profile`` if it + exists (in that order). Note that some Linux distributions + automatically come with ``$HOME/.bash_profile`` scripts for users + that automatically execute ``$HOME/.bashrc`` as well. Consult the + bash man page for more information. + +* ``sh``: + + * **Non-interactive login:** This shell does not execute any file + automatically, so Open MPI will execute the ``$HOME/.profile`` + script before invoking Open MPI executables on remote nodes + * **Interactive login:** ``$HOME/.profile`` + +* ``csh``: + + * **Non-interactive login:** ``$HOME/.cshrc`` + * **Interactive login:** ``$HOME/.cshrc`` followed by + ``$HOME/.login`` + +* ``tcsh``: + + * **Non-interactive login:** ``$HOME/.tcshrc`` if it exists, + ``$HOME/.cshrc`` if it does not + * **Interactive login:** ``$HOME/.tcshrc`` if it exists, + ``$HOME/.cshrc`` if it does not, followed by ``$HOME/.login`` diff --git a/docs/running-apps/tm.rst b/docs/running-apps/tm.rst new file mode 100644 index 00000000000..1788c1d2856 --- /dev/null +++ b/docs/running-apps/tm.rst @@ -0,0 +1,72 @@ +Launching with PBS / Torque +=========================== + +Open MPI supports PBS, PBS Pro, Torque, and other related resource +managers. + +Verify PBS/Torque support +------------------------- + +The ``prte_info`` command can be used to determine whether or not an +installed Open MPI includes Torque/PBS Pro support: + +.. code-block:: + + shell$ prte_info | grep ras + +If the Open MPI installation includes support for PBS/Torque, you +should see a line similar to that below. Note the MCA version +information varies depending on which version of Open MPI is +installed. + +.. code-block:: + + MCA ras: tm (MCA v2.1.0, API v2.0.0, Component v3.0.0) + +.. note:: PRRTE is the software layer that provides run-time + environment support to Open MPI. 
Open MPI typically hides most + PMIx and PRRTE details from the end user, but this is one place + that Open MPI is unable to hide the fact that PRRTE provides this + functionality, not Open MPI. Hence, users need to use the + ``prte_info`` command to check for PBS/Torque support (not + ``ompi_info``). + +Launching +--------- + +When properly configured, Open MPI obtains both the list of hosts and +how many processes to start on each host from Torque / PBS Pro +directly. Hence, it is unnecessary to specify the ``--hostfile``, +``--host``, or ``-np`` options to ``mpirun``. Open MPI will use +PBS/Torque-native mechanisms to launch and kill processes (``ssh`` is +not required). + +For example: + +.. code-block:: sh + + # Allocate a PBS job with 4 nodes + shell$ qsub -I -lnodes=4 + + # Now run an Open MPI job on all the nodes allocated by PBS/Torque + shell$ mpirun mpi-hello-world + +This will run the MPI processes on the nodes that were allocated by +PBS/Torque. Or, if submitting a script: + +.. code-block:: sh + + shell$ cat my_script.sh + #!/bin/sh + mpirun mpi-hello-world + shell$ qsub -l nodes=4 my_script.sh + +.. warning:: Do not modify ``$PBS_NODEFILE``! + + We've had reports from some sites that system administrators modify + the ``$PBS_NODEFILE`` in each job according to local policies. + This will currently cause Open MPI to behave in an unpredictable + fashion. As long as no new hosts are added to the hostfile, it + *usually* means that Open MPI will incorrectly map processes to + hosts, but in some cases it can cause Open MPI to fail to launch + processes altogether. diff --git a/docs/to-do.rst b/docs/to-do.rst new file mode 100644 index 00000000000..5d985b7b46a --- /dev/null +++ b/docs/to-do.rst @@ -0,0 +1,109 @@ +To-Do Items +=========== + +This is a "to-do" file for while we are working on creating the first +version of the Open MPI RST / Sphinx docs. 
*It will not be included in +the final documentation.* + +Things that have changed in v5.0 +-------------------------------- + +Need to update these docs to reflect: + +* What specifically does ``--disable-io-romio`` do? + +* Do we still have AMCA files? + +* Do we still have ``--tune`` files? + +* Document this new breakpoint functionality: + https://github.com/open-mpi/ompi/commit/f97d081cf9b540c5a79e00aecee17b25e8c123ad + +* Document v5.0.x's ABI relationship with v4.x. It will likely be: + + * C bindings are ABI compatible + * MPI-1 deleted functions are always included in the library + + * Need to check to see what the current behavior is w.r.t. the + prototypes in ``mpi.h``. + +Other random to-do items +------------------------ + +* Create for-ompi-develpopers-to-write-RST-docs file: + * Document == -- ^^ progression of headers + * Document ... lots of other things. + +* Add a section about debugging and removal of MPIR, yadda yadda yadda + (at least some of this can come from the "parallel debugging" FAQ + section). + +* Can we make a new ".. knownissues::" directive (etc.) for the NEWS + file that does the same thing as ".. attention::", but says "Known + Issues" instead of "Attention". + + Reading the Python docs style guide, it kinda implies we can do that + kind of thing...? + +* Make "setting an MCA param" docs prominent in the doc (this already + exists somewhere, but we need to make it prominent). + +* https://github.com/open-mpi/ompi/issues/7668 (ORTE --> PRRTE + user-visible changes) + + * Add docs about mpirun launch vs. direct launch. + +* Finish folding in all FAQ topics into the main document. + +Man page to-dos +--------------- + +* ``man-openmpi/man1/mpirun.1.rst`` is currently essentially an + RST-ified version of OMPI v4.1's ``mpirun(1)`` man page. 
There has + been some light editing: + + * Eliminiated all ``-foo`` options + * Modernized *some* of the examples + + Much more work needs to be done to update it for all the PRTE + changes since Open MPI v4.1.x. + +* It's possible that a lot of *tokens* in the RST man pages should be + ``tokens``. + +* Look for "See the ___ section" text and create appropriate cross + references. + +* There's some shmem man pages with list that have incorrect + indentation in the RST, which results in odd line break, extra bold + face, and strange indentation. Example: + man-openshmem/man3/shmem_double_prod_to_all.3.html -- look for the + description of "target". + +* It looks like much of the cross-linking that we have in the MPI + man pages (e.g., when one MPI API is mentioned on a page, it + automatically links to the man page for that API) doesn't exist in + the OSHMEM pages. + +* Some of our code blocks have line numbers, others do not. I think + I prefer to have the line numbers, but don't feel too strongly + about it. + +* Ensure somewhere that it is documented -- probably in the networking + section? -- that it is necessary for resource manager daemons to + have their /etc/security limits set properly for locked memory. + +Josh Hursey notes +----------------- + +Running MPI Applications Notes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Reviewing: + https://ompi--8329.org.readthedocs.build/en/8329/faq/running-mpi-apps.html + +10.8.21 + - Is aggressive mode really determined by the slot count provided by PRRTE? Or is it determined + by a query to hwloc with a reference ot the number of processes per node. It just surprises + me that this part of OMPI is controlled by PRRTE instead of something more generic that might + work with, say, Slurm direct launch via srun. 
diff --git a/docs/validate.rst b/docs/validate.rst new file mode 100644 index 00000000000..a9a43703368 --- /dev/null +++ b/docs/validate.rst @@ -0,0 +1,70 @@ +Validating your installation +============================ + +Checking your Open MPI configuration +------------------------------------ + +The ``ompi_info`` command can be used to check the status of your Open +MPI installation (located in ``PREFIX/bin/ompi_info``). Running it with +no arguments provides a summary of information about your Open MPI +installation. + +Note that the ``ompi_info`` command is extremely helpful in determining +which components are installed as well as listing all the run-time +settable parameters that are available in each component (as well as +their default values). + +The following ``ompi_info`` options may be helpful: + +* ``--all``: Show a *lot* of information about your Open MPI + installation. +* ``--parsable``: Display all the information in a machine-parsable + format. +* ``--param FRAMEWORK COMPONENT``: + A ``FRAMEWORK`` value of ``all`` and a ``COMPONENT`` value of ``all`` will + show all parameters to all components. Otherwise, the parameters of + all the components in a specific framework, or just the parameters + of a specific component can be displayed by using an appropriate + FRAMEWORK and/or COMPONENT name. +* ``--level LEVEL``: + By default, ``ompi_info`` only shows "Level 1" MCA parameters -- + parameters that can affect whether MPI processes can run + successfully or not (e.g., determining which network interfaces to + use). The ``--level`` option will display all MCA parameters from + level 1 to ``LEVEL`` (the max ``LEVEL`` value is 9). Use ``ompi_info + --param FRAMEWORK COMPONENT --level 9`` to see *all* MCA parameters + for a given component. See "The Modular Component Architecture + (MCA)" section, below, for a fuller explanation. + +.. error:: TODO move the ref below to a better / more-prominent place + to explain how to set MCA params. 
+ +Changing the values of these parameters is explained in the +:ref:`Internal frameworks section `. + + +Testing your Open MPI installation +---------------------------------- + +When verifying a new Open MPI installation, we recommend running the +following tests in order (the tests build upon each other): + +#. Use ``mpirun`` to launch a non-MPI program (e.g., ``hostname`` or + ``uptime``) across multiple nodes. +#. Use ``mpirun`` to launch a trivial MPI program that does no MPI + communication (e.g., the ``hello_c`` program in the ``examples/`` + directory in the Open MPI distribution). +#. Use ``mpirun`` to launch a trivial MPI program that sends and + receives a few MPI messages (e.g., the ``ring_c`` program in the + ``examples/`` directory in the Open MPI distribution). +#. Use ``oshrun`` to launch a non-OpenSHMEM program across multiple + nodes. +#. Use ``oshrun`` to launch a trivial MPI program that does no OpenSHMEM + communication (e.g., ``hello_shmem.c`` program in the ``examples/`` + directory in the Open MPI distribution.) +#. Use ``oshrun`` to launch a trivial OpenSHMEM program that puts and + gets a few messages (e.g., the ``ring_shmem.c`` in the ``examples/`` + directory in the Open MPI distribution.) + +If you can run all of these tests successfully, that is a good +indication that Open MPI built and installed properly. diff --git a/docs/version-numbering.rst b/docs/version-numbering.rst new file mode 100644 index 00000000000..a2d6ef2688a --- /dev/null +++ b/docs/version-numbering.rst @@ -0,0 +1,162 @@ +.. _version_numbers_section_label: + +Version numbers and binary compatibility +========================================== + +Open MPI has two sets of version numbers that are likely of interest +to end users / system administrators: + +#. Software version number +#. Shared library version numbers + +Both are predicated on Open MPI's definition of "backwards +compatibility." 
+ +Backwards Compatibility +----------------------- + +Open MPI version Y is backwards compatible with Open MPI version X +(where Y>X) if users can: + +* Compile an MPI/OpenSHMEM application with version X, + ``mpirun``/``oshrun`` it with version Y, and get the same + user-observable behavior. +* Invoke ``ompi_info`` with the same CLI options in versions X and Y and + get the same user-observable behavior. + +Note that this definition encompasses several things: + +* Application Binary Interface (ABI) +* MPI / OpenSHMEM run time system +* ``mpirun`` / ``oshrun`` command line options +* MCA parameter names / values / meanings + +However, this definition only applies when the same version of Open +MPI is used with all instances of the runtime and MPI / OpenSHMEM +processes in a single MPI job. If the versions are not exactly the +same everywhere, Open MPI is not guaranteed to work properly in any +scenario. + +Backwards compatibility tends to work best when user applications are +dynamically linked to one version of the Open MPI / OSHMEM libraries, +and can be updated at run time to link to a new version of the Open +MPI / OSHMEM libraries. + +For example, if an MPI / OSHMEM application links statically against +the libraries from Open MPI vX, then attempting to launch that +application with ``mpirun`` / ``oshrun`` from Open MPI vY is not guaranteed to +work (because it is mixing vX and vY of Open MPI in a single job). + +Similarly, if using a container technology that internally bundles all +the libraries from Open MPI vX, attempting to launch that container +with ``mpirun`` / ``oshrun`` from Open MPI vY is not guaranteed to work. + +Software Version Number +----------------------- + +Official Open MPI releases use the common "A.B.C" version identifier +format. 
Each of the three numbers has a specific meaning: + +* Major: The major number is the first integer in the version string + Changes in the major number typically indicate a significant + change in the code base and/or end-user functionality, and also + indicate a break from backwards compatibility. Specifically: Open + MPI releases with different major version numbers are not + backwards compatibale with each other. + + .. important:: This rule does not extend to versions prior to + v1.10.0. Specifically: v1.10.x is not guaranteed to be backwards + compatible with other v1.x releases. + +* Minor: The minor number is the second integer in the version string. + Changes in the minor number indicate a user-observable change in the + code base and/or end-user functionality. Backwards compatibility + will still be preserved with prior releases that have the same major + version number (e.g., v2.5.3 is backwards compatible with v2.3.1). + +* Release: The release number is the third integer in the version + string. Changes in the release number typically indicate a bug fix + in the code base and/or end-user functionality. For example, if + there is a release that only contains bug fixes and no other + user-observable changes or new features, only the third integer will + be increased (e.g., from v4.3.0 to v4.3.1). + +The "A.B.C" version number may optionally be followed by a quantifier +string: + +* ``aX``: Indicates an alpha release. X is an integer indicating the + number of the alpha release (e.g., v1.10.3a5 indicates the 5th alpha + release of version 1.10.3). +* ``bX``: Indicates a beta release. X is an integer indicating the + number of the beta release (e.g., v1.10.3b3 indicates the 3rd beta + release of version 1.10.3). +* ``rcX``: Indicates a release candidate. X is an integer indicating + the number of the release candidate (e.g., v1.10.3rc4 indicates the + 4th release candidate of version 1.10.3). 
+ +Nightly development snapshot tarballs use a different version number +scheme; they contain three distinct values: + +* The git branch name from which the tarball was created. +* The date/timestamp, in ``YYYYMMDDHHMM`` format. +* The hash of the git commit from which the tarball was created. + +For example, a snapshot tarball filename of +``openmpi-v2.x-201703070235-e4798fb.tar.bz2`` indicates that this tarball +was created from the v2.x branch, on March 7, 2017, at 2:35am GMT, +from git hash e4798fb. + + +Shared Library Version Number +----------------------------- + +The `GNU Libtool official documentation +`_ details how the +versioning scheme works. The quick version is that the shared library +versions are a triple of integers: (current,revision,age), or +``c:r:a``. This triple is not related to the Open MPI software +version number. There are six simple rules for updating the values +(taken almost verbatim from the Libtool docs): + +#. Start with version information of ``0:0:0`` for each shared library. +#. Update the version information only immediately before a public + release of your software. More frequent updates are unnecessary, + and only guarantee that the current interface number gets larger + faster. +#. If the library source code has changed at all since the last + update, then increment revision (``c:r:a`` becomes ``c:r+1:a``). +#. If any interfaces have been added, removed, or changed since the + last update, increment current, and set revision to 0. +#. If any interfaces have been added since the last public release, + then increment age. +#. If any interfaces have been removed since the last public release, + then set age to 0. + +Here's how we apply those rules specifically to Open MPI: + +#. The above rules do not apply to MCA components (a.k.a. "plugins"); + MCA component ``.so`` versions stay unspecified. +#. 
The above rules apply exactly as written to the following libraries + starting with Open MPI version v1.5: + + * ``libopen-pal`` + * ``libmca_common_*`` + +#. The following libraries use a slightly modified version of the + above rules: rules 4, 5, and 6 only apply to the official MPI and + OpenSHMEM interfaces (functions, global variables). The rationale + for this decision is that the vast majority of our users only care + about the official/public MPI/OpenSHMEM interfaces; we therefore + want the ``.so`` version number to reflect only changes to the + official MPI/OpenSHMEM APIs. Put simply: non-MPI/OpenSHMEM API / + internal changes to the MPI-application-facing libraries are + irrelevant to pure MPI/OpenSHMEM applications. + + * ``libmpi`` + * ``libmpi_mpifh`` + * ``libmpi_usempi_tkr`` + * ``libmpi_usempi_ignore_tkr`` + * ``libmpi_usempif08`` + * ``libmpi_cxx`` + * ``libmpi_java`` + * ``liboshmem`` diff --git a/ompi/Makefile.am b/ompi/Makefile.am index 8bf51605910..8ebbd66c0c1 100644 --- a/ompi/Makefile.am +++ b/ompi/Makefile.am @@ -9,7 +9,7 @@ # University of Stuttgart. All rights reserved. # Copyright (c) 2004-2005 The Regents of the University of California. # All rights reserved. -# Copyright (c) 2008-2022 Cisco Systems, Inc. All rights reserved +# Copyright (c) 2008-2022 Cisco Systems, Inc. All rights reserved. # Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved. # Copyright (c) 2010-2011 Sandia National Laboratories. All rights reserved. # Copyright (c) 2013-2015 Los Alamos National Security, LLC. 
All rights @@ -91,9 +91,7 @@ SUBDIRS = \ $(OMPI_MPIEXT_USEMPIF08_DIRS) \ mpi/fortran/use-mpi-f08 \ mpi/fortran/mpiext-use-mpi-f08 \ - $(MCA_ompi_FRAMEWORK_COMPONENT_DSO_SUBDIRS) \ - mpi/man/man3 \ - mpi/man/man5 + $(MCA_ompi_FRAMEWORK_COMPONENT_DSO_SUBDIRS) if OMPI_WANT_JAVA_BINDINGS SUBDIRS += \ @@ -126,9 +124,7 @@ DIST_SUBDIRS = \ mpi/java \ $(OMPI_MPIEXT_ALL_SUBDIRS) \ $(MCA_ompi_FRAMEWORKS_SUBDIRS) \ - $(MCA_ompi_FRAMEWORK_COMPONENT_ALL_SUBDIRS) \ - mpi/man/man3 \ - mpi/man/man5 + $(MCA_ompi_FRAMEWORK_COMPONENT_ALL_SUBDIRS) # Build the main MPI library diff --git a/ompi/mpi/man/man3/MPI_Abort.3in b/ompi/mpi/man/man3/MPI_Abort.3in deleted file mode 100644 index 112b3ebb54d..00000000000 --- a/ompi/mpi/man/man3/MPI_Abort.3in +++ /dev/null @@ -1,70 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2010-2014 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Abort 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Abort\fP \- Terminates MPI execution environment. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Abort(MPI_Comm \fIcomm\fP, int\fI errorcode\fP) - -.fi -.SH Fortran Syntax -.ft R -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_ABORT(\fICOMM\fP, \fIERRORCODE\fP, \fIIERROR\fP) - INTEGER \fICOMM\fP,\fI ERRORCODE\fP,\fI IERROR - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Abort(\fIcomm\fP, \fIerrorcode\fP, \fIierror\fP) - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - INTEGER, INTENT(IN) :: \fIerrorcode\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -comm -Communicator of tasks to abort. -.TP 1i -errorcode -Error code to return to invoking environment. - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). 
- -.SH DESCRIPTION -.ft R -This routine makes a "best attempt" to abort all tasks in the group of -comm. This function does not require that the invoking environment take any -action with the error code. However, a UNIX or POSIX -environment should handle this as a return errorcode from the main program -or an abort (errorcode). -.sp -The long-term goal of the Open MPI implementation is to terminate all processes in all tasks that contain a process in \fIcomm\fP, and the error code is not returned to the invoking environment. At the moment, this isn't fully implemented and MPI_Abort will terminate the entire job. -.sp -Note: All associated processes are sent a SIGTERM. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler -may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_Accumulate.3in b/ompi/mpi/man/man3/MPI_Accumulate.3in deleted file mode 100644 index baa1110714b..00000000000 --- a/ompi/mpi/man/man3/MPI_Accumulate.3in +++ /dev/null @@ -1,165 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013-2014 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Accumulate 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Accumulate\fP, \fBMPI_Raccumulate\fP \- Combines the contents of the origin buffer with that of a target buffer. 
- -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Accumulate(const void *\fIorigin_addr\fP, int \fIorigin_count\fP, - MPI_Datatype \fIorigin_datatype\fP, int \fItarget_rank\fP, - MPI_Aint \fItarget_disp\fP, int \fItarget_count\fP, - MPI_Datatype \fItarget_datatype\fP, MPI_Op \fIop\fP, MPI_Win \fIwin\fP) - -int MPI_Raccumulate(const void *\fIorigin_addr\fP, int \fIorigin_count\fP, - MPI_Datatype \fIorigin_datatype\fP, int \fItarget_rank\fP, - MPI_Aint \fItarget_disp\fP, int \fItarget_count\fP, - MPI_Datatype \fItarget_datatype\fP, MPI_Op \fIop\fP, MPI_Win \fIwin\fP, - MPI_Request *\fIrequest\fP) - -.fi -.SH Fortran Syntax (see FORTRAN 77 NOTES) -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_ACCUMULATE(\fIORIGIN_ADDR, ORIGIN_COUNT, ORIGIN_DATATYPE, TARGET_RANK, - TARGET_DISP, TARGET_COUNT, TARGET_DATATYPE, OP, WIN, IERROR\fP) - \fIORIGIN_ADDR\fP(*) - INTEGER(KIND=MPI_ADDRESS_KIND) \fITARGET_DISP\fP - INTEGER \fIORIGIN_COUNT, ORIGIN_DATATYPE, TARGET_RANK, TARGET_COUNT, - TARGET_DATATYPE, OP, WIN, IERROR \fP - -MPI_RACCUMULATE(\fIORIGIN_ADDR, ORIGIN_COUNT, ORIGIN_DATATYPE, TARGET_RANK, - TARGET_DISP, TARGET_COUNT, TARGET_DATATYPE, OP, WIN, REQUEST, IERROR\fP) - \fIORIGIN_ADDR\fP(*) - INTEGER(KIND=MPI_ADDRESS_KIND) \fITARGET_DISP\fP - INTEGER \fIORIGIN_COUNT, ORIGIN_DATATYPE, TARGET_RANK, TARGET_COUNT, - TARGET_DATATYPE, OP, WIN, REQUEST, IERROR \fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Accumulate(\fIorigin_addr\fP, \fIorigin_count\fP, \fIorigin_datatype\fP, \fItarget_rank\fP, - \fItarget_disp\fP, \fItarget_count\fP, \fItarget_datatype\fP, \fIop\fP, \fIwin\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIorigin_addr\fP - INTEGER, INTENT(IN) :: \fIorigin_count\fP, \fItarget_rank\fP, \fItarget_count\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIorigin_datatype\fP, \fItarget_datatype\fP - INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: \fItarget_disp\fP - TYPE(MPI_Op), INTENT(IN) :: \fIop\fP - TYPE(MPI_Win), 
INTENT(IN) :: \fIwin\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -MPI_Raccumulate(\fIorigin_addr\fP, \fIorigin_count\fP, \fIorigin_datatype\fP, \fItarget_rank\fP, - \fItarget_disp\fP, \fItarget_count\fP, \fItarget_datatype\fP, \fIop\fP, \fIwin\fP, \fIrequest,\fP - \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIorigin_addr\fP - INTEGER, INTENT(IN) :: \fIorigin_count\fP, \fItarget_rank\fP, \fItarget_count\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIorigin_datatype\fP, \fItarget_datatype\fP - INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: \fItarget_disp\fP - TYPE(MPI_Op), INTENT(IN) :: \fIop\fP - TYPE(MPI_Win), INTENT(IN) :: \fIwin\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -origin_addr -Initial address of buffer (choice). -.ft R -.TP 1i -origin_count -Number of entries in buffer (nonnegative integer). -.ft R -.TP 1i -origin_datatype -Data type of each buffer entry (handle). -.ft R -.TP 1i -target_rank -Rank of target (nonnegative integer). -.ft R -.TP 1i -target_disp -Displacement from start of window to beginning of target buffer (nonnegative integer). -.ft R -.TP 1i -target_count -Number of entries in target buffer (nonnegative integer). -.ft R -.TP 1i -target_datatype -Data type of each entry in target buffer (handle). -.ft R -.TP 1i -op -Reduce operation (handle). -.ft R -.TP 1i -win -Window object (handle). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -MPI_Raccumulate: RMA request -.TP 1i -IERROR -Fortran only: Error status (integer). 
- -.SH DESCRIPTION -.ft R -\fBMPI_Accumulate\fP is a function used for one-sided MPI communication that adds the contents of the origin buffer (as defined by \fIorigin_addr\fP, \fIorigin_count\fP, and \fIorigin_datatype\fP) to the buffer specified by the arguments \fItarget_count\fP and \fItarget_datatype\fP, at offset \fItarget_disp\fP, in the target window specified by \fItarget_rank\fP and \fIwin\fP, using the operation \fIop\fP. The target window can only be accessed by processes within the same node. This is similar to MPI_Put, except that data is combined into the target area instead of overwriting it. -.sp -Any of the predefined operations for MPI_Reduce can be used. User-defined functions cannot be used. For example, if \fIop\fP is MPI_SUM, each element of the origin buffer is added to the corresponding element in the target, replacing the former value in the target. -.sp -Each datatype argument must be a predefined data type or a derived data type, where all basic components are of the same predefined data type. Both datatype arguments must be constructed from the same predefined data type. The operation \fIop\fP applies to elements of that predefined type. The \fItarget_datatype\fP argument must not specify overlapping entries, and the target buffer must fit in the target window. -.sp -A new predefined operation, MPI_REPLACE, is defined. It corresponds to the associative function f(a, b) =b; that is, the current value in the target memory is replaced by the value supplied by the origin. -.sp -\fBMPI_Raccumulate\fP is similar to \fBMPI_Accumulate\fP, except that it allocates a communication request object and associates it with the request handle (the argument \fIrequest\fP) that can be used to wait or test for completion. The completion of an \fBMPI_Raccumulate\fP operation indicates that the \fIorigin_addr\fP buffer is free to be updated. It does not indicate that the operation has completed at the target window. 
- -.SH FORTRAN 77 NOTES -.ft R -The MPI standard prescribes portable Fortran syntax for -the \fITARGET_DISP\fP argument only for Fortran 90. FORTRAN 77 -users may use the non-portable syntax -.sp -.nf - INTEGER*MPI_ADDRESS_KIND \fITARGET_DISP\fP -.fi -.sp -where MPI_ADDRESS_KIND is a constant defined in mpif.h -and gives the length of the declared integer in bytes. - -.SH NOTES -MPI_Put is a special case of \fBMPI_Accumulate\fP, with the operation MPI_REPLACE. Note, however, that MPI_Put and \fBMPI_Accumulate\fP have different constraints on concurrent updates. -.sp -It is the user's responsibility to guarantee that, when -using the accumulate functions, the target displacement argument is such -that accesses to the window are properly aligned according to the data -type arguments in the call to the \fBMPI_Accumulate\fP function. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler -may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.sp -MPI_Put -MPI_Get_accumulate -MPI_Reduce diff --git a/ompi/mpi/man/man3/MPI_Add_error_class.3in b/ompi/mpi/man/man3/MPI_Add_error_class.3in deleted file mode 100644 index 1d150f96de1..00000000000 --- a/ompi/mpi/man/man3/MPI_Add_error_class.3in +++ /dev/null @@ -1,91 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. 
-.\" $COPYRIGHT$ -.TH MPI_Add_error_class 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" - -.SH NAME -.nf -\fBMPI_Add_error_class\fP \- Creates a new error class and returns its value - -.fi -.SH SYNTAX -.ft R - -.SH C Syntax -.nf -#include -int MPI_Add_error_class(int *\fIerrorclass\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_ADD_ERROR_CLASS(\fIERRORCLASS, IERROR\fP) - INTEGER \fIERRORCLASS, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Add_error_class(\fIerrorclass\fP, \fIierror\fP) - INTEGER, INTENT(OUT) :: \fIerrorclass\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH OUTPUT PARAMETERS -.ft R -.TP 1.4i -errorclass -New error class (integer). -.ft R -.TP 1.4i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -The function MPI_Add_error_class creates a new, local error -class. - -.SH NOTES -.ft R -Because this function is local, the same value of \fIerrorclass\fP may -not be returned on all processes that make this call, even if they -call the function concurrently. Thus, same error on different -processes may not cause the same value of \fIerrorclass\fP to be -returned. To reduce the potential for confusion, MPI_Add_error_string -may be used on multiple processes to associate the same error string -with the newly created \fIerrorclass\fP. Even though \fIerrorclass\fP -may not be consistent across processes, using MPI_Add_error_string -will ensure the error string associated with it will be the same -everywhere. -.sp -No function is provided to free error classes, as it is not expected -that an application will create them in significant numbers. -.sp -The value returned is always greater than or equal to MPI_ERR_LASTCODE. - -.SH ERRORS -.ft R -Almost all MPI routines return an error value; C routines as -the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. 
By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.nf -MPI_Add_error_code -MPI_Add_error_string -MPI_Error_class -MPI_Error_string - diff --git a/ompi/mpi/man/man3/MPI_Add_error_code.3in b/ompi/mpi/man/man3/MPI_Add_error_code.3in deleted file mode 100644 index 6ddbbf671de..00000000000 --- a/ompi/mpi/man/man3/MPI_Add_error_code.3in +++ /dev/null @@ -1,84 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Add_error_code 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" - -.SH NAME -\fBMPI_Add_error_code\fP \- Creates a new error code associated -with \fIerrorclass\fP - -.SH SYNTAX -.ft R - -.SH C Syntax -.nf -#include -int MPI_Add_error_code(int \fIerrorclass\fP, int *\fIerrorcode\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_ADD_ERROR_CODE(\fIERRORCLASS, ERRORCODE, IERROR\fP) - INTEGER \fI ERRORCLASS, ERRORCODE, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Add_error_code(\fIerrorclass\fP, \fIerrorcode\fP, \fIierror\fP) - INTEGER, INTENT(IN) :: \fIerrorclass\fP - INTEGER, INTENT(OUT) :: \fIerrorcode\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1.4i -errorclass -MPI error class (integer). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1.4i -errorcode -Error code returned by an MPI routine or an MPI error class (integer). -.ft R -.TP 1.4i -IERROR -Fortran only: Error status (integer). 
- -.SH DESCRIPTION -Creates a new error code associated with \fIerrorclass\fP and returns -its value in \fIerrorcode\fP. - -.SH NOTES -.ft R -No function is provided to free error codes, as it is not expected -that an application will create them in significant numbers. -.sp -The value returned is always greater than or equal to MPI_ERR_LASTCODE. - -.SH ERRORS -.ft R -Almost all MPI routines return an error value; C routines as -the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.nf -MPI_Add_error_class -MPI_Error_class - - diff --git a/ompi/mpi/man/man3/MPI_Add_error_string.3in b/ompi/mpi/man/man3/MPI_Add_error_string.3in deleted file mode 100644 index 9955732a989..00000000000 --- a/ompi/mpi/man/man3/MPI_Add_error_string.3in +++ /dev/null @@ -1,88 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Add_error_string 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" - -.SH NAME -.nf -\fBMPI_Add_error_string\fP \- Associates a string with an error code or class - -.fi -.SH SYNTAX -.ft R - -.SH C Syntax -.nf -#include -int MPI_Add_error_string(int \fIerrorcode\fP, const char *\fIstring\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_ADD_ERROR_STRING(\fIERRORCODE, STRING, IERROR\fP) - INTEGER \fIERRORCODE, IERROR\fP - CHARACTER*(*) \fISTRING\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Add_error_string(\fIerrorcode\fP, \fIstring\fP, \fIierror\fP) - INTEGER, INTENT(IN) :: \fIerrorcode\fP - CHARACTER(LEN=*), INTENT(IN) :: \fIstring\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1.4i -errorcode -MPI error class, or an error code returned by an MPI routine (integer). -.ft R -.TP 1.4i -string -Text that corresponds to the error code or class (string). - -.SH OUTPUT PARAMETER -.ft R -.TP 1.4i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -This routine associates an error string with an error code or -class. Calling MPI_Add_error_string for an error code or class that -already has an associated error string will replace the old string -with the new one. It is erroneous to call MPI_Add_error_string for an -error value not generated via MPI_Add_error_class or -MPI_Add_error_code (e.g., an error code or class with a value not -greater than MPI_LAST_ERRCODE). - -.SH ERRORS -.ft R -Almost all MPI routines return an error value; C routines as -the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. 
- -.SH SEE ALSO -.ft R -.nf -MPI_Add_error_class -MPI_Add_error_code -MPI_Error_class -MPI_Error_string - - diff --git a/ompi/mpi/man/man3/MPI_Address.3in b/ompi/mpi/man/man3/MPI_Address.3in deleted file mode 100644 index bd554053902..00000000000 --- a/ompi/mpi/man/man3/MPI_Address.3in +++ /dev/null @@ -1,92 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Address 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Address\fP \- Gets the address of a location in memory -- use of this routine is deprecated. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Address(void *\fIlocation\fP, MPI_Aint\fP *address\fP) - -.fi -.SH Fortran Syntax -.nf -INCLUDE 'mpif.h' -MPI_ADDRESS(\fILOCATION\fP,\fI ADDRESS\fP,\fI IERROR\fP) - \fILOCATION\fP (*) - INTEGER \fIADDRESS\fP,\fI IERROR\fP - - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -location -Location in caller memory (choice). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -address -Address of location (integer). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -Note that use of this routine is \fIdeprecated\fP as of MPI-2. Please use MPI_Get_address instead. -.sp -The address of a location in memory can be found by invoking this function. Returns the (byte) address of location. -.sp -Example: Using MPI_Address for an array. -.sp -.nf -REAL A(100,100) -.fi -.br - INTEGER I1, I2, DIFF -.br - CALL MPI_ADDRESS(A(1,1), I1, IERROR) -.br - CALL MPI_ADDRESS(A(10,10), I2, IERROR) -.br - DIFF = I2 - I1 -.br -! The value of DIFF is 909*sizeofreal; the values of I1 and I2 are -.br -! implementation dependent. -.fi - -.SH NOTES -.ft R -This routine is provided for both Fortran and C programmers and may be useful when writing portable code. 
In the current release, the address returned by this routine will be the same as that produced by the C & operator. -.sp -C users may be tempted to avoid using -MPI_Address and rely on the availability of the address operator &. -Note, however, that & cast-expression is a pointer, not an address. -ANSI C does not require that the value of a pointer (or the pointer -cast to int) be the absolute address of the object pointed at although -this is commonly the case. Furthermore, referencing may not have a unique -definition on machines with a segmented address space. The use of -MPI_Address to "reference" C variables guarantees portability to -such machines as well. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler -may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -MPI_Get_address -.br diff --git a/ompi/mpi/man/man3/MPI_Aint_add.3in b/ompi/mpi/man/man3/MPI_Aint_add.3in deleted file mode 100644 index 800e2a1b975..00000000000 --- a/ompi/mpi/man/man3/MPI_Aint_add.3in +++ /dev/null @@ -1,94 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013-2015 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" $COPYRIGHT$ -.TH MPI_Aint_add 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Aint_add\fP, \fBMPI_Aint_diff\fP \- Portable functions for -arithmetic on MPI_Aint values. 
- -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -MPI_Aint MPI_Aint_add(MPI_Aint \fIbase\fP, MPI_Aint \fIdisp\fP) - -MPI_Aint MPI_Aint_diff(MPI_Aint \fIaddr1\fP, MPI_Aint \fIaddr2\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -INTEGER(KIND=MPI_ADDRESS_KIND) MPI_AINT_ADD(\fIBASE, DISP\fP) - INTEGER(KIND=MPI_ADDRESS_KIND) \fIBASE, DISP\fP - -INTEGER(KIND=MPI_ADDRESS_KIND) MPI_AINT_DIFF(\fIADDR1, ADDR2\fP) - INTEGER(KIND=MPI_ADDRESS_KIND) \fIADDR1, ADDR2\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -INTEGER(KIND=MPI_ADDRESS_KIND) MPI_AINT_ADD(\fIBASE, DISP\fP) - INTEGER(KIND=MPI_ADDRESS_KIND) \fIBASE, DISP\fP - -INTEGER(KIND=MPI_ADDRESS_KIND) MPI_AINT_DIFF(\fIADDR1, ADDR2\fP) - INTEGER(KIND=MPI_ADDRESS_KIND) \fIADDR1, ADDR2\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -base -Base address (integer). -.ft R -.TP 1i -disp -Displacement (integer). -.ft R -.TP 1i -addr1 -Minuend address (integer). -.ft R -.TP -addr2 -Subtrahend address (integer). - -.SH DESCRIPTION -.ft R -\fBMPI_Aint_add\fP produces a new MPI_Aint value that is equivalent to the sum of -the \fIbase\fP and \fIdisp\fP arguments, where \fIbase\fP represents -a base address returned by a call to \fBMPI_Get_address\fP and -\fIdisp\fP represents a signed integer displacement. The resulting -address is valid only at the process that generated \fIbase\fP, and it -must correspond to a location in the same object referenced by -\fIbase\fP, as described in MPI-3.1 \[char167] 4.1.12. 
The addition is -performed in a manner that results in the correct MPI_Aint -representation of the output address, as if the process that -originally produced \fIbase\fP had called: - -.nf - \fBMPI_Get_address\fP ((char *) \fIbase\fP + \fIdisp\fP, &\fIresult\fP); -.fi -.sp -.ft R -\fBMPI_Aint_diff\fP produces a new MPI_Aint value that is equivalent -to the difference between \fIaddr1\fP and \fIaddr2\fP arguments, where -\fIaddr1\fP and \fIaddr2\fP represent addresses returned by calls to -\fBMPI_Get_address\fP. The resulting address is valid only at the -process that generated \fIaddr1\fP and \fIaddr2\fP, and \fIaddr1\fP -and \fIaddr2\fP must correspond to locations in the same object in the -same process, as described in MPI-3.1 \[char167] 4.1.12. The difference is -calculated in a manner that results in the signed difference from -\fIaddr1\fP to \fIaddr2\fP, as if the process that originally produced -the addresses had called (char *) \fIaddr1\fP - (char *) \fIaddr2\fP -on the addresses initially passed to \fBMPI_Get_address\fP. - -.SH SEE ALSO -.ft R -.sp -MPI_Get_address diff --git a/ompi/mpi/man/man3/MPI_Aint_diff.3in b/ompi/mpi/man/man3/MPI_Aint_diff.3in deleted file mode 100644 index 5fb829b5fb7..00000000000 --- a/ompi/mpi/man/man3/MPI_Aint_diff.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Aint_add.3 diff --git a/ompi/mpi/man/man3/MPI_Allgather.3in b/ompi/mpi/man/man3/MPI_Allgather.3in deleted file mode 100644 index b4747e5a16e..00000000000 --- a/ompi/mpi/man/man3/MPI_Allgather.3in +++ /dev/null @@ -1,188 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright (c) 2010-2014 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. 
-.\" $COPYRIGHT$ -.TH MPI_Allgather 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Allgather, MPI_Iallgather, MPI_Allgather_init\fP \- Gathers data from all processes and distributes it to all processes - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Allgather(const void\fI *sendbuf\fP, int \fI sendcount\fP, - MPI_Datatype\fI sendtype\fP, void\fI *recvbuf\fP, int\fI recvcount\fP, - MPI_Datatype\fI recvtype\fP, MPI_Comm\fI comm\fP) - -int MPI_Iallgather(const void\fI *sendbuf\fP, int \fI sendcount\fP, - MPI_Datatype\fI sendtype\fP, void\fI *recvbuf\fP, int\fI recvcount\fP, - MPI_Datatype\fI recvtype\fP, MPI_Comm\fI comm\fP, MPI_Request \fI*request\fP) - -int MPI_Allgather_init(const void\fI *sendbuf\fP, int \fI sendcount\fP, - MPI_Datatype\fI sendtype\fP, void\fI *recvbuf\fP, int\fI recvcount\fP, - MPI_Datatype\fI recvtype\fP, MPI_Comm\fI comm\fP, MPI_Info\fI info\fP, MPI_Request \fI*request\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_ALLGATHER(\fISENDBUF\fP,\fI SENDCOUNT\fP,\fI SENDTYPE\fP,\fI RECVBUF\fP,\fI RECVCOUNT\fP,\fI - RECVTYPE\fP,\fI COMM\fP,\fI IERROR\fP) - \fISENDBUF\fP (*), \fIRECVBUF\fP (*) - INTEGER \fISENDCOUNT\fP,\fI SENDTYPE\fP,\fI RECVCOUNT\fP,\fI RECVTYPE\fP,\fI COMM\fP, - INTEGER \fIIERROR\fP - -MPI_IALLGATHER(\fISENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNT, - RECVTYPE, COMM, REQUEST, IERROR\fP) - \fISENDBUF\fP(*)\fI, RECVBUF\fP (*) - INTEGER \fISENDCOUNT, SENDTYPE, RECVCOUNT, RECVTYPE, COMM\fP - INTEGER \fIREQUEST, IERROR\fP - -MPI_ALLGATHER_INIT(\fISENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNT, - RECVTYPE, COMM, INFO, REQUEST, IERROR\fP) - \fISENDBUF\fP(*)\fI, RECVBUF\fP (*) - INTEGER \fISENDCOUNT, SENDTYPE, RECVCOUNT, RECVTYPE, COMM\fP - INTEGER \fIINFO, REQUEST, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Allgather(\fIsendbuf\fP, \fIsendcount\fP, \fIsendtype\fP, \fIrecvbuf\fP, \fIrecvcount\fP, \fIrecvtype\fP, - \fIcomm\fP, 
\fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN) :: \fIsendbuf\fP - TYPE(*), DIMENSION(..) :: \fIrecvbuf\fP - INTEGER, INTENT(IN) :: \fIsendcount\fP, \fIrecvcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIsendtype\fP, \fIrecvtype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -MPI_Iallgather(\fIsendbuf\fP, \fIsendcount\fP, \fIsendtype\fP, \fIrecvbuf\fP, \fIrecvcount\fP, \fIrecvtype\fP, - \fIcomm\fP, \fIrequest\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIsendbuf\fP - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIrecvbuf\fP - INTEGER, INTENT(IN) :: \fIsendcount\fP, \fIrecvcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIsendtype\fP, \fIrecvtype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -MPI_Allgather_init(\fIsendbuf\fP, \fIsendcount\fP, \fIsendtype\fP, \fIrecvbuf\fP, \fIrecvcount\fP, \fIrecvtype\fP, - \fIcomm\fP, \fIinfo\fP, \fIrequest\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIsendbuf\fP - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIrecvbuf\fP - INTEGER, INTENT(IN) :: \fIsendcount\fP, \fIrecvcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIsendtype\fP, \fIrecvtype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Info), INTENT(IN) :: \fIinfo\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -sendbuf -Starting address of send buffer (choice). -.TP 1i -sendcount -Number of elements in send buffer (integer). -.TP 1i -sendtype -Datatype of send buffer elements (handle). -.TP 1i -recvbuf -Starting address of recv buffer (choice). -.TP 1i -recvcount -Number of elements received from any process (integer). -.TP 1i -recvtype -Datatype of receive buffer elements (handle). -.TP 1i -comm -Communicator (handle). -.TP 1i -info -Info (handle, persistent only). 
- -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -recvbuf -Address of receive buffer (choice). -.ft R -.TP 1i -request -Request (handle, non-blocking only). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Allgather is similar to MPI_Gather, except that all processes receive the result, instead of just the root. In other words, all processes contribute to the result, and all processes receive the result. -.sp -The type signature associated with sendcount, sendtype at a process must be equal to the type signature associated with recvcount, recvtype at any other process. -.sp -The outcome of a call to MPI_Allgather(\&...) is as if all processes executed n calls to -.sp -.nf - MPI_Gather(sendbuf,sendcount,sendtype,recvbuf,recvcount, - recvtype,root,comm), -.fi -.sp -.fi -for root = 0 , ..., n-1. The rules for correct usage of MPI_Allgather are easily found from the corresponding rules for MPI_Gather. -.sp -\fBExample:\fR The all-gather version of Example 1 in MPI_Gather. Using MPI_Allgather, we will gather 100 ints from every process in the group to every process. -.sp -.nf -MPI_Comm comm; - int gsize,sendarray[100]; - int *rbuf; - \&... - MPI_Comm_size( comm, &gsize); - rbuf = (int *)malloc(gsize*100*sizeof(int)); - MPI_Allgather( sendarray, 100, MPI_INT, rbuf, 100, MPI_INT, comm); -.fi -.sp -After the call, every process has the group-wide concatenation of the sets of data. - -.SH USE OF IN-PLACE OPTION -When the communicator is an intracommunicator, you can perform an all-gather operation in-place (the output buffer is used as the input buffer). Use the variable MPI_IN_PLACE as the value of \fIsendbuf\fR. In this case, \fIsendcount\fR and \fIsendtype\fR are ignored. The input data of each process is assumed to be in the area where that process would receive its own contribution to the receive buffer. 
Specifically, the outcome of a call to MPI_Allgather that used the in-place option is identical to the case in which all processes executed \fIn\fR calls to -.sp -.nf - MPI_ALLGATHER ( MPI_IN_PLACE, 0, MPI_DATATYPE_NULL, recvbuf, - recvcount, recvtype, root, comm ) - -for root =0, ... , n-1. -.fi -.sp -Note that MPI_IN_PLACE is a special kind of value; it has the same restrictions on its use as MPI_BOTTOM. -.sp -Because the in-place option converts the receive buffer into a send-and-receive buffer, a Fortran binding that includes INTENT must mark these as INOUT, not OUT. -.sp -.SH WHEN COMMUNICATOR IS AN INTER-COMMUNICATOR -.sp -When the communicator is an inter-communicator, the gather operation occurs in two phases. The data is gathered from all the members of the first group and received by all the members of the second group. Then the data is gathered from all the members of the second group and received by all the members of the first. The operation, however, need not be symmetric. The number of items sent by the processes in first group need not be equal to the number of items sent by the the processes in the second group. You can move data in only one direction by giving \fIsendcount\fR a value of 0 for communication in the reverse direction. -.sp - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler -may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. 
- -.SH SEE ALSO -.ft R -.sp -MPI_Allgatherv -.br -MPI_Gather - diff --git a/ompi/mpi/man/man3/MPI_Allgather_init.3in b/ompi/mpi/man/man3/MPI_Allgather_init.3in deleted file mode 100644 index f7b03f37700..00000000000 --- a/ompi/mpi/man/man3/MPI_Allgather_init.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Allgather.3 diff --git a/ompi/mpi/man/man3/MPI_Allgatherv.3in b/ompi/mpi/man/man3/MPI_Allgatherv.3in deleted file mode 100644 index 837cf40be31..00000000000 --- a/ompi/mpi/man/man3/MPI_Allgatherv.3in +++ /dev/null @@ -1,177 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright (c) 2010-2014 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2007-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Allgatherv 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Allgatherv, MPI_Iallgatherv, MPI_Allgatherv_init\fP \- Gathers data from all processes and delivers it to all. Each process may contribute a different amount of data. 
- -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Allgatherv(const void\fI *sendbuf\fP, int\fI sendcount\fP, - MPI_Datatype\fI sendtype\fP, void\fI *recvbuf\fP, const int\fI recvcounts[]\fP, - const int\fI displs[]\fP, MPI_Datatype\fI recvtype\fP, MPI_Comm\fI comm\fP) - -int MPI_Iallgatherv(const void\fI *sendbuf\fP, int\fI sendcount\fP, - MPI_Datatype\fI sendtype\fP, void\fI *recvbuf\fP, const int\fI recvcounts[]\fP, - const int\fI displs[]\fP, MPI_Datatype\fI recvtype\fP, MPI_Comm\fI comm\fP, - MPI_Request \fI*request\fP) - -int MPI_Allgatherv_init(const void\fI *sendbuf\fP, int\fI sendcount\fP, - MPI_Datatype\fI sendtype\fP, void\fI *recvbuf\fP, const int\fI recvcounts[]\fP, - const int\fI displs[]\fP, MPI_Datatype\fI recvtype\fP, MPI_Comm\fI comm\fP, - MPI_Info\fI info\fP, MPI_Request \fI*request\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_ALLGATHERV(\fISENDBUF\fP,\fI SENDCOUNT\fP, \fISENDTYPE\fP,\fI RECVBUF\fP, - \fIRECVCOUNT\fP,\fI DISPLS\fP, \fIRECVTYPE\fP,\fI COMM\fP,\fI IERROR\fP) - \fISENDBUF\fP(*), \fIRECVBUF\fP(*) - INTEGER \fISENDCOUNT\fP,\fI SENDTYPE\fP, \fIRECVCOUNT\fP(*) - INTEGER \fIDISPLS\fP(*),\fI RECVTYPE\fP,\fI COMM\fP,\fI IERROR\fP - -MPI_IALLGATHERV(\fISENDBUF\fP,\fI SENDCOUNT\fP, \fISENDTYPE\fP,\fI RECVBUF\fP, - \fIRECVCOUNT\fP,\fI DISPLS\fP, \fIRECVTYPE\fP,\fI COMM\fP, \fI REQUEST\fP, \fI IERROR\fP) - \fISENDBUF\fP(*)\fI, RECVBUF\fP(*) - INTEGER \fISENDCOUNT, SENDTYPE, RECVCOUNT\fP(*), - INTEGER \fIDISPLS\fP(*)\fI, RECVTYPE, COMM, REQUEST, IERROR\fP - -MPI_ALLGATHERV_INIT(\fISENDBUF\fP,\fI SENDCOUNT\fP, \fISENDTYPE\fP,\fI RECVBUF\fP, - \fIRECVCOUNT\fP,\fI DISPLS\fP, \fIRECVTYPE\fP,\fI COMM\fP, \fI INFO\fP, \fI REQUEST\fP, \fI IERROR\fP) - \fISENDBUF\fP(*)\fI, RECVBUF\fP(*) - INTEGER \fISENDCOUNT, SENDTYPE, RECVCOUNT\fP(*), - INTEGER \fIDISPLS\fP(*)\fI, RECVTYPE, COMM, INFO, REQUEST, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Allgatherv(\fIsendbuf\fP, 
\fIsendcount\fP, \fIsendtype\fP, \fIrecvbuf\fP, \fIrecvcounts\fP, \fIdispls\fP, - \fIrecvtype\fP, \fIcomm\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN) :: \fIsendbuf\fP - TYPE(*), DIMENSION(..) :: \fIrecvbuf\fP - INTEGER, INTENT(IN) :: \fIsendcount\fP, \fIrecvcounts(*)\fP, \fIdispls(*)\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIsendtype\fP, \fIrecvtype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -MPI_Iallgatherv(\fIsendbuf\fP, \fIsendcount\fP, \fIsendtype\fP, \fIrecvbuf\fP, \fIrecvcounts\fP, \fIdispls\fP, - \fIrecvtype\fP, \fIcomm\fP, \fIrequest\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIsendbuf\fP - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIrecvbuf\fP - INTEGER, INTENT(IN) :: \fIsendcount\fP - INTEGER, INTENT(IN), ASYNCHRONOUS :: \fIrecvcounts(*)\fP, \fIdispls(*)\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIsendtype\fP, \fIrecvtype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -MPI_Allgatherv_init(\fIsendbuf\fP, \fIsendcount\fP, \fIsendtype\fP, \fIrecvbuf\fP, \fIrecvcounts\fP, \fIdispls\fP, - \fIrecvtype\fP, \fIcomm\fP, \fIinfo\fP, \fIrequest\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIsendbuf\fP - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIrecvbuf\fP - INTEGER, INTENT(IN) :: \fIsendcount\fP - INTEGER, INTENT(IN), ASYNCHRONOUS :: \fIrecvcounts(*)\fP, \fIdispls(*)\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIsendtype\fP, \fIrecvtype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Info), INTENT(IN) :: \fIinfo\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -sendbuf -Starting address of send buffer (choice). -.TP 1i -sendcount -Number of elements in send buffer (integer). -.TP 1i -sendtype -Datatype of send buffer elements (handle). 
-.TP 1i -recvcount -Integer array (of length group size) containing the number of elements that are received from each process. -.TP 1i -displs -Integer array (of length group size). Entry i specifies the displacement (relative to recvbuf) at which to place the incoming data from process i. -.TP 1i -recvtype -Datatype of receive buffer elements (handle). -.TP 1i -comm -Communicator (handle). -.TP 1i -info -Info (handle, persistent only). -.sp -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -recvbuf -Address of receive buffer (choice). -.TP 1i -request -Request (handle, non-blocking only). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Allgatherv is similar to MPI_Allgather in that all processes gather data from all other processes, except that each process can send a different amount of data. The block of data sent from the jth process is received by every process and placed in the jth block of the buffer -.I recvbuf. -.sp -The type signature associated with sendcount, sendtype, at process j must be equal to the type signature associated with recvcounts[j], recvtype at any other process. -.sp -The outcome is as if all processes executed calls to -.nf -MPI_Allgatherv(sendbuf,sendcount,sendtype,recvbuf,recvcount, - displs,recvtype,root,comm) -.fi -.sp -for root = 0 , ..., n-1. The rules for correct usage of MPI_Allgatherv are easily found from the corresponding rules for MPI_Gatherv. - -.SH USE OF IN-PLACE OPTION -When the communicator is an intracommunicator, you can perform an all-gather operation in-place (the output buffer is used as the input buffer). Use the variable MPI_IN_PLACE as the value of \fIsendbuf\fR. In this case, \fIsendcount\fR and \fIsendtype\fR are ignored. The input data of each process is assumed to be in the area where that process would receive its own contribution to the receive buffer. 
Specifically, the outcome of a call to MPI_Allgather that used the in-place option is identical to the case in which all processes executed \fIn\fR calls to -.sp -.nf - MPI_ALLGATHERV ( MPI_IN_PLACE, 0, MPI_DATATYPE_NULL, recvbuf, - recvcounts, displs, recvtype, root, comm ) - -for root =0, ... , n-1. -.fi -.sp -Note that MPI_IN_PLACE is a special kind of value; it has the same restrictions on its use as MPI_BOTTOM. -.sp -Because the in-place option converts the receive buffer into a send-and-receive buffer, a Fortran binding that includes INTENT must mark these as INOUT, not OUT. -.sp -.SH WHEN COMMUNICATOR IS AN INTER-COMMUNICATOR -.sp -When the communicator is an inter-communicator, the gather operation occurs in two phases. The data is gathered from all the members of the first group, concatenated, and received by all the members of the second group. Then the data is gathered from all the members of the second group, concatenated, and received by all the members of the first. The send buffer arguments in the one group must be consistent with the receive buffer arguments in the other group, and vice versa. The operation must exhibit symmetric, full-duplex behavior. -.sp - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler -may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. 
- -.SH SEE ALSO -.ft R -.sp -MPI_Gatherv -.br -MPI_Allgather diff --git a/ompi/mpi/man/man3/MPI_Allgatherv_init.3in b/ompi/mpi/man/man3/MPI_Allgatherv_init.3in deleted file mode 100644 index 8fc7b812b1a..00000000000 --- a/ompi/mpi/man/man3/MPI_Allgatherv_init.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Allgatherv.3 diff --git a/ompi/mpi/man/man3/MPI_Alloc_mem.3in b/ompi/mpi/man/man3/MPI_Alloc_mem.3in deleted file mode 100644 index 768106b854b..00000000000 --- a/ompi/mpi/man/man3/MPI_Alloc_mem.3in +++ /dev/null @@ -1,112 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2010-2014 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Alloc_mem 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Alloc_mem \fP \- Allocates a specified memory segment. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Alloc_mem(MPI_Aint \fIsize\fP, MPI_Info \fIinfo\fP, void *\fIbaseptr\fP) - -.fi -.SH Fortran Syntax (see FORTRAN NOTES) -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_ALLOC_MEM(\fISIZE, INFO, BASEPTR, IERROR\fP) - INTEGER \fIINFO, IERROR\fP - INTEGER(KIND=MPI_ADDRESS_KIND) \fISIZE, BASEPTR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Alloc_mem(\fIsize\fP, \fIinfo\fP, \fIbaseptr\fP, \fIierror\fP) - USE, INTRINSIC :: \fIISO_C_BINDING\fP, \fIONLY \fP - INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: \fIsize\fP - TYPE(MPI_Info), INTENT(IN) :: \fIinfo\fP - TYPE(C_PTR), INTENT(OUT) :: \fIbaseptr\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -size -Size of memory segment in bytes (nonnegative integer). -.ft R -.TP 1i -info -Info argument (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -baseptr -Pointer to beginning of memory segment allocated. -.TP 1i -IERROR -Fortran only: Error status (integer). 
- -.SH DESCRIPTION -.ft R -MPI_Alloc_mem allocates \fIsize\fP bytes of memory. The starting address -of this memory is returned in the variable \fIbaseptr\fP. -.sp - -.SH C NOTES -.ft R -The parameter \fIbaseptr\fP is of type \fIvoid *\fP to allow passing any pointer object for this parameter. The provided argument should be a pointer to a pointer of arbitrary type (e.g., \fIvoid **\fP). - -.SH FORTRAN NOTES -.ft R -There is no portable FORTRAN 77 syntax for using MPI_Alloc_mem. -There is no portable Fortran syntax for using pointers returned -from MPI_Alloc_mem. However, MPI_Alloc_mem can be used with Sun -Fortran compilers. -.sp -From FORTRAN 77, you can use the following non-standard -declarations for the SIZE and BASEPTR arguments: -.nf - INCLUDE "mpif.h" - INTEGER*MPI_ADDRESS_KIND SIZE, BASEPTR -.fi -.sp -From either FORTRAN 77 or Fortran 90, you can use "Cray pointers" -for the BASEPTR argument. Cray pointers are described further in -the Fortran User's Guide and are supported by many Fortran compilers. -For example, -.sp -.nf - INCLUDE "mpif.h" - REAL*4 A(100,100) - POINTER (BASEPTR, A) - INTEGER*MPI_ADDRESS_KIND SIZE - - SIZE = 4 * 100 * 100 - CALL MPI_ALLOC_MEM(SIZE,MPI_INFO_NULL,BASEPTR,IERR) - - ! use A - - CALL MPI_FREE_MEM(A, IERR) -.fi -.ft R - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler -may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. 
- -.SH SEE ALSO -.ft R -.sp -MPI_Free_mem diff --git a/ompi/mpi/man/man3/MPI_Allreduce.3in b/ompi/mpi/man/man3/MPI_Allreduce.3in deleted file mode 100644 index d4cfa2a4d9e..00000000000 --- a/ompi/mpi/man/man3/MPI_Allreduce.3in +++ /dev/null @@ -1,199 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright (c) 2010-2014 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2007-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Allreduce 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Allreduce, MPI_Iallreduce, MPI_Allreduce_init\fP \- Combines values from all processes and distributes the result back to all processes. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Allreduce(const void \fI*sendbuf\fP, void \fI*recvbuf\fP, int\fI count\fP, - MPI_Datatype\fI datatype\fP, MPI_Op\fI op\fP, MPI_Comm\fI comm\fP) - -int MPI_Iallreduce(const void \fI*sendbuf\fP, void \fI*recvbuf\fP, int\fI count\fP, - MPI_Datatype\fI datatype\fP, MPI_Op\fI op\fP, MPI_Comm\fI comm\fP, - MPI_Request \fI*request\fP) - -int MPI_Allreduce_init(const void \fI*sendbuf\fP, void \fI*recvbuf\fP, int\fI count\fP, - MPI_Datatype\fI datatype\fP, MPI_Op\fI op\fP, MPI_Comm\fI comm\fP, - MPI_Info \fIinfo\fP, MPI_Request \fI*request\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_ALLREDUCE(\fISENDBUF\fP,\fI RECVBUF\fP, \fICOUNT\fP,\fI DATATYPE\fP,\fI OP\fP, \fICOMM\fP, \fIIERROR\fP) - \fISENDBUF\fP(*), \fIRECVBUF\fP(*) - INTEGER \fICOUNT\fP,\fI DATATYPE\fP,\fI OP\fP,\fI COMM\fP,\fI IERROR\fP - -MPI_IALLREDUCE(\fISENDBUF, RECVBUF, COUNT, DATATYPE, OP, COMM, REQUEST, IERROR\fP) - \fISENDBUF\fP(*)\fI, RECVBUF\fP(*) - INTEGER \fICOUNT, DATATYPE, OP, COMM, REQUEST, IERROR\fP - -MPI_ALLREDUCE_INIT(\fISENDBUF, RECVBUF, COUNT, DATATYPE, OP, COMM, INFO, REQUEST, IERROR\fP) - \fISENDBUF\fP(*)\fI, RECVBUF\fP(*) - INTEGER \fICOUNT, DATATYPE, OP, COMM, INFO, REQUEST, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Allreduce(\fIsendbuf\fP, \fIrecvbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIop\fP, \fIcomm\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN) :: \fIsendbuf\fP - TYPE(*), DIMENSION(..) :: \fIrecvbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Op), INTENT(IN) :: \fIop\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -MPI_Iallreduce(\fIsendbuf\fP, \fIrecvbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIop\fP, \fIcomm\fP, \fIrequest\fP, - \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIsendbuf\fP - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIrecvbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Op), INTENT(IN) :: \fIop\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -MPI_Allreduce_init(\fIsendbuf\fP, \fIrecvbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIop\fP, \fIcomm\fP, \fIinfo\fP, \fIrequest\fP, - \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIsendbuf\fP - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIrecvbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - 
TYPE(MPI_Op), INTENT(IN) :: \fIop\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Info), INTENT(IN) :: \fIinfo\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -sendbuf -Starting address of send buffer (choice). -.TP 1i -count -Number of elements in send buffer (integer). -.TP 1i -datatype -Datatype of elements of send buffer (handle). -.TP 1i -op -Operation (handle). -.TP 1i -comm -Communicator (handle). -.TP 1i -info -Info (handle, persistent only). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -recvbuf -Starting address of receive buffer (choice). -.TP 1i -request -Request (handle, non-blocking only). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -Same as MPI_Reduce except that the result appears in the receive buffer of all the group members. -.sp -\fBExample 1:\fR A routine that computes the product of a vector and an array that are distributed across a group of processes and returns the answer at all nodes (compare with Example 2, with MPI_Reduce, below). -.sp -.nf -SUBROUTINE PAR_BLAS2(m, n, a, b, c, comm) -REAL a(m), b(m,n) ! local slice of array -REAL c(n) ! result -REAL sum(n) -INTEGER n, comm, i, j, ierr - -! local sum -DO j= 1, n - sum(j) = 0.0 - DO i = 1, m - sum(j) = sum(j) + a(i)*b(i,j) - END DO -END DO - -! global sum -CALL MPI_ALLREDUCE(sum, c, n, MPI_REAL, MPI_SUM, comm, ierr) - -! return result at all nodes -RETURN -.fi -.sp -\fBExample 2:\fR A routine that computes the product of a vector and an array that are distributed across a group of processes and returns the answer at node zero. -.sp -.nf -SUBROUTINE PAR_BLAS2(m, n, a, b, c, comm) -REAL a(m), b(m,n) ! local slice of array -REAL c(n) ! result -REAL sum(n) -INTEGER n, comm, i, j, ierr - -! local sum -DO j= 1, n - sum(j) = 0.0 - DO i = 1, m - sum(j) = sum(j) + a(i)*b(i,j) - END DO -END DO - -! 
global sum -CALL MPI_REDUCE(sum, c, n, MPI_REAL, MPI_SUM, 0, comm, ierr) - -! return result at node zero (and garbage at the other nodes) -RETURN -.fi -.SH USE OF IN-PLACE OPTION -When the communicator is an intracommunicator, you can perform an all-reduce operation in-place (the output buffer is used as the input buffer). Use the variable MPI_IN_PLACE as the value of \fIsendbuf\fR at all processes. -.sp -Note that MPI_IN_PLACE is a special kind of value; it has the same restrictions on its use as MPI_BOTTOM. -.sp -Because the in-place option converts the receive buffer into a send-and-receive buffer, a Fortran binding that includes INTENT must mark these as INOUT, not OUT. -.sp -.SH WHEN COMMUNICATOR IS AN INTER-COMMUNICATOR -When the communicator is an inter-communicator, the reduce operation occurs in two phases. The data is reduced from all the members of the first group and received by all the members of the second group. Then the data is reduced from all the members of the second group and received by all the members of the first. The operation exhibits a symmetric, full-duplex behavior. -.sp -When the communicator is an intra-communicator, these groups are the same, and the operation occurs in a single phase. -.SH NOTES ON COLLECTIVE OPERATIONS - -The reduction functions ( -.I MPI_Op -) do not return an error value. As a result, -if the functions detect an error, all they can do is either call -.I MPI_Abort -or silently skip the problem. Thus, if you change the error handler from -.I MPI_ERRORS_ARE_FATAL -to something else, for example, -.I MPI_ERRORS_RETURN -, -then no error may be indicated. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. 
The error handler -may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - - diff --git a/ompi/mpi/man/man3/MPI_Allreduce_init.3in b/ompi/mpi/man/man3/MPI_Allreduce_init.3in deleted file mode 100644 index 9c97358ebe6..00000000000 --- a/ompi/mpi/man/man3/MPI_Allreduce_init.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Allreduce.3 diff --git a/ompi/mpi/man/man3/MPI_Alltoall.3in b/ompi/mpi/man/man3/MPI_Alltoall.3in deleted file mode 100644 index bc980a30650..00000000000 --- a/ompi/mpi/man/man3/MPI_Alltoall.3in +++ /dev/null @@ -1,195 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright (c) 2010-2014 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Alltoall 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" - -.SH NAME -\fBMPI_Alltoall, MPI_Ialltoall, MPI_Alltoall_init\fP \- All processes send data to all processes - -.SH SYNTAX -.ft R - -.SH C Syntax -.nf -#include -int MPI_Alltoall(const void *\fIsendbuf\fP, int \fIsendcount\fP, - MPI_Datatype \fIsendtype\fP, void *\fIrecvbuf\fP, int \fIrecvcount\fP, - MPI_Datatype \fIrecvtype\fP, MPI_Comm \fIcomm\fP) - -int MPI_Ialltoall(const void *\fIsendbuf\fP, int \fIsendcount\fP, - MPI_Datatype \fIsendtype\fP, void *\fIrecvbuf\fP, int \fIrecvcount\fP, - MPI_Datatype \fIrecvtype\fP, MPI_Comm \fIcomm\fP, MPI_Request \fI*request\fP) - -int MPI_Alltoall_init(const void *\fIsendbuf\fP, int \fIsendcount\fP, - MPI_Datatype \fIsendtype\fP, void *\fIrecvbuf\fP, int \fIrecvcount\fP, - MPI_Datatype \fIrecvtype\fP, MPI_Comm \fIcomm\fP, MPI_Info \fIinfo\fP, MPI_Request \fI*request\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_ALLTOALL(\fISENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNT, - RECVTYPE, COMM, IERROR\fP) - - \fISENDBUF(*), RECVBUF(*)\fP - INTEGER \fISENDCOUNT, SENDTYPE, RECVCOUNT, RECVTYPE\fP - INTEGER \fICOMM, IERROR\fP - -MPI_IALLTOALL(\fISENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNT, - RECVTYPE, COMM, REQUEST, IERROR\fP) - - \fISENDBUF(*), RECVBUF(*)\fP - INTEGER \fISENDCOUNT, SENDTYPE, RECVCOUNT, RECVTYPE\fP - INTEGER \fICOMM, REQUEST, IERROR\fP - -MPI_ALLTOALL_INIT(\fISENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNT, - RECVTYPE, COMM, INFO, REQUEST, IERROR\fP) - - \fISENDBUF(*), RECVBUF(*)\fP - INTEGER \fISENDCOUNT, SENDTYPE, RECVCOUNT, RECVTYPE\fP - INTEGER \fICOMM, INFO, REQUEST, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Alltoall(\fIsendbuf\fP, \fIsendcount\fP, \fIsendtype\fP, \fIrecvbuf\fP, \fIrecvcount\fP, \fIrecvtype\fP, - \fIcomm\fP, \fIierror\fP) - - TYPE(*), DIMENSION(..), INTENT(IN) :: \fIsendbuf\fP - TYPE(*), DIMENSION(..) 
:: \fIrecvbuf\fP - INTEGER, INTENT(IN) :: \fIsendcount\fP, \fIrecvcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIsendtype\fP, \fIrecvtype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -MPI_Ialltoall(\fIsendbuf\fP, \fIsendcount\fP, \fIsendtype\fP, \fIrecvbuf\fP, \fIrecvcount\fP, \fIrecvtype\fP, - \fIcomm\fP, \fIrequest\fP, \fIierror\fP) - - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIsendbuf\fP - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIrecvbuf\fP - INTEGER, INTENT(IN) :: \fIsendcount\fP, \fIrecvcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIsendtype\fP, \fIrecvtype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -MPI_Alltoall_init(\fIsendbuf\fP, \fIsendcount\fP, \fIsendtype\fP, \fIrecvbuf\fP, \fIrecvcount\fP, \fIrecvtype\fP, - \fIcomm\fP, \fIinfo\fP, \fIrequest\fP, \fIierror\fP) - - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIsendbuf\fP - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIrecvbuf\fP - INTEGER, INTENT(IN) :: \fIsendcount\fP, \fIrecvcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIsendtype\fP, \fIrecvtype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Info), INTENT(IN) :: \fIinfo\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1.2i -sendbuf -Starting address of send buffer (choice). -.TP 1.2i -sendcount -Number of elements to send to each process (integer). -.TP 1.2i -sendtype -Datatype of send buffer elements (handle). -.TP 1.2i -recvcount -Number of elements to receive from each process (integer). -.TP 1.2i -recvtype -Datatype of receive buffer elements (handle). -.TP 1.2i -comm -Communicator over which data is to be exchanged (handle). -.TP 1.2i -info -Info (handle, persistent only) - -.SH OUTPUT PARAMETERS -.ft R -.TP 1.2i -recvbuf -Starting address of receive buffer (choice). 
-.TP 1.2i -request -Request (handle, non-blocking only). -.ft R -.TP 1.2i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Alltoall is a collective operation in which all processes send the same amount of data to each other, and receive the same amount of data from each other. The operation of this routine can be represented as follows, where each process performs 2n (n being the number of processes in communicator \fIcomm\fP) independent point-to-point communications (including communication with itself). -.sp -.nf - MPI_Comm_size(\fIcomm\fP, &n); - for (i = 0, i < n; i++) - MPI_Send(\fIsendbuf\fP + i * \fIsendcount\fP * extent(\fIsendtype\fP), - \fIsendcount\fP, \fIsendtype\fP, i, ..., \fIcomm\fP); - for (i = 0, i < n; i++) - MPI_Recv(\fIrecvbuf\fP + i * \fIrecvcount\fP * extent(\fIrecvtype\fP), - \fIrecvcount\fP, \fIrecvtype\fP, i, ..., \fIcomm\fP); -.fi -.sp -Each process breaks up its local \fIsendbuf\fP into n blocks \- each -containing \fIsendcount\fP elements of type \fIsendtype\fP \- and -divides its \fIrecvbuf\fP similarly according to \fIrecvcount\fP and -\fIrecvtype\fP. Process j sends the k-th block of its local -\fIsendbuf\fP to process k, which places the data in the j-th block of -its local \fIrecvbuf\fP. The amount of data sent must be equal to the -amount of data received, pairwise, between every pair of processes. - -WHEN COMMUNICATOR IS AN INTER-COMMUNICATOR -.sp -When the communicator is an inter-communicator, the gather operation occurs in two phases. The data is gathered from all the members of the first group and received by all the members of the second group. Then the data is gathered from all the members of the second group and received by all the members of the first. The operation exhibits a symmetric, full-duplex behavior. -.sp -The first group defines the root process. The root process uses MPI_ROOT as the value of \fIroot\fR. 
All other processes in the first group use MPI_PROC_NULL as the value of \fIroot\fR. All processes in the second group use the rank of the root process in the first group as the value of \fIroot\fR. -.sp -When the communicator is an intra-communicator, these groups are the same, and the operation occurs in a single phase. - -.SH USE OF IN-PLACE OPTION -When the communicator is an intracommunicator, you can perform an all-to-all operation in-place (the output buffer is used as the input buffer). Use the variable MPI_IN_PLACE as the value of \fIsendbuf\fR. In this case, \fIsendcount\fR and \fIsendtype\fR are ignored. The input data of each process is assumed to be in the area where that process would receive its own contribution to the receive buffer. - -.sp -.SH NOTES -.sp -All arguments on all processes are significant. The \fIcomm\fP argument, -in particular, must describe the same communicator on all processes. -.sp -There are two MPI library functions that are more general than -MPI_Alltoall. MPI_Alltoallv allows all-to-all communication to and -from buffers that need not be contiguous; different processes may -send and receive different amounts of data. MPI_Alltoallw expands -MPI_Alltoallv's functionality to allow the exchange of data with -different datatypes. - -.SH ERRORS -.ft R -Almost all MPI routines return an error value; C routines as -the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. 
- -.SH SEE ALSO -.ft R -.nf -MPI_Alltoallv -MPI_Alltoallw - diff --git a/ompi/mpi/man/man3/MPI_Alltoall_init.3in b/ompi/mpi/man/man3/MPI_Alltoall_init.3in deleted file mode 100644 index 591c20bb28d..00000000000 --- a/ompi/mpi/man/man3/MPI_Alltoall_init.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Alltoall.3 diff --git a/ompi/mpi/man/man3/MPI_Alltoallv.3in b/ompi/mpi/man/man3/MPI_Alltoallv.3in deleted file mode 100644 index a5fc02713fd..00000000000 --- a/ompi/mpi/man/man3/MPI_Alltoallv.3in +++ /dev/null @@ -1,230 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright (c) 2010-2014 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Alltoallv 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" - -.SH NAME -\fBMPI_Alltoallv, MPI_Ialltoallv, MPI_Alltoallv_init\fP \- All processes send different amount of data to, and receive different amount of data from, all processes -.SH SYNTAX -.ft R - -.SH C Syntax -.nf -#include -int MPI_Alltoallv(const void *\fIsendbuf\fP, const int \fIsendcounts\fP[], - const int \fIsdispls\fP[], MPI_Datatype \fIsendtype\fP, - void *\fIrecvbuf\fP, const int\fI recvcounts\fP[], - const int \fIrdispls\fP[], MPI_Datatype \fIrecvtype\fP, MPI_Comm \fIcomm\fP) - -int MPI_Ialltoallv(const void *\fIsendbuf\fP, const int \fIsendcounts\fP[], - const int \fIsdispls\fP[], MPI_Datatype \fIsendtype\fP, - void *\fIrecvbuf\fP, const int\fI recvcounts\fP[], - const int \fIrdispls\fP[], MPI_Datatype \fIrecvtype\fP, MPI_Comm \fIcomm\fP, - MPI_Request \fI*request\fP) - -int MPI_Alltoallv_init(const void *\fIsendbuf\fP, const int \fIsendcounts\fP[], - const int \fIsdispls\fP[], MPI_Datatype \fIsendtype\fP, - void *\fIrecvbuf\fP, const int\fI recvcounts\fP[], - const int \fIrdispls\fP[], MPI_Datatype \fIrecvtype\fP, MPI_Comm 
\fIcomm\fP, - MPI_Info \fIinfo\fP, MPI_Request \fI*request\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_ALLTOALLV(\fISENDBUF, SENDCOUNTS, SDISPLS, SENDTYPE, - RECVBUF, RECVCOUNTS, RDISPLS, RECVTYPE, COMM, IERROR\fP) - - \fISENDBUF(*), RECVBUF(*)\fP - INTEGER \fISENDCOUNTS(*), SDISPLS(*), SENDTYPE\fP - INTEGER \fIRECVCOUNTS(*), RDISPLS(*), RECVTYPE\fP - INTEGER \fICOMM, IERROR\fP - -MPI_IALLTOALLV(\fISENDBUF, SENDCOUNTS, SDISPLS, SENDTYPE, - RECVBUF, RECVCOUNTS, RDISPLS, RECVTYPE, COMM, REQUEST, IERROR\fP) - - \fISENDBUF(*), RECVBUF(*)\fP - INTEGER \fISENDCOUNTS(*), SDISPLS(*), SENDTYPE\fP - INTEGER \fIRECVCOUNTS(*), RDISPLS(*), RECVTYPE\fP - INTEGER \fICOMM, REQUEST, IERROR\fP - -MPI_ALLTOALLV_INIT(\fISENDBUF, SENDCOUNTS, SDISPLS, SENDTYPE, - RECVBUF, RECVCOUNTS, RDISPLS, RECVTYPE, COMM, INFO, REQUEST, IERROR\fP) - - \fISENDBUF(*), RECVBUF(*)\fP - INTEGER \fISENDCOUNTS(*), SDISPLS(*), SENDTYPE\fP - INTEGER \fIRECVCOUNTS(*), RDISPLS(*), RECVTYPE\fP - INTEGER \fICOMM, INFO, REQUEST, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Alltoallv(\fIsendbuf\fP, \fIsendcounts\fP, \fIsdispls\fP, \fIsendtype\fP, \fIrecvbuf\fP, \fIrecvcounts\fP, - \fIrdispls\fP, \fIrecvtype\fP, \fIcomm\fP, \fIierror\fP) - - TYPE(*), DIMENSION(..), INTENT(IN) :: \fIsendbuf\fP - TYPE(*), DIMENSION(..) 
:: \fIrecvbuf\fP - INTEGER, INTENT(IN) :: \fIsendcounts(*)\fP, \fIsdispls(*)\fP, \fIrecvcounts(*),\fP - \fIrdispls(*)\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIsendtype\fP, \fIrecvtype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -MPI_Ialltoallv(\fIsendbuf\fP, \fIsendcounts\fP, \fIsdispls\fP, \fIsendtype\fP, \fIrecvbuf\fP, \fIrecvcounts\fP, - \fIrdispls\fP, \fIrecvtype\fP, \fIcomm\fP, \fIrequest\fP, \fIierror\fP) - - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIsendbuf\fP - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIrecvbuf\fP - INTEGER, INTENT(IN), ASYNCHRONOUS :: \fIsendcounts(*)\fP, \fIsdispls(*),\fP - \fIrecvcounts(*)\fP, \fIrdispls(*)\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIsendtype\fP, \fIrecvtype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -MPI_Alltoallv_init(\fIsendbuf\fP, \fIsendcounts\fP, \fIsdispls\fP, \fIsendtype\fP, \fIrecvbuf\fP, \fIrecvcounts\fP, - \fIrdispls\fP, \fIrecvtype\fP, \fIcomm\fP, \fIinfo\fP, \fIrequest\fP, \fIierror\fP) - - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIsendbuf\fP - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIrecvbuf\fP - INTEGER, INTENT(IN), ASYNCHRONOUS :: \fIsendcounts(*)\fP, \fIsdispls(*),\fP - \fIrecvcounts(*)\fP, \fIrdispls(*)\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIsendtype\fP, \fIrecvtype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Info), INTENT(IN) :: \fIinfo\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1.2i -sendbuf -Starting address of send buffer. -.TP 1.2i -sendcounts -Integer array, where entry i specifies the number of elements to send -to rank i. -.TP 1.2i -sdispls -Integer array, where entry i specifies the displacement (offset from -\fIsendbuf\fP, in units of \fIsendtype\fP) from which to send data to -rank i. 
-.TP 1.2i -sendtype -Datatype of send buffer elements. -.TP 1.2i -recvcounts -Integer array, where entry j specifies the number of elements to -receive from rank j. -.TP 1.2i -rdispls -Integer array, where entry j specifies the displacement (offset from -\fIrecvbuf\fP, in units of \fIrecvtype\fP) to which data from rank j -should be written. -.TP 1.2i -recvtype -Datatype of receive buffer elements. -.TP 1.2i -comm -Communicator over which data is to be exchanged. -.TP 1.2i -info -Info (handle, persistent only) - -.SH OUTPUT PARAMETERS -.ft R -.TP 1.2i -recvbuf -Address of receive buffer. -.TP 1.2i -request -Request (handle, non-blocking only). -.ft R -.TP 1.2i -IERROR -Fortran only: Error status. - -.SH DESCRIPTION -.ft R -MPI_Alltoallv is a generalized collective operation in which all -processes send data to and receive data from all other processes. It -adds flexibility to MPI_Alltoall by allowing the user to specify data -to send and receive vector-style (via a displacement and element -count). The operation of this routine can be thought of as follows, -where each process performs 2n (n being the number of processes in -communicator \fIcomm\fP) independent point-to-point communications -(including communication with itself). -.sp -.nf - MPI_Comm_size(\fIcomm\fP, &n); - for (i = 0, i < n; i++) - MPI_Send(\fIsendbuf\fP + \fIsdispls\fP[i] * extent(\fIsendtype\fP), - \fIsendcounts\fP[i], \fIsendtype\fP, i, ..., \fIcomm\fP); - for (i = 0, i < n; i++) - MPI_Recv(\fIrecvbuf\fP + \fIrdispls\fP[i] * extent(\fIrecvtype\fP), - \fIrecvcounts\fP[i], \fIrecvtype\fP, i, ..., \fIcomm\fP); -.fi -.sp -Process j sends the k-th block of its local \fIsendbuf\fP to process -k, which places the data in the j-th block of its local -\fIrecvbuf\fP. -.sp -When a pair of processes exchanges data, each may pass different -element count and datatype arguments so long as the sender specifies -the same amount of data to send (in bytes) as the receiver expects -to receive. 
-.sp -Note that process i may send a different amount of data to process j -than it receives from process j. Also, a process may send entirely -different amounts of data to different processes in the communicator. - -.sp -WHEN COMMUNICATOR IS AN INTER-COMMUNICATOR -.sp -When the communicator is an inter-communicator, the gather operation occurs in two phases. The data is gathered from all the members of the first group and received by all the members of the second group. Then the data is gathered from all the members of the second group and received by all the members of the first. The operation exhibits a symmetric, full-duplex behavior. -.sp -The first group defines the root process. The root process uses MPI_ROOT as the value of \fIroot\fR. All other processes in the first group use MPI_PROC_NULL as the value of \fIroot\fR. All processes in the second group use the rank of the root process in the first group as the value of \fIroot\fR. -.sp -When the communicator is an intra-communicator, these groups are the same, and the operation occurs in a single phase. -.sp - -.SH USE OF IN-PLACE OPTION -When the communicator is an intracommunicator, you can perform an all-to-all operation in-place (the output buffer is used as the input buffer). Use the variable MPI_IN_PLACE as the value of \fIsendbuf\fR. In this case, \fIsendcounts\fR, \fIsdispls\fP, and \fIsendtype\fR are ignored. The input data of each process is assumed to be in the area where that process would receive its own contribution to the receive buffer. - -.SH NOTES -.sp -The specification of counts and displacements should not cause -any location to be written more than once. -.sp -All arguments on all processes are significant. The \fIcomm\fP argument, -in particular, must describe the same communicator on all processes. -.sp -The offsets of \fIsdispls\fP and \fIrdispls\fP are measured in units -of \fIsendtype\fP and \fIrecvtype\fP, respectively. 
Compare this to -MPI_Alltoallw, where these offsets are measured in bytes. - -.SH ERRORS -.ft R -Almost all MPI routines return an error value; C routines as -the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.nf -MPI_Alltoall -MPI_Alltoallw - diff --git a/ompi/mpi/man/man3/MPI_Alltoallv_init.3in b/ompi/mpi/man/man3/MPI_Alltoallv_init.3in deleted file mode 100644 index 6cc7026e897..00000000000 --- a/ompi/mpi/man/man3/MPI_Alltoallv_init.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Alltoallv.3 diff --git a/ompi/mpi/man/man3/MPI_Alltoallw.3in b/ompi/mpi/man/man3/MPI_Alltoallw.3in deleted file mode 100644 index 4407f10c96a..00000000000 --- a/ompi/mpi/man/man3/MPI_Alltoallw.3in +++ /dev/null @@ -1,235 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. 
-.\" $COPYRIGHT$ -.TH MPI_Alltoallw 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" - -.SH NAME -\fBMPI_Alltoallw, MPI_Ialltoallw, MPI_Alltoallw_init\fP \- All processes send data of different types to, and receive data of different types from, all processes - -.SH SYNTAX -.ft R - -.SH C Syntax -.nf -#include -int MPI_Alltoallw(const void *\fIsendbuf\fP, const int \fIsendcounts\fP[], - const int \fIsdispls\fP[], const MPI_Datatype \fIsendtypes\fP[], - void *\fIrecvbuf\fP, const int \fIrecvcounts\fP[], const int \fIrdispls\fP[], - const MPI_Datatype \fIrecvtypes\fP[], MPI_Comm \fIcomm\fP) - -int MPI_Ialltoallw(const void *\fIsendbuf\fP, const int \fIsendcounts\fP[], - const int \fIsdispls\fP[], const MPI_Datatype \fIsendtypes\fP[], - void *\fIrecvbuf\fP, const int \fIrecvcounts\fP[], const int \fIrdispls\fP[], - const MPI_Datatype \fIrecvtypes\fP[], MPI_Comm \fIcomm\fP, - MPI_Request \fI*request\fP) - -int MPI_Alltoallw_init(const void *\fIsendbuf\fP, const int \fIsendcounts\fP[], - const int \fIsdispls\fP[], const MPI_Datatype \fIsendtypes\fP[], - void *\fIrecvbuf\fP, const int \fIrecvcounts\fP[], const int \fIrdispls\fP[], - const MPI_Datatype \fIrecvtypes\fP[], MPI_Comm \fIcomm\fP, MPI_Info \fIinfo\fP, - MPI_Request \fI*request\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_ALLTOALLW(\fISENDBUF, SENDCOUNTS, SDISPLS, SENDTYPES, - RECVBUF, RECVCOUNTS, RDISPLS, RECVTYPES, COMM, IERROR\fP) - - \fISENDBUF(*), RECVBUF(*)\fP - INTEGER \fISENDCOUNTS(*), SDISPLS(*), SENDTYPES(*)\fP - INTEGER \fIRECVCOUNTS(*), RDISPLS(*), RECVTYPES(*)\fP - INTEGER \fICOMM, IERROR\fP - -MPI_IALLTOALLW(\fISENDBUF, SENDCOUNTS, SDISPLS, SENDTYPES, - RECVBUF, RECVCOUNTS, RDISPLS, RECVTYPES, COMM, REQUEST, IERROR\fP) - - \fISENDBUF(*), RECVBUF(*)\fP - INTEGER \fISENDCOUNTS(*), SDISPLS(*), SENDTYPES(*)\fP - INTEGER \fIRECVCOUNTS(*), RDISPLS(*), RECVTYPES(*)\fP - INTEGER \fICOMM, REQUEST, IERROR\fP - -MPI_ALLTOALLW_INIT(\fISENDBUF, SENDCOUNTS, SDISPLS, SENDTYPES, - RECVBUF, RECVCOUNTS, RDISPLS, RECVTYPES, COMM, INFO, REQUEST, IERROR\fP) - - \fISENDBUF(*), RECVBUF(*)\fP - INTEGER \fISENDCOUNTS(*), SDISPLS(*), SENDTYPES(*)\fP - INTEGER \fIRECVCOUNTS(*), RDISPLS(*), RECVTYPES(*)\fP - INTEGER \fICOMM, INFO, REQUEST, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Alltoallw(\fIsendbuf\fP, \fIsendcounts\fP, \fIsdispls\fP, \fIsendtypes\fP, \fIrecvbuf\fP, \fIrecvcounts\fP, - \fIrdispls\fP, \fIrecvtypes\fP, \fIcomm\fP, \fIierror\fP) - - TYPE(*), DIMENSION(..), INTENT(IN) :: \fIsendbuf\fP - TYPE(*), DIMENSION(..) 
:: \fIrecvbuf\fP - INTEGER, INTENT(IN) :: \fIsendcounts(*)\fP, \fIsdispls(*)\fP, \fIrecvcounts(*),\fP - \fIrdispls(*)\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIsendtypes(*)\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIrecvtypes(*)\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -MPI_Ialltoallw(\fIsendbuf\fP, \fIsendcounts\fP, \fIsdispls\fP, \fIsendtypes\fP, \fIrecvbuf\fP, - \fIrecvcounts\fP, \fIrdispls\fP, \fIrecvtypes\fP, \fIcomm\fP, \fIrequest\fP, \fIierror\fP) - - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIsendbuf\fP - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIrecvbuf\fP - INTEGER, INTENT(IN), ASYNCHRONOUS :: \fIsendcounts(*)\fP, \fIsdispls(*),\fP - \fIrecvcounts(*)\fP, \fIrdispls(*)\fP - TYPE(MPI_Datatype), INTENT(IN), ASYNCHRONOUS :: \fIsendtypes(*),\fP - \fIrecvtypes(*)\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -MPI_Alltoallw_init(\fIsendbuf\fP, \fIsendcounts\fP, \fIsdispls\fP, \fIsendtypes\fP, \fIrecvbuf\fP, - \fIrecvcounts\fP, \fIrdispls\fP, \fIrecvtypes\fP, \fIcomm\fP, fIinfo\fP, \fIrequest\fP, \fIierror\fP) - - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIsendbuf\fP - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIrecvbuf\fP - INTEGER, INTENT(IN), ASYNCHRONOUS :: \fIsendcounts(*)\fP, \fIsdispls(*),\fP - \fIrecvcounts(*)\fP, \fIrdispls(*)\fP - TYPE(MPI_Datatype), INTENT(IN), ASYNCHRONOUS :: \fIsendtypes(*),\fP - \fIrecvtypes(*)\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Info), INTENT(IN) :: \fIinfo\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1.2i -sendbuf -Starting address of send buffer. -.TP 1.2i -sendcounts -Integer array, where entry i specifies the number of elements to send -to rank i. 
-.TP 1.2i -sdispls -Integer array, where entry i specifies the displacement (in bytes, -offset from \fIsendbuf\fP) from which to send data to rank i. -.TP 1.2i -sendtypes -Datatype array, where entry i specifies the datatype to use when -sending data to rank i. -.TP 1.2i -recvcounts -Integer array, where entry j specifies the number of elements to -receive from rank j. -.TP 1.2i -rdispls -Integer array, where entry j specifies the displacement (in bytes, -offset from \fIrecvbuf\fP) to which data from rank j should -be written. -.TP 1.2i -recvtypes -Datatype array, where entry j specifies the datatype to use when -receiving data from rank j. -.TP 1.2i -comm -Communicator over which data is to be exchanged. -.TP 1.2i -info -Info (handle, persistent only) - -.SH OUTPUT PARAMETERS -.ft R -.TP 1.2i -recvbuf -Address of receive buffer. -.TP 1.2i -request -Request (handle, non-blocking only). -.ft R -.TP 1.2i -IERROR -Fortran only: Error status. - -.SH DESCRIPTION -.ft R -MPI_Alltoallw is a generalized collective operation in which all -processes send data to and receive data from all other processes. It -adds flexibility to MPI_Alltoallv by allowing the user to specify the -datatype of individual data blocks (in addition to displacement and -element count). Its operation can be thought of in the following way, -where each process performs 2n (n being the number of processes in -communicator \fIcomm\fP) independent point-to-point communications -(including communication with itself). -.sp -.nf - MPI_Comm_size(\fIcomm\fP, &n); - for (i = 0, i < n; i++) - MPI_Send(\fIsendbuf\fP + \fIsdispls\fP[i], \fIsendcounts\fP[i], - \fIsendtypes\fP[i], i, ..., \fIcomm\fP); - for (i = 0, i < n; i++) - MPI_Recv(\fIrecvbuf\fP + \fIrdispls\fP[i], \fIrecvcounts\fP[i], - \fIrecvtypes\fP[i], i, ..., \fIcomm\fP); -.fi -.sp -Process j sends the k-th block of its local \fIsendbuf\fP to process -k, which places the data in the j-th block of its local -\fIrecvbuf\fP. 
-.sp -When a pair of processes exchanges data, each may pass different -element count and datatype arguments so long as the sender specifies -the same amount of data to send (in bytes) as the receiver expects -to receive. -.sp -Note that process i may send a different amount of data to process j -than it receives from process j. Also, a process may send entirely -different amounts and types of data to different processes in the -communicator. - -WHEN COMMUNICATOR IS AN INTER-COMMUNICATOR -.sp -When the communicator is an inter-communicator, the gather operation occurs in two phases. The data is gathered from all the members of the first group and received by all the members of the second group. Then the data is gathered from all the members of the second group and received by all the members of the first. The operation exhibits a symmetric, full-duplex behavior. -.sp -The first group defines the root process. The root process uses MPI_ROOT as the value of \fIroot\fR. All other processes in the first group use MPI_PROC_NULL as the value of \fIroot\fR. All processes in the second group use the rank of the root process in the first group as the value of \fIroot\fR. -.sp -When the communicator is an intra-communicator, these groups are the same, and the operation occurs in a single phase. -.sp - -.SH USE OF IN-PLACE OPTION -When the communicator is an intracommunicator, you can perform an all-to-all operation in-place (the output buffer is used as the input buffer). Use the variable MPI_IN_PLACE as the value of \fIsendbuf\fR. In this case, \fIsendcounts\fR, \fIsdispls\fP, and \fIsendtypes\fR are ignored. The input data of each process is assumed to be in the area where that process would receive its own contribution to the receive buffer. - -.SH NOTES -.sp -The specification of counts, types, and displacements should not cause -any location to be written more than once. -.sp -All arguments on all processes are significant. 
The \fIcomm\fP argument, -in particular, must describe the same communicator on all processes. -.sp -The offsets of \fIsdispls\fP and \fIrdispls\fP are measured in bytes. -Compare this to MPI_Alltoallv, where these offsets are measured in units -of \fIsendtype\fP and \fIrecvtype\fP, respectively. - -.SH ERRORS -.ft R -Almost all MPI routines return an error value; C routines as -the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.nf -MPI_Alltoall -MPI_Alltoallv - diff --git a/ompi/mpi/man/man3/MPI_Alltoallw_init.3in b/ompi/mpi/man/man3/MPI_Alltoallw_init.3in deleted file mode 100644 index 0cca872ba48..00000000000 --- a/ompi/mpi/man/man3/MPI_Alltoallw_init.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Alltoallw.3 diff --git a/ompi/mpi/man/man3/MPI_Attr_delete.3in b/ompi/mpi/man/man3/MPI_Attr_delete.3in deleted file mode 100644 index 2ea3485e04c..00000000000 --- a/ompi/mpi/man/man3/MPI_Attr_delete.3in +++ /dev/null @@ -1,70 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2010-2014 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Attr_delete 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Attr_delete\fP \- Deletes attribute value associated with a key -- use of this routine is deprecated. 
- -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Attr_delete(MPI_Comm\fI comm\fP, int\fI keyval\fP) - -.fi -.SH Fortran Syntax -.nf -INCLUDE 'mpif.h' -MPI_ATTR_DELETE(\fICOMM\fP,\fI KEYVAL\fP, \fIIERROR\fP) - INTEGER \fICOMM\fP,\fI KEYVAL\fP,\fI IERROR\fP - - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -comm -Communicator to which attribute is attached (handle). -.TP 1i -keyval -The key value of the deleted attribute (integer). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -Note that use of this routine is \fIdeprecated\fP as of MPI-2, and -was \fIdeleted\fP in MPI-3. Please use MPI_Comm_delete_attr. This -function does not have a mpi_f08 binding. -.sp -Delete attribute from cache by key. This function invokes the attribute delete function delete_fn specified when the keyval was created. The call will fail if the delete_fn function returns an error code other than MPI_SUCCESS. - -Whenever a communicator is replicated using the function MPI_Comm_dup, all callback copy functions for attributes that are currently set are invoked (in arbitrary order). Whenever a communicator is deleted using the function MPI_Comm_free, all callback delete functions for attributes that are currently set are invoked. - - -.SH NOTES -Note that it is not defined by the MPI standard what happens if the -delete_fn callback invokes other MPI functions. In Open MPI, it is -not valid for delete_fn callbacks (or any of their children) to add or -delete attributes on the same object on which the delete_fn callback -is being invoked. - - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. 
The error handler -may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -MPI_Comm_delete_attr -.br - diff --git a/ompi/mpi/man/man3/MPI_Attr_get.3in b/ompi/mpi/man/man3/MPI_Attr_get.3in deleted file mode 100644 index bcfb2819232..00000000000 --- a/ompi/mpi/man/man3/MPI_Attr_get.3in +++ /dev/null @@ -1,71 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2010-2014 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Attr_get 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Attr_get\fP \- Retrieves attribute value by key -- use of this routine is deprecated. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Attr_get(MPI_Comm \fIcomm\fP, int\fI keyval\fP,void\fI *attribute_val\fP, - int\fI *flag \fP) - -.fi -.SH Fortran Syntax -.nf -INCLUDE 'mpif.h' -MPI_ATTR_GET(\fICOMM\fP,\fI KEYVAL\fP, \fIATTRIBUTE_VAL\fP,\fI FLAG\fP,\fI IERROR\fP) - INTEGER \fICOMM\fP,\fI KEYVAL\fP, \fIATTRIBUTE_VAL\fP,\fI IERROR\fP - LOGICAL \fIFLAG\fP - - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -comm -Communicator to which attribute is attached (handle). -.TP 1i -keyval - Key value (integer). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -attribute_val -Attribute value, unless flag = false. -.TP 1i -flag -True if an attribute value was extracted; false if no attribute is associated with the key. -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -Note that use of this routine is \fIdeprecated\fP as of MPI-2, and -was \fIdeleted\fP in MPI-3. Please use MPI_Comm_get_attr. This -function does not have a mpi_f08 binding. -.sp -Retrieves attribute value by key. 
The call is erroneous if there is no key -with value keyval. On the other hand, the call is correct if the key value exists, but no attribute is attached on comm for that key; in such case, the call returns flag = false. In particular MPI_KEYVAL_INVALID is an erroneous key value. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler -may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - - -.SH SEE ALSO - -MPI_Comm_get_attr -.br diff --git a/ompi/mpi/man/man3/MPI_Attr_put.3in b/ompi/mpi/man/man3/MPI_Attr_put.3in deleted file mode 100644 index c4553a97bc9..00000000000 --- a/ompi/mpi/man/man3/MPI_Attr_put.3in +++ /dev/null @@ -1,73 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2010-2014 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Attr_put 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Attr_put\fP \- Stores attribute value associated with a key -- use of this routine is deprecated. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Attr_put(MPI_Comm \fIcomm\fP, int\fI keyval\fP, void\fI *attribute_val\fP) - -.fi -.SH Fortran Syntax -.nf -INCLUDE 'mpif.h' -MPI_ATTR_PUT(\fICOMM\fP,\fI KEYVAL\fP, \fIATTRIBUTE_VAL\fP,\fI IERROR\fP) - INTEGER \fICOMM\fP,\fI KEYVAL\fP,\fI ATTRIBUTE_VAL\fP,\fI IERROR - - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -comm -Communicator to which attribute will be attached (handle). 
-.TP 1i -keyval -Key value, as returned by MPI_KEYVAL_CREATE (integer). -.TP 1i -attribute_val -Attribute value. - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -Note that use of this routine is \fIdeprecated\fP as of MPI-2, and -was \fIdeleted\fP in MPI-3. Please use MPI_Comm_set_attr. This -function does not have a mpi_f08 binding. -.sp -MPI_Attr_put stores the stipulated attribute value attribute_val for subsequent retrieval by MPI_Attr_get. If the value is already present, then the outcome is as if MPI_Attr_delete was first called to delete the previous value (and the callback function delete_fn was executed), and a new value was next stored. The call is erroneous if there is no key with value keyval; in particular MPI_KEYVAL_INVALID is an erroneous key value. The call will fail if the delete_fn function returned an error code other than MPI_SUCCESS. - -.SH NOTES -.ft R -Values of the permanent attributes MPI_TAG_UB, MPI_HOST, -MPI_IO, and MPI_WTIME_IS_GLOBAL may not be changed. -.sp -The type of the attribute value depends on whether C or Fortran is being used. In C, an attribute value is a pointer (void *); in Fortran, it is a single integer (not a pointer, since Fortran has no pointers and there are systems for which a pointer does not fit in an integer, e.g., any 32-bit address system that uses 64 bits for Fortran DOUBLE PRECISION). -.sp -If an attribute is already present, the delete function (specified when the corresponding keyval was created) will be called. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. 
The error handler -may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.sp -MPI_Comm_set_attr -.br diff --git a/ompi/mpi/man/man3/MPI_Barrier.3.md b/ompi/mpi/man/man3/MPI_Barrier.3.md deleted file mode 100644 index f12f0b73510..00000000000 --- a/ompi/mpi/man/man3/MPI_Barrier.3.md +++ /dev/null @@ -1,55 +0,0 @@ -# NAME - -MPI_Barrier, MPI_Ibarrier - Synchronization between MPI processes in a group - -# Syntax - -## C Syntax -```c -#include -int MPI_Barrier(MPI_Comm) -int MPI_Ibarrier(MPI_Comm comm, MPI_Request *request) -int MPI_barrier_init(MPI_Comm comm, MPI_Info info, MPI_Request *request) -``` -## Fortran Syntax -```fortran -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_BARRIER(COMM, IERROR) - INTEGER COMM, IERROR -MPI_IBARRIER(COMM, REQUEST, IERROR) - INTEGER COMM, REQUEST, IERROR -MPI_BARRIER_INIT(COMM, INFO, REQUEST, IERROR) - INTEGER COMM, INFO, REQUEST, IERROR -``` -## Fortran 2008 Syntax -```fortran -USE mpi_f08 -MPI_Barrier(comm, ierror) - TYPE(MPI_Comm), INTENT(IN) :: comm - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -MPI_Ibarrier(comm, request, ierror) - TYPE(MPI_Comm), INTENT(IN) :: comm - TYPE(MPI_Request), INTENT (OUT) :: request - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -MPI_Barrier_init(comm, info, request, ierror) - TYPE(MPI_Comm), INTENT(IN) :: comm - TYPE(MPI_Info), INTENT(IN) :: info - TYPE(MPI_Request), INTENT (OUT) :: request - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` -# Input Parameter -* `comm` : Communicator (handle). -* `info` : Info (handle, persistent only). -# Output Parameters -* `request` : Request (handle, non-blocking only). -* `IERROR` : Fortran only: Error status (integer). -# Description -An MPI barrier completes after all groups members have entered the barrier. 
-# When Communicator is an Inter-Communicator -When the communicator is an inter-communicator, the barrier operation is performed across all processes in both groups. All processes in the first group may exit the barrier when all processes in the second group have entered the barrier. -# Errors -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -Before the error value is returned, the current MPI error handler is called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with `MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. -# See Also -[`MPI_Bcast`(3)](MPI_Bcast.html) diff --git a/ompi/mpi/man/man3/MPI_Barrier_init.3in b/ompi/mpi/man/man3/MPI_Barrier_init.3in deleted file mode 100644 index 17e1bd261a8..00000000000 --- a/ompi/mpi/man/man3/MPI_Barrier_init.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Barrier.3 diff --git a/ompi/mpi/man/man3/MPI_Bcast.3.md b/ompi/mpi/man/man3/MPI_Bcast.3.md deleted file mode 100644 index 1212c9ef47c..00000000000 --- a/ompi/mpi/man/man3/MPI_Bcast.3.md +++ /dev/null @@ -1,122 +0,0 @@ -# NAME - -`MPI_Bcast`, `MPI_Ibcast` - Broadcasts a message from the process with -rank *root* to all other processes of the group. - -# SYNTAX - -## C Syntax - -```C -#include -int MPI_Bcast(void *buffer, int count, MPI_Datatype datatype, - int root, MPI_Comm comm) - -int MPI_Ibcast(void *buffer, int count, MPI_Datatype datatype, - int root, MPI_Comm comm, MPI_Request *request) -``` - -## Fortran Syntax - -```Fortran -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_BCAST(BUFFER, COUNT, DATATYPE, ROOT, COMM, IERROR) - BUFFER(*) - INTEGER COUNT, DATATYPE, ROOT, COMM, IERROR - -MPI_IBCAST(BUFFER, COUNT, DATATYPE, ROOT, COMM, REQUEST, IERROR) - BUFFER(*) - INTEGER COUNT, DATATYPE, ROOT, COMM, REQUEST, IERROR -``` - -## Fortran 2008 Syntax - -``` Fortran -USE mpi_f08 -MPI_Bcast(buffer, count, datatype, root, comm, ierror) - TYPE(*), DIMENSION(..) :: buffer - INTEGER, INTENT(IN) :: count, root - TYPE(MPI_Datatype), INTENT(IN) :: datatype - TYPE(MPI_Comm), INTENT(IN) :: comm - INTEGER, OPTIONAL, INTENT(OUT) :: ierror - -MPI_Ibcast(buffer, count, datatype, root, comm, request, ierror) - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: buffer - INTEGER, INTENT(IN) :: count, root - TYPE(MPI_Datatype), INTENT(IN) :: datatype - TYPE(MPI_Comm), INTENT(IN) :: comm - TYPE(MPI_Request), INTENT(OUT) :: request - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# INPUT/OUTPUT PARAMETERS - -* `buffer`: Starting address of buffer (choice). -* `count`: Number of entries in buffer (integer). -* `datatype`: Data type of buffer (handle). -* `root`: Rank of broadcast root (integer). -* `comm`: Communicator (handle). - -# OUTPUT PARAMETERS - -* `request`: Request (handle, non-blocking only). -* `IERROR`: Fortran only: Error status (integer). - -# DESCRIPTION - -`MPI_Bcast` broadcasts a message from the process with rank root to all -processes of the group, itself included. It is called by all members of -group using the same arguments for `comm`, `root`. On return, the contents -of root's communication buffer has been copied to all processes. - -General, derived datatypes are allowed for datatype. The type signature -of count, datatype on any process must be equal to the type signature o f -count, datatype at the root. This implies that the amount of data sent -must be equal to the amount received, pairwise between each process and -the root. 
`MPI_Bcast` and all other data-movement collective routines make -this restriction. Distinct type maps between sender and receiver are -still allowed. - -**Example:** Broadcast 100 ints from process 0 to every process in the -group. - -```C -MPI_Comm comm; -int array[100]; -int root=0; -//... -MPI_Bcast( array, 100, MPI_INT, root, comm); -``` - -As in many of our sample code fragments, we assume that some of the -variables (such as comm in the example above) have been assigned -appropriate values. - -# WHEN COMMUNICATOR IS AN INTER-COMMUNICATOR - -When the communicator is an inter-communicator, the root process in the -first group broadcasts data to all the processes in the second group. -The first group defines the root process. That process uses `MPI_ROOT` as -the value of its `root` argument. The remaining processes use -`MPI_PROC_NULL` as the value of their `root` argument. All processes in -the second group use the rank of that root process in the first group as -the value of their `root` argument. The receive buffer arguments of the -processes in the second group must be consistent with the send buffer -argument of the root process in the first group. - -# NOTES - -This function does not support the in-place option. - -# ERRORS - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. - -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. 
diff --git a/ompi/mpi/man/man3/MPI_Bcast_init.3in b/ompi/mpi/man/man3/MPI_Bcast_init.3in deleted file mode 100644 index c0a86bebc97..00000000000 --- a/ompi/mpi/man/man3/MPI_Bcast_init.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Bcast.3 diff --git a/ompi/mpi/man/man3/MPI_Bsend.3.md b/ompi/mpi/man/man3/MPI_Bsend.3.md deleted file mode 100644 index 4f8aeb34764..00000000000 --- a/ompi/mpi/man/man3/MPI_Bsend.3.md +++ /dev/null @@ -1,91 +0,0 @@ -# Name - -`MPI_Bsend` - Basic send with user-specified buffering. - -# Syntax - -## C Syntax - -```c -#include -int MPI_Bsend(const void *buf, int count, MPI_Datatype datatype, - int dest, int tag, MPI_Comm comm) -``` - -## Fortran Syntax - -```fortran -USE MPI -! or the older form: INCLUDE 'mpif.h' - -MPI_BSEND(BUF, COUNT,DATATYPE, DEST, TAG, COMM, IERROR) - BUF(*) - INTEGER COUNT, DATATYPE, DEST, TAG, COMM, IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 -MPI_Bsend(buf, count, datatype, dest, tag, comm, ierror) - TYPE(*), DIMENSION(..), INTENT(IN) :: buf - INTEGER, INTENT(IN) :: count, dest, tag - TYPE(MPI_Datatype), INTENT(IN) :: datatype - TYPE(MPI_Comm), INTENT(IN) :: comm - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Input Parameters - -* `buf` : Initial address of send buffer (choice). -* `count` : Number of entries in send buffer (nonnegative integer). -* `datatype` : Datatype of each send buffer element (handle). -* `dest` : Rank of destination (integer). -* `tag` : Message tag (integer). -* `comm` : Communicator (handle). - -# Output Parameters - -* `IERROR` : Fortran only: Error status (integer). - -# Description - -`MPI_Bsend` performs a buffered-mode, blocking send. - -# Notes - -This send is provided as a convenience function; it allows the user to -send messages without worrying about where they are buffered (because -the user must have provided buffer space with `MPI_Buffer_attach`). 
- -In deciding how much buffer space to allocate, remember that the buffer -space is not available for reuse by subsequent `MPI_Bsend`s unless you -are certain that the message has been received (not just that it should -have been received). For example, this code does not allocate enough -buffer space: -```c -MPI_Buffer_attach( b, n*sizeof(double) + MPI_BSEND_OVERHEAD ); -for (i=0; i - -int MPI_Bsend_init(const void *buf, int count, MPI_Datatype datatype, - int dest, int tag, MPI_Comm comm, MPI_Request *request) -``` - -## Fortran Syntax - -```fortran -USE MPI -! or the older form: INCLUDE 'mpif.h' - -MPI_BSEND_INIT(BUF, COUNT, DATATYPE, DEST, TAG, COMM, REQUEST, IERROR) - BUF(*) - INTEGER COUNT, DATATYPE, DEST, TAG, - INTEGER COMM, REQUEST, IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Bsend_init(buf, count, datatype, dest, tag, comm, request, ierror) - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: buf - INTEGER, INTENT(IN) :: count, dest, tag - TYPE(MPI_Datatype), INTENT(IN) :: datatype - TYPE(MPI_Comm), INTENT(IN) :: comm - TYPE(MPI_Request), INTENT(OUT) :: request - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Input Parameters - -* `buf` : Initial address of send buffer (choice). -* `count` : Number of elements sent (integer). -* `datatype` : Type of each element (handle). -* `dest` : Rank of destination (integer). -* `tag` : Message tag (integer). -* `comm` : Communicator (handle). - -# Output Parameters - -* `request` : Communication request (handle). -* `IERROR` : Fortran only: Error status (integer). - -# Description - -Creates a persistent communication `request` for a buffered mode send, and -binds to it all the arguments of a send operation. - -A communication (send or receive) that uses a persistent `request` is -initiated by the function `MPI_Start`. - - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. 
- -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. - - -# See Also - -[MPI_Send_init(3)](MPI_Send_init.html) -[MPI_Ssend_init(3)](MPI_Ssend_init.html) -[MPI_Rsend_init(3)](MPI_Rsend_init.html) -[MPI_Recv_init(3)](MPI_Recv_init.html) -[MPI_Start(3)](MPI_Start.html) -[MPI_Startall(3)](MPI_Startall.html) diff --git a/ompi/mpi/man/man3/MPI_Buffer_attach.3.md b/ompi/mpi/man/man3/MPI_Buffer_attach.3.md deleted file mode 100644 index 2ea01bf319f..00000000000 --- a/ompi/mpi/man/man3/MPI_Buffer_attach.3.md +++ /dev/null @@ -1,92 +0,0 @@ -# Name - -`MPI_Buffer_attach` - Attaches a user-defined buffer for sending. - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Buffer_attach(void *buf, int size) -``` - -## Fortran Syntax - -```fortran -USE MPI -! or the older form: INCLUDE 'mpif.h' - -MPI_BUFFER_ATTACH(BUF, SIZE, IERROR) - BUF(*) - INTEGER SIZE, IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Buffer_attach(buffer, size, ierror) - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: buffer - INTEGER, INTENT(IN) :: size - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Input Parameters - -* `buf` : Initial buffer address (choice). -* `size` : Buffer size, in bytes (integer). - -# Output Parameter - -* `IERROR` : Fortran only: Error status (integer). - -# Description - -Provides to MPI a buffer in the user's memory to be used for buffering -outgoing messages. The buffer is used only by messages sent in buffered -mode. Only one buffer can be attached to a process at a time. 
- -# Notes - -The size given should be the sum of the sizes of all outstanding Bsends -that you intend to have, plus `MPI_BSEND_OVERHEAD` bytes for each Bsend -that you do. For the purposes of calculating size, you should use -`MPI_Pack_size`. In other words, in the code - -```c -MPI_Buffer_attach( buf, size ); -MPI_Bsend( ..., count=20, datatype=type1, ... ); -//... -MPI_Bsend( ..., count=40, datatype=type2, ... ); -``` -the value of size in the `MPI_Buffer_attach` call should be greater than the value computed by - -```c -MPI_Pack_size( 20, type1, comm, &s1 ); -MPI_Pack_size( 40, type2, comm, &s2 ); -size = s1 + s2 + 2 * MPI_BSEND_OVERHEAD; -``` - -`MPI_BSEND_OVERHEAD` gives the maximum amount of buffer space that may be -used by the Bsend routines. This value is in mpi.h for C and mpif.h -for Fortran. - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. - -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. - - -# See Also - -[MPI_Buffer_detach(3)](MPI_Buffer_detach.html) diff --git a/ompi/mpi/man/man3/MPI_Buffer_detach.3.md b/ompi/mpi/man/man3/MPI_Buffer_detach.3.md deleted file mode 100644 index 3122d034e56..00000000000 --- a/ompi/mpi/man/man3/MPI_Buffer_detach.3.md +++ /dev/null @@ -1,116 +0,0 @@ -# Name - -`MPI_Buffer_detach` - Removes an existing buffer (for use in -in `MPI_Bsend`, etc.) - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Buffer_detach(void *buf, int *size) -``` - -## Fortran Syntax - -```fortran -USE MPI -! 
or the older form: INCLUDE 'mpif.h' - -MPI_BUFFER_DETACH(BUF, SIZE, IERROR) - BUF(*) - INTEGER SIZE, IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Buffer_detach(buffer_addr, size, ierror) - USE, INTRINSIC :: ISO_C_BINDING, ONLY - TYPE(C_PTR), INTENT(OUT) :: buffer_addr - INTEGER, INTENT(OUT) :: size - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Output Parameters - -* `buf` : Initial buffer address (choice). -* `size` : Buffer size, in bytes (integer). -* `IERROR` : Fortran only: Error status (integer). - -# Description - -Detach the buffer currently associated with MPI. The call returns the -address and the size of the detached buffer. This operation will block -until all messages currently in the buffer have been transmitted. Upon -return of this function, the user may reuse or deallocate the space -taken by the buffer. - -Example: Calls to attach and detach buffers. -```c -#define BUFFSIZE 10000 - -int size -char *buff; -MPI_Buffer_attach( malloc(BUFFSIZE), BUFFSIZE); -/* a buffer of 10000 bytes can now be used by MPI_Bsend */ -MPI_Buffer_detach( &buff, &size); -/* Buffer size reduced to zero */ -MPI_Buffer_attach( buff, size); -/* Buffer of 10000 bytes available again */ -``` - -# Notes -The reason that `MPI_Buffer_detach` returns the address and size of -the buffer being detached is to allow nested libraries to replace and -restore the buffer. For example, consider -```c -int size, mysize, idummy; -void *ptr, *myptr, *dummy; -MPI_Buffer_detach( &ptr, &size ); -MPI_Buffer_attach( myptr, mysize ); -/* -... library code ... -*/ -MPI_Buffer_detach( &dummy, &idummy ); -MPI_Buffer_attach( ptr, size ); -``` -This is much like the action of the UNIX signal routine and has the -same strengths (it's simple) and weak‐nesses (it only works for -nested usages). - -For Fortran: The Fortran binding for this routine is different. 
-Because Fortran does not have pointers, it is impossible to provide -a way to use the output of this routine to exchange buffers. In this -case, only the size field is set. - -For C: Even though the buf argument is declared as void, it is really -the address of a void pointer. See Rationale, below, for more details. - -Even though the C functions `MPI_Buffer_attach` and `MPI_Buffer_detach` -both have a first argument of type void*, these arguments are -used differently: A pointer to the buffer is passed to MPI_Buffer_attach; -the address of the pointer is passed to MPI_Buffer_detach, so that this -call can return the pointer value. - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. - -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. - - -# See Also - -[MPI_Buffer_attach(3)](MPI_Buffer_attach.html) -[MPI_Bsend(3)](MPI_Bsend.html) diff --git a/ompi/mpi/man/man3/MPI_Cancel.3.md b/ompi/mpi/man/man3/MPI_Cancel.3.md deleted file mode 100644 index d95f7348fcd..00000000000 --- a/ompi/mpi/man/man3/MPI_Cancel.3.md +++ /dev/null @@ -1,114 +0,0 @@ -# Name - -`MPI_Cancel` - Cancels a communication request. - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Cancel(MPI_Request *request) -``` - -## Fortran Syntax - -```fortran -USE MPI -! 
or the older form: INCLUDE 'mpif.h' - -MPI_CANCEL(REQUEST, IERROR) - INTEGER REQUEST, IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Cancel(request, ierror) - TYPE(MPI_Request), INTENT(IN) :: request - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - - -# Input Parameter - -* `request` : Communication request (handle). - -# Output Parameter - -* `IERROR` : Fortran only: Error status (integer). - -# Description - -The `MPI_Cancel` operation allows pending communications to be canceled. -This is required for cleanup. Posting a send or a receive ties up user -resources (send or receive buffers), and a cancel may be needed to free -these resources gracefully. - -A call to `MPI_Cancel` marks for cancellation a pending, nonblocking -communication operation (send or receive). The cancel call is local. It -returns immediately, possibly before the communication is actually -canceled. It is still necessary to complete a communication that has -been marked for cancellation, using a call to `MPI_Request_free,` -`MPI_Wait`, or `MPI_Test` (or any of the derived operations). - -If a communication is marked for cancellation, then an `MPI_Wait` call for -that communication is guaranteed to return, irrespective of the -activities of other processes (i.e., `MPI_Wait` behaves as a local -function); similarly if `MPI_Test` is repeatedly called in a busy wait -loop for a canceled communication, then `MPI_Test` will eventually be -successful. - -`MPI_Cancel` can be used to cancel a communication that uses a persistent -`request` (see Section 3.9 in the MPI-1 Standard, "Persistent -Communication Requests") in the same way it is used for nonpersistent -`request`s. A successful cancellation cancels the active communication, -but not the `request` itself. After the call to `MPI_Cancel` and the -subsequent call to `MPI_Wait` or `MPI_Test`, the `request` becomes inactive -and can be activated for a new communication. 
- -The successful cancellation of a buffered send frees the buffer space -occupied by the pending message. - -Either the cancellation succeeds or the communication succeeds, but not -both. If a send is marked for cancellation, then it must be the case -that either the send completes normally, in which case the message sent -is received at the destination process, or that the send is successfully -canceled, in which case no part of the message is received at the -destination. Then, any matching receive has to be satisfied by another -send. If a receive is marked for cancellation, then it must be the case -that either the receive completes normally, or that the receive is -successfully canceled, in which case no part of the receive buffer is -altered. Then, any matching send has to be satisfied by another receive. - -If the operation has been canceled, then information to that effect will -be returned in the status argument of the operation that completes the -communication. - -# Notes - -The primary expected use of `MPI_Cancel` is in multi-buffering schemes, -where speculative `MPI_Irecvs` are made. When the computation completes, -some of these `request`s may remain; using `MPI_Cancel` allows the user to -cancel these unsatisfied `request`s. - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. 
- -# See Also - -[`MPI_Probe`(3)](MPI_Probe.html) -[`MPI_Iprobe`(3)](MPI_Iprobe.html) -[`MPI_Test_cancelled`(3)](MPI_Test_cancelled.html) -[`MPI_Cart_coords`(3)](MPI_Cart_coords.html) diff --git a/ompi/mpi/man/man3/MPI_Cart_coords.3.md b/ompi/mpi/man/man3/MPI_Cart_coords.3.md deleted file mode 100644 index aec947104d2..00000000000 --- a/ompi/mpi/man/man3/MPI_Cart_coords.3.md +++ /dev/null @@ -1,66 +0,0 @@ -# Name - -`MPI_Cart_coords` - Determines process coords in Cartesian topology -given rank in group. - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Cart_coords(MPI_Comm comm, int rank, int maxdims, - int coords[]) -``` - -## Fortran Syntax - -```fortran -USE MPI -! or the older form: INCLUDE 'mpif.h' - -MPI_CART_COORDS(COMM, RANK, MAXDIMS, COORDS, IERROR) - INTEGER COMM, RANK, MAXDIMS, COORDS(*), IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Cart_coords(comm, rank, maxdims, coords, ierror) - TYPE(MPI_Comm), INTENT(IN) :: comm - INTEGER, INTENT(IN) :: rank, maxdims - INTEGER, INTENT(OUT) :: coords(maxdims) - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Input Parameters - -* `comm` : Communicator with Cartesian structure (handle). -* `rank` : Rank of a process within group of comm (integer). -* `maxdims` : Length of vector coords in the calling program (integer). - Length of vector coords in the calling program (integer). - -# Output Parameters - -* `coords` : Integer array (of size ndims,which was defined by MPI_Cart_create -call) containing the Cartesian coordinates of specified process -(integer). -* `IERROR` : Fortran only: Error status (integer). - -# Description - -`MPI_Cart_coords` provies a mapping of `rank`s to Cartesian coordinates. - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. -Before the error value is returned, the current MPI error handler is -called. 
By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. diff --git a/ompi/mpi/man/man3/MPI_Cart_create.3.md b/ompi/mpi/man/man3/MPI_Cart_create.3.md deleted file mode 100644 index c3188445d0c..00000000000 --- a/ompi/mpi/man/man3/MPI_Cart_create.3.md +++ /dev/null @@ -1,80 +0,0 @@ -# Name - -`MPI_Cart_create` - Makes a new communicator to which Cartesian -topology information has been attached. - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Cart_create(MPI_Comm comm_old, int ndims, const int dims[], - - const int periods[], int reorder, MPI_Comm *comm_cart) -``` - -## Fortran Syntax - -```fortran -USE MPI -! or the older form: INCLUDE 'mpif.h' - -MPI_CART_CREATE(COMM_OLD, NDIMS, DIMS, PERIODS, REORDER, - COMM_CART, IERROR) - INTEGER COMM_OLD, NDIMS, DIMS(*), COMM_CART, IERROR - LOGICAL PERIODS(*), REORDER -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Cart_create(comm_old, ndims, dims, periods, reorder, comm_cart, ierror) - TYPE(MPI_Comm), INTENT(IN) :: comm_old - INTEGER, INTENT(IN) :: ndims, dims(ndims) - LOGICAL, INTENT(IN) :: periods(ndims), reorder - TYPE(MPI_Comm), INTENT(OUT) :: comm_cart - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - - -# Input Parameters - -* `comm_old` : Input communicator (handle). -* `ndims` : Number of dimensions of Cartesian grid (integer). -* `dims` : Integer array of size ndims specifying the number of processes in -each dimension. -* `periods` : Logical array of size ndims specifying whether the grid is periodic -(true) or not (false) in each dimension. -* `reorder` : Ranking may be reordered (true) or not (false) (logical). - -# Output Parameters - -* `comm_cart` : Communicator with new Cartesian topology (handle). 
-* `IERROR` : Fortran only: Error status (integer). - -# Description - -`MPI_Cart_create` returns a handle to a new communicator to which the -Cartesian topology information is attached. If `reorder` = false then the -rank of each process in the new group is identical to its rank in the -old group. Otherwise, the function may `reorder` the processes (possibly -so as to choose a good embedding of the virtual topology onto the -physical machine). If the total size of the Cartesian grid is smaller -than the size of the group of comm, then some processes are returned -`MPI_COMM_NULL`, in analogy to `MPI_Comm_split`. The call is erroneous if it -specifies a grid that is larger than the group size. - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. diff --git a/ompi/mpi/man/man3/MPI_Cart_get.3.md b/ompi/mpi/man/man3/MPI_Cart_get.3.md deleted file mode 100644 index d4dd0a5f030..00000000000 --- a/ompi/mpi/man/man3/MPI_Cart_get.3.md +++ /dev/null @@ -1,77 +0,0 @@ -# Name - -`MPI_Cart_get` - Retrieves Cartesian topology information associated -with a communicator. - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Cart_get(MPI_Comm comm, int maxdims, int dims[], int periods[], - int coords[]) -``` - -## Fortran Syntax - -```fortran -USE MPI -! 
or the older form: INCLUDE 'mpif.h' - -MPI_CART_GET(COMM, MAXDIMS, DIMS, PERIODS, COORDS, IERROR) - INTEGER COMM, MAXDIMS, DIMS(*), COORDS(*), IERROR - LOGICAL PERIODS(*) -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Cart_get(comm, maxdims, dims, periods, coords, ierror) - TYPE(MPI_Comm), INTENT(IN) :: comm - INTEGER, INTENT(IN) :: maxdims - INTEGER, INTENT(OUT) :: dims(maxdims), coords(maxdims) - LOGICAL, INTENT(OUT) :: periods(maxdims) - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Input Parameters - -* `comm` : Communicator with Cartesian structure (handle). -* `maxdims` : Length of vectors dims, periods, and coords in the calling program -(integer). - -# Output Parameters - -* `dims` : Number of processes for each Cartesian dimension (array of -integers). -* `periods` : Periodicity (true/false) for each Cartesian dimension (array of -logicals). -* `coords` : Coordinates of calling process in Cartesian structure (array of -integers). -* `IERROR` : Fortran only: Error status (integer). - -# Description - -The functions `MPI_Cartdim_get` and `MPI_Cart_get` return the Cartesian -topology information that was associated with a `comm`unicator by -`MPI_Cart_create.` - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. 
- -# See Also - -[`MPI_Cartdim_get`(3)](MPI_Cartdim_get.html) -[`MPI_Cart_create`(3)](MPI_Cart_create.html) diff --git a/ompi/mpi/man/man3/MPI_Cart_map.3.md b/ompi/mpi/man/man3/MPI_Cart_map.3.md deleted file mode 100644 index fee5af3aea1..00000000000 --- a/ompi/mpi/man/man3/MPI_Cart_map.3.md +++ /dev/null @@ -1,79 +0,0 @@ -# Name - -`MPI_Cart_map` - Maps process to Cartesian topology information. - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Cart_map(MPI_Comm comm, int ndims, const int dims[], - const int periods[], int *newrank) -``` - -## Fortran Syntax - -```fortran -USE MPI -! or the older form: INCLUDE 'mpif.h' - -MPI_CART_MAP(COMM, NDIMS, DIMS, PERIODS, NEWRANK, IERROR) - INTEGER COMM, NDIMS, DIMS(*), NEWRANK, IERROR - LOGICAL PERIODS(*) -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Cart_map(comm, ndims, dims, periods, newrank, ierror) - TYPE(MPI_Comm), INTENT(IN) :: comm - INTEGER, INTENT(IN) :: ndims, dims(ndims) - LOGICAL, INTENT(IN) :: periods(ndims) - INTEGER, INTENT(OUT) :: newrank - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Input Parameters - -* `comm` : Input communicator (handle). -* `ndims` : Number of dimensions of Cartesian structure (integer). -* `dims` : Integer array of size ndims specifying the number of processes in -each coordinate direction. -* `periods` : Logical array of size ndims specifying the periodicity specification -in each coordinate direction. - -# Output Parameters - -* `newrank` : Reordered rank of the calling process; `MPI_UNDEFINED` if calling -process does not belong to grid (integer). -* `IERROR` : Fortran only: Error status (integer). - -# Description - -`MPI_Cart_map` and `MPI_Graph_map` can be used to implement all other -topology functions. In general they will not be called by the user -directly, unless he or she is creating additional virtual topology -capability other than that provided by MPI. 
-`MPI_Cart_map` computes an "optimal" placement for the calling process -on the physical machine. A possible implementation of this function is -to always return the rank of the calling process, that is, not to -perform any reordering. - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. - -# See Also - -[`MPI_Graph_map`(3)](MPI_Graph_map.html) diff --git a/ompi/mpi/man/man3/MPI_Cart_rank.3.md b/ompi/mpi/man/man3/MPI_Cart_rank.3.md deleted file mode 100644 index 1ae4e6a33fc..00000000000 --- a/ompi/mpi/man/man3/MPI_Cart_rank.3.md +++ /dev/null @@ -1,72 +0,0 @@ -# Name - -`MPI_Cart_rank` - Determines process rank in communicator given -Cartesian location. - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Cart_rank(MPI_Comm comm, int coords[], int *rank) -``` - -## Fortran Syntax - -```fortran -USE MPI -! or the older form: INCLUDE 'mpif.h' - -MPI_CART_RANK(COMM, COORDS, RANK, IERROR) - INTEGER COMM, COORDS(*), RANK, IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Cart_rank(comm, coords, rank, ierror) - TYPE(MPI_Comm), INTENT(IN) :: comm - INTEGER, INTENT(IN) :: coords(*) - INTEGER, INTENT(OUT) :: rank - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Input Parameters - -* `comm` : Communicator with Cartesian structure (handle). -* `coords` : Integer array (of size ndims, which was defined by `MPI_Cart_create` -call) specifying the Cartesian coordinates of a process. - -# Output Parameter - -* `rank` : Rank of specified process (integer). 
-* `IERROR` : Fortran only: Error status (integer). - -# Description - -For a process group with Cartesian structure, the function `MPI_Cart_rank` -translates the logical process coordinates to process `rank`s as they are -used by the point-to-point routines. For dimension i with periods(i) = -true, if the coordinate, `coords(i)`, is out of range, that is, `coords(i)` -< 0 or `coords(i)` >= `dims(i)`, it is shifted back to the interval 0 =< -`coords(i)` < `dims(i)` automatically. Out-of-range coordinates are -erroneous for nonperiodic dimensions. - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. - -# See Also - -[`MPI_Cart_create`(3)](MPI_Cart_create.html) diff --git a/ompi/mpi/man/man3/MPI_Cart_shift.3.md b/ompi/mpi/man/man3/MPI_Cart_shift.3.md deleted file mode 100644 index 2a6a7bc5c5d..00000000000 --- a/ompi/mpi/man/man3/MPI_Cart_shift.3.md +++ /dev/null @@ -1,112 +0,0 @@ -# Name - -`MPI_Cart_shift` - Returns the shifted source and destination ranks, -given a shift direction and amount. - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Cart_shift(MPI_Comm comm, int direction, int disp, - int *rank_source, int *rank_dest) -``` - -## Fortran Syntax - -```fortran -USE MPI -! 
or the older form: INCLUDE 'mpif.h' - -MPI_CART_SHIFT(COMM, DIRECTION, DISP, RANK_SOURCE, - RANK_DEST, IERROR) - INTEGER COMM, DIRECTION, DISP, RANK_SOURCE - INTEGER RANK_DEST, IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Cart_shift(comm, direction, disp, rank_source, rank_dest, ierror) - TYPE(MPI_Comm), INTENT(IN) :: comm - INTEGER, INTENT(IN) :: direction, disp - INTEGER, INTENT(OUT) :: rank_source, rank_dest - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Input Parameters - -* `comm` : Communicator with Cartesian structure (handle). -* `direction` : Coordinate dimension of shift (integer). -* `disp` : Displacement ( > 0: upward shift, < 0: downward shift) (integer). - -# Output Parameters - -* `rank_source` : Rank of source process (integer). -* `rank_dest` : Rank of destination process (integer). -* `IERROR` : Fortran only: Error status (integer). - -# Description - -If the process topology is a Cartesian structure, an `MPI_Sendrecv` -operation is likely to be used along a coordinate `direction` to perform a -shift of data. As input, `MPI_Sendrecv` takes the rank of a source process -for the receive, and the rank of a destination process for the send. If -the function `MPI_Cart_shift` is called for a Cartesian process group, it -provides the calling process with the above identifiers, which then can -be passed to `MPI_Sendrecv`. The user specifies the coordinate `direction` -and the size of the step (positive or negative). The function is local. - -The `direction` argument indicates the dimension of the shift, i.e., the -coordinate whose value is modified by the shift. The coordinates are -numbered from 0 to ndims-1, where ndims is the number of dimensions. - -Note: The `direction` argument is in the range [0, n-1] for an -n-dimensional Cartesian mesh. - -Depending on the periodicity of the Cartesian group in the specified -coordinate `direction`, `MPI_Cart_shift` provides the identifiers for a -circular or an end-off shift. 
In the case of an end-off shift, the value -`MPI_PROC_NULL` may be returned in ``rank_source`` or ``rank_dest``, indicating -that the source or the destination for the shift is out of range. - -Example: The `comm`unicator, `comm`, has a two-dimensional, periodic, -Cartesian topology associated with it. A two-dimensional array of REALs -is stored one element per process, in variable A. One wishes to skew -this array, by shifting column i (vertically, i.e., along the column) by -i steps. - -```fortran -! find process rank - CALL MPI_COMM_RANK(comm, rank, ierr) -! find Cartesian coordinates - CALL MPI_CART_COORDS(comm, rank, maxdims, coords, ierr) -! compute shift source and destination - CALL MPI_CART_SHIFT(comm, 0, coords(2), source, dest, ierr) -! skew array - CALL MPI_SENDRECV_REPLACE(A, 1, MPI_REAL, dest, 0, source, 0, comm, status, - ierr) -``` - -# Note - -In Fortran, the dimension indicated by DIRECTION = i has DIMS(i+1) nodes, -where DIMS is the array that was used to create the grid. In C, the -dimension indicated by direction = i is the dimension specified by -dims[i]. - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. - -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. diff --git a/ompi/mpi/man/man3/MPI_Cart_sub.3.md b/ompi/mpi/man/man3/MPI_Cart_sub.3.md deleted file mode 100644 index 408e58c6373..00000000000 --- a/ompi/mpi/man/man3/MPI_Cart_sub.3.md +++ /dev/null @@ -1,86 +0,0 @@ -# Name - -`MPI_Cart_sub` - Partitions a communicator into subgroups, which form -lower-dimensional Cartesian subgrids. 
- -# Syntax - -## C Syntax - -```c -#include - -int MPI_Cart_sub(MPI_Comm comm, const int remain_dims[], MPI_Comm *comm_new) -``` - -## Fortran Syntax - -```fortran -USE MPI -! or the older form: INCLUDE 'mpif.h' - -MPI_CART_SUB(COMM, REMAIN_DIMS, COMM_NEW, IERROR) - INTEGER COMM, COMM_NEW, IERROR - LOGICAL REMAIN_DIMS(*) -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Cart_sub(comm, remain_dims, newcomm, ierror) - TYPE(MPI_Comm), INTENT(IN) :: comm - LOGICAL, INTENT(IN) :: remain_dims(*) - TYPE(MPI_Comm), INTENT(OUT) :: newcomm - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Input Parameters - -* `comm` : Communicator with Cartesian structure (handle). -* `remain_dims` : The ith entry of `remain_dims` specifies whether the ith dimension is -kept in the subgrid (true) or is dropped (false) (logical vector). - -# Output Parameters - -* `comm_new` : Communicator containing the subgrid that includes the calling -process (handle). -* `IERROR` : Fortran only: Error status (integer). - -# Description - -If a Cartesian topology has been created with `MPI_Cart_create`, the -function `MPI_Cart_sub` can be used to partition the communicator group -into subgroups that form lower-dimensional Cartesian subgrids, and to -build for each subgroup a communicator with the associated subgrid -Cartesian topology. (This function is closely related to -`MPI_Comm_split`.) - -Example: Assume that `MPI_Cart_create( ..., comm)` has defined a (2 x -3 x 4) grid. Let `remain_dims` = (true, false, true). Then a call to - - MPI_Cart_sub(comm, remain_dims, comm_new) - -will create three communicators, each with eight processes in a 2 x 4 -Cartesian topology. If `remain_dims = (false, false, true)` then the call -to `MPI_Cart_sub(comm, remain_dims, comm_new)` will create six -nonoverlapping communicators, each with four processes, in a -one-dimensional Cartesian topology. 
- -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. - -# See Also - -[`MPI_Cart_create`(3)](MPI_Cart_create.html) -[`MPI_Comm_split`(3)](MPI_Comm_split.html) - diff --git a/ompi/mpi/man/man3/MPI_Cart_sub.3in b/ompi/mpi/man/man3/MPI_Cart_sub.3in deleted file mode 100644 index a895b038bee..00000000000 --- a/ompi/mpi/man/man3/MPI_Cart_sub.3in +++ /dev/null @@ -1,82 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Cart_sub 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Cart_sub \fP \- Partitions a communicator into subgroups, which form lower-dimensional Cartesian subgrids. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Cart_sub(MPI_Comm \fIcomm\fP, const int\fI remain_dims\fP[], MPI_Comm\fI *comm_new\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_CART_SUB(\fICOMM, REMAIN_DIMS, COMM_NEW, IERROR\fP) - INTEGER \fICOMM, COMM_NEW, IERROR\fP - LOGICAL \fIREMAIN_DIMS\fP(*) - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Cart_sub(\fIcomm\fP, \fIremain_dims\fP, \fInewcomm\fP, \fIierror\fP) - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - LOGICAL, INTENT(IN) :: \fIremain_dims(*)\fP - TYPE(MPI_Comm), INTENT(OUT) :: \fInewcomm\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -comm -Communicator with Cartesian structure (handle). -.TP 1i -remain_dims -The ith entry of remain_dims specifies whether the ith dimension is kept in the subgrid (true) or is dropped (false) (logical vector). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -comm_new -Communicator containing the subgrid that includes the calling process (handle). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -If a Cartesian topology has been created with MPI_Cart_create, the function MPI_Cart_sub can be used to partition the communicator group into subgroups that form lower-dimensional Cartesian subgrids, and to build for each subgroup a communicator with the associated subgrid Cartesian topology. (This function is closely related to MPI_Comm_split.) -.sp -\fBExample:\fP Assume that MPI_Cart_create( \&..., comm) has defined a (2 x 3 x 4) grid. Let remain_dims = (true, false, true). Then a call to -.sp -.nf - MPI_Cart_sub(comm, remain_dims, comm_new) -.fi -.sp -will create three communicators, each with eight processes in a 2 x 4 Cartesian topology. If remain_dims = (false, false, true) then the call to MPI_Cart_sub(comm, remain_dims, comm_new) will create six nonoverlapping communicators, each with four processes, in a one-dimensional Cartesian topology. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. 
-.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.sp -.nf -MPI_Cart_create -MPI_Comm_split - diff --git a/ompi/mpi/man/man3/MPI_Cartdim_get.3.md b/ompi/mpi/man/man3/MPI_Cartdim_get.3.md deleted file mode 100644 index 5b8f3b12f28..00000000000 --- a/ompi/mpi/man/man3/MPI_Cartdim_get.3.md +++ /dev/null @@ -1,66 +0,0 @@ -# Name - -`MPI_Cartdim_get` - Retrieves Cartesian topology information -associated with a communicator. - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Cartdim_get(MPI_Comm comm, int *ndims) -``` - -## Fortran Syntax - -```fortran -USE MPI -! or the older form: INCLUDE 'mpif.h' - -MPI_CARTDIM_GET(COMM, NDIMS, IERROR) - INTEGER COMM, NDIMS, IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Cartdim_get(comm, ndims, ierror) - TYPE(MPI_Comm), INTENT(IN) :: comm - INTEGER, INTENT(OUT) :: ndims - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - - -# Input Parameter - -* `comm` : Communicator with Cartesian structure (handle). - -# Output Parameters - -* `ndims` : Number of dimensions of the Cartesian structure (integer). -* `IERROR` : Fortran only: Error status (integer). - -# Description - -`MPI_Cartdim_get` returns the number of dimensions of the Cartesian -structure. - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. 
The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. - -# See Also - -[`MPI_Cart_get`(3)](MPI_Cart_get.html) -[`MPI_Cart_create`(3)](MPI_Cart_create.html) diff --git a/ompi/mpi/man/man3/MPI_Close_port.3.md b/ompi/mpi/man/man3/MPI_Close_port.3.md deleted file mode 100644 index a713e6fc8b8..00000000000 --- a/ompi/mpi/man/man3/MPI_Close_port.3.md +++ /dev/null @@ -1,58 +0,0 @@ -# Name - -`MPI_Close_port` - Releases the specified network address. - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Close_port(const char *port_name) -``` - -## Fortran Syntax - -```fortran -USE MPI -! or the older form: INCLUDE 'mpif.h' - -MPI_CLOSE_PORT(PORT_NAME, IERROR) - CHARACTER*(*) PORT_NAME - INTEGER IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Close_port(port_name, ierror) - CHARACTER(LEN=*), INTENT(IN) :: port_name - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - - -# Input Parameter - -* `port_name` : A port (string). - -# Output Parameter - -* `IERROR` : Fortran only: Error status (integer). - -# Description - -`MPI_Close_port` releases the network address represented by `port_name`. - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. 
diff --git a/ompi/mpi/man/man3/MPI_Comm_accept.3.md b/ompi/mpi/man/man3/MPI_Comm_accept.3.md deleted file mode 100644 index c091c597235..00000000000 --- a/ompi/mpi/man/man3/MPI_Comm_accept.3.md +++ /dev/null @@ -1,78 +0,0 @@ -# Name - -`MPI_Comm_accept` - Establishes communication with a client. - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Comm_accept(const char *port_name, MPI_Info info, int root, MPI_Comm comm, MPI_Comm *newcomm) -``` - -## Fortran Syntax - -```fortran -USE MPI -! or the older form: INCLUDE 'mpif.h' - -MPI_COMM_ACCEPT(PORT_NAME, INFO, ROOT, COMM, NEWCOMM, IERROR) - CHARACTER*(*) PORT_NAME - INTEGER INFO, ROOT, COMM, NEWCOMM, IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Comm_accept(port_name, info, root, comm, newcomm, ierror) - CHARACTER(LEN=*), INTENT(IN) :: port_name - TYPE(MPI_Info), INTENT(IN) :: info - INTEGER, INTENT(IN) :: root - TYPE(MPI_Comm), INTENT(IN) :: comm - TYPE(MPI_Comm), INTENT(OUT) :: newcomm - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - - -# Input Parameters - -* `port_name` : Port name (string, used only on *root*). -* `info` : Options given by root for the accept (handle, used only on root). No -options currently supported. -* `root` : Rank in *comm* of root node (integer). -* `comm` : Intracommunicator over which call is collective (handle). - -# Output Parameters - -* `newcomm` : Intercommunicator with client as remote group (handle) -* `IERROR` : Fortran only: Error status (integer). - -# Description - -`MPI_Comm_accept` establishes communication with a client. It is -collective over the calling communicator. It returns an -intercommunicator that allows communication with the client, after the -client has connected with the `MPI_Comm_accept` function using the -`MPI_Comm_connect` function. -The `port_name` must have been established through a call to -`MPI_Open_port` on the `root`. 
- -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. -See the MPI man page for a full list of MPI error codes. - -# See Also - -[`MPI_Comm_connect`(3)](MPI_Comm_connect.html) -[`MPI_Open_port`(3)](MPI_Open_port.html) diff --git a/ompi/mpi/man/man3/MPI_Comm_c2f.3in b/ompi/mpi/man/man3/MPI_Comm_c2f.3in deleted file mode 100644 index a13fce697dd..00000000000 --- a/ompi/mpi/man/man3/MPI_Comm_c2f.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Comm_f2c.3 diff --git a/ompi/mpi/man/man3/MPI_Comm_call_errhandler.3.md b/ompi/mpi/man/man3/MPI_Comm_call_errhandler.3.md deleted file mode 100644 index 64bf08a93a9..00000000000 --- a/ompi/mpi/man/man3/MPI_Comm_call_errhandler.3.md +++ /dev/null @@ -1,69 +0,0 @@ -# Name - -`MPI_Comm_call_errhandler` - Passes the supplied error code to the -error handler assigned to a communicator - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Comm_call_errhandler(MPI_Comm comm, int errorcode) -``` - -## Fortran Syntax - -```fortran -USE MPI -! or the older form: INCLUDE 'mpif.h' - -MPI_COMM_CALL_ERRHANDLER(COMM, ERRORCODE, IERROR) - INTEGER COMM, ERRORCODE, IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Comm_call_errhandler(comm, errorcode, ierror) - TYPE(MPI_Comm), INTENT(IN) :: comm - INTEGER, INTENT(IN) :: errorcode - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - - -# Input Parameter - -* `comm` : communicator with error handler (handle). -* `errorcode` : error code (integer). - -# Output Parameters - -* `IERROR` : Fortran only: Error status (integer). 
- -# Description - -This function invokes the error handler assigned to the `comm`unicator -`comm` with the supplied error code `errorcode`. If the error handler -was successfully called, the process is not aborted, and the error -handler returns, this function returns `MPI_SUCCESS.` - -# Notes - -Users should note that the default error handler is -`MPI_ERRORS_ARE_FATAL`. Thus, calling this function will abort the -processes in `comm` if the default error handler has not been changed. - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. -See the MPI man page for a full list of MPI error codes. - -# See Also - -[`MPI_Comm_create_errhandler`(3)](MPI_Comm_create_errhandler.html) -[`MPI_Comm_set_errhandler`(3)](MPI_Comm_set_errhandler.html) diff --git a/ompi/mpi/man/man3/MPI_Comm_compare.3.md b/ompi/mpi/man/man3/MPI_Comm_compare.3.md deleted file mode 100644 index 22c0637b5f9..00000000000 --- a/ompi/mpi/man/man3/MPI_Comm_compare.3.md +++ /dev/null @@ -1,65 +0,0 @@ -# Name - -`MPI_Comm_compare` - Compares two communicators. - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Comm_compare(MPI_Comm comm1, MPI_Comm comm2, int *result) -``` - -## Fortran Syntax - -```fortran -USE MPI -! or the older form: INCLUDE 'mpif.h' - -MPI_COMM_COMPARE(COMM1, COMM2, RESULT, IERROR) - INTEGER COMM1, COMM2, RESULT, IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Comm_compare(comm1, comm2, result, ierror) - TYPE(MPI_Comm), INTENT(IN) :: comm1, comm2 - INTEGER, INTENT(OUT) :: result - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - - -# Input Parameters - -* `comm1` : Comm1 (handle). -* `comm2` : Comm2 (handle). - -# Output Parameters - -* `result` : Result of comparison (integer). -* `IERROR` : Fortran only: Error status (integer). 
- -# Description - -`MPI_IDENT` `result`s if and only if `comm1` and `comm2` are handles for the -same object (identical groups and same contexts). `MPI_CONGRUENT` results -if the underlying groups are identical in constituents and rank order; -these communicators differ only by context. `MPI_SIMILAR` results of the -group members of both communicators are the same but the rank order -differs. `MPI_UNEQUAL` results otherwise. - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. diff --git a/ompi/mpi/man/man3/MPI_Comm_connect.3.md b/ompi/mpi/man/man3/MPI_Comm_connect.3.md deleted file mode 100644 index 1d38e3d9353..00000000000 --- a/ompi/mpi/man/man3/MPI_Comm_connect.3.md +++ /dev/null @@ -1,86 +0,0 @@ -# Name - -`MPI_Comm_connect` - Establishes communication with a server. - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Comm_connect(const char *port_name, MPI_Info info, int root, - MPI_Comm comm, MPI_Comm *newcomm) -``` - -## Fortran Syntax - -```fortran -USE MPI -! 
or the older form: INCLUDE 'mpif.h' - -MPI_COMM_CONNECT(PORT_NAME, INFO, ROOT, COMM, NEWCOMM, IERROR) - CHARACTER*(*) PORT_NAME - INTEGER INFO, ROOT, COMM, NEWCOMM, IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Comm_connect(port_name, info, root, comm, newcomm, ierror) - CHARACTER(LEN=*), INTENT(IN) :: port_name - TYPE(MPI_Info), INTENT(IN) :: info - INTEGER, INTENT(IN) :: root - TYPE(MPI_Comm), INTENT(IN) :: comm - TYPE(MPI_Comm), INTENT(OUT) :: newcomm - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Input Parameters - -* `port_name` : Port name (string, used only on *root*). -* `info` : Options given by root for the connect (handle, used only on root). -No options currently supported. -* `root` : Rank in *comm* of root node (integer). -* `comm` : Intracommunicator over which call is collective (handle). - -# Output Parameters - -* `newcomm` : Intercommunicator with client as remote group (handle) -* `IERROR` : Fortran only: Error status (integer). - -# Description - -`MPI_Comm_connect` establishes communication with a server specified by -`port_name`. It is collective over the calling communicator and returns -an intercommunicator in which the remote group participated in an -`MPI_Comm_accept`. The `MPI_Comm_connect` call must only be called after the -`MPI_Comm_accept` call has been made by the MPI job acting as the server. -If the named port does not exist (or has been closed), `MPI_Comm_connect` -raises an error of class `MPI_ERR_PORT`. -MPI provides no guarantee of fairness in servicing connection attempts. -That is, connection attempts are not necessarily satisfied in the order -in which they were initiated, and competition from other connection -attempts may prevent a particular connection attempt from being -satisfied. -The `port_name` parameter is the address of the server. It must be the -same as the name returned by `MPI_Open_port` on the server. 
- -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. -See the MPI man page for a full list of MPI error codes. - -# See Also - -[`MPI_Comm_accept`(3)](MPI_Comm_accept.html) -[`MPI_Open_port`(3)](MPI_Open_port.html) diff --git a/ompi/mpi/man/man3/MPI_Comm_create.3.md b/ompi/mpi/man/man3/MPI_Comm_create.3.md deleted file mode 100644 index 42daef5bf9d..00000000000 --- a/ompi/mpi/man/man3/MPI_Comm_create.3.md +++ /dev/null @@ -1,85 +0,0 @@ -# Name - -`MPI_Comm_create` - Creates a new communicator. - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Comm_create(MPI_Comm comm, MPI_Group group, MPI_Comm *newcomm) -``` - -## Fortran Syntax - -```fortran -USE MPI -! or the older form: INCLUDE 'mpif.h' - -MPI_COMM_CREATE(COMM, GROUP, NEWCOMM, IERROR) - INTEGER COMM, GROUP, NEWCOMM, IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Comm_create(comm, group, newcomm, ierror) - TYPE(MPI_Comm), INTENT(IN) :: comm - TYPE(MPI_Group), INTENT(IN) :: group - TYPE(MPI_Comm), INTENT(OUT) :: newcomm - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Input Parameter - -* `comm` : Communicator (handle). -* `group` : Group, which is a subset of the group of comm (handle). - -# Output Parameters - -* `newcomm` : New communicator (handle). -* `IERROR` : Fortran only: Error status (integer). - -# Description - -This function creates a new communicator `newcomm` with communication -`group` defined by `group` and a new context. 
The function sets `newcomm` to -a new communicator that spans all the processes that are in the `group`. -It sets `newcomm` to `MPI_COMM_NULL` for processes that are not in the -`group`. -Each process must call with a `group` argument that is a subgroup of the -`group` associated with `comm`; this could be `MPI_GROUP_EMPTY`. The -processes may specify different values for the `group` argument. If a -process calls with a non-empty `group`, then all processes in that `group` -must call the function with the same `group` as argument, that is: the -same processes in the same order. Otherwise the call is erroneous. - -# Notes - -`MPI_Comm_create` provides a means of making a subset of processes for the -purpose of separate MIMD computation, with separate communication space. -`newcomm`, which is created by `MPI_Comm_create`, can be used in -subsequent calls to `MPI_Comm_create` (or other communicator constructors) -to further subdivide a computation into parallel sub-computations. A -more general service is provided by `MPI_Comm_split`. - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. 
- -# See Also - -[`MPI_Comm_split`(3)](MPI_Comm_split.html) -[`MPI_Intercomm_create`(3)](MPI_Intercomm_create.html) -[`MPI_Comm_create_group`(3)](MPI_Comm_create_group.html) diff --git a/ompi/mpi/man/man3/MPI_Comm_create_errhandler.3.md b/ompi/mpi/man/man3/MPI_Comm_create_errhandler.3.md deleted file mode 100644 index 8fee33561a4..00000000000 --- a/ompi/mpi/man/man3/MPI_Comm_create_errhandler.3.md +++ /dev/null @@ -1,83 +0,0 @@ -# Name - -`MPI_Comm_create_errhandler` - Creates an error handler that can be -attached to communicators. - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Comm_create_errhandler(MPI_Comm_errhandler_function *function, - MPI_Errhandler *errhandler) -``` - -## Fortran Syntax - -```fortran -USE MPI -! or the older form: INCLUDE 'mpif.h' - -MPI_COMM_CREATE_ERRHANDLER(FUNCTION, ERRHANDLER, IERROR) - EXTERNAL FUNCTION - INTEGER ERRHANDLER, IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Comm_create_errhandler(comm_errhandler_fn, errhandler, ierror) - PROCEDURE(MPI_Comm_errhandler_function) :: comm_errhandler_fn - TYPE(MPI_Errhandler), INTENT(OUT) :: errhandler - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Deprecated Type Name Note - -MPI-2.2 deprecated the `MPI_Comm_errhandler_fn` and `MPI::Comm::Errhandler_fn` -types in favor of `MPI_Comm_errhandler_function` and `MPI::Comm::Errhandler_function`, -respectively. Open MPI supports both names (indeed, the \_fn names are typedefs -to the \_function names). - -# Input Parameter - -* `function` : User-defined error handling procedure (function). - -# Output Parameters - -* `errhandler` : MPI error handler (handle). -* `IERROR` : Fortran only: Error status (integer). - -# Description - -`MPI_Comm_create_errhandler` creates an error handler that can be attached -to communicators. This `function` is identical to `MPI_Errhandler_create`, -the use of which is deprecated. 
-In C, the user routine should be a `function` of type -`MPI_Comm_errhandler_function`, which is defined as -```c -typedef void MPI_Comm_errhandler_function(MPI_Comm *, int *, ...); -``` -The first argument is the communicator in use. The second is the error -code to be returned by the MPI routine that raised the error. This -typedef replaces `MPI_Handler_function`, the use of which is deprecated. -In Fortran, the user routine should be of this form: -```fortran -SUBROUTINE COMM_ERRHANDLER_FUNCTION(COMM, ERROR_CODE, ...) - INTEGER COMM, ERROR_CODE -``` - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the `function` and Fortran routines in the last argument. -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O `function` errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. diff --git a/ompi/mpi/man/man3/MPI_Comm_create_from_group.3.md b/ompi/mpi/man/man3/MPI_Comm_create_from_group.3.md deleted file mode 100644 index 52739ef9f20..00000000000 --- a/ompi/mpi/man/man3/MPI_Comm_create_from_group.3.md +++ /dev/null @@ -1,89 +0,0 @@ -# Name - -`MPI_Comm_create_from_group` - Creates a new communicator from a group and stringtag - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Comm_create_from_group(MPI_Group group, const char *stringtag, MPI_Info info, MPI_Errhandler errhandler, MPI_Comm *newcomm) -``` - -## Fortran Syntax - -```fortran -USE MPI -! 
or the older form: INCLUDE 'mpif.h' - -MPI_COMM_CREATE_FROM_GROUP(GROUP, STRINGTAG, INFO, ERRHANDLER, NEWCOMM, IERROR) - INTEGER GROUP, INFO, ERRHANDLER, NEWCOMM, IERROR - CHARACTER*(*) STRINGTAG -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Comm_create_from_group(group, stringtag, info, errhandler, newcomm, ierror) - TYPE(MPI_Group), INTENT(IN) :: group - CHARACTER(LEN=*), INTENT(IN) :: stringtag - TYPE(MPI_Info), INTENT(IN) :: info - TYPE(MPI_Errhandler), INTENT(IN) :: errhandler - TYPE(MPI_Comm), INTENT(OUT) :: newcomm - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Input Parameters - -* `group` : Group (handler) -* `stringtag` : Unique identifier for this operation (string) -* `info` : info object (handler) -* `errhandler` : error handler to be attached to the new intra-communicator (handle) - -# Output Parameters - -* `newcomm` : New communicator (handle). -* `IERROR` : Fortran only: Error status (integer). - -# Description - -`MPI_Comm_create_from_group` is similar to `MPI_Comm_create_group`, except -that the set of MPI processes involved in the creation of the new intra-communicator -is specified by a group argument, rather than the group associated with a pre-existing communicator. -If a non-empty group is specified, then all MPI processes in that group must call -the function and each of these MPI processes must provide the same arguments, including -a `group` that contains the same members with the same ordering, and identical `stringtag` -value. In the event that `MPI_GROUP_EMPTY` is supplied as the group argument, then the -call is a local operation and `MPI_COMM_NULL` is returned as `newcomm`. The `stringtag` argument -is analogous to the `tag` used for `MPI_Comm_create_group`. If multiple threads at -a given MPI process perform concurrent `MPI_Comm_create_from_group` operations, -the user must distinguish these operations by providing different `stringtag` arguments. 
The -`stringtag` shall not exceed MPI_MAX_STRINGTAG_LEN characters in length. For C, this includes -space for a null terminating character. - -# Notes - -The `errhandler` argument specifies an error handler to be attached to the new intracommunicator. -The `info` argument provides hints and assertions, possibly MPI implementation dependent, which -indicate desired characteristics and guide communicator creation. MPI_MAX_STRINGTAG_LEN shall have a value -of at least 63. - - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. - -# See Also - -[`MPI_Comm_create_group`(3)](MPI_Comm_create_group.html) diff --git a/ompi/mpi/man/man3/MPI_Comm_create_group.3.md b/ompi/mpi/man/man3/MPI_Comm_create_group.3.md deleted file mode 100644 index dcfa8699290..00000000000 --- a/ompi/mpi/man/man3/MPI_Comm_create_group.3.md +++ /dev/null @@ -1,94 +0,0 @@ -# Name - -`MPI_Comm_create_group` - Creates a new communicator. - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Comm_create_group(MPI_Comm comm, MPI_Group group, int tag, MPI_Comm *newcomm) -``` - -## Fortran Syntax - -```fortran -USE MPI -! 
or the older form: INCLUDE 'mpif.h' - -MPI_COMM_CREATE_GROUP(COMM, GROUP, TAG, NEWCOMM, IERROR) - INTEGER COMM, GROUP, TAG, NEWCOMM, IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Comm_create_group(comm, group, tag, newcomm, ierror) - TYPE(MPI_Comm), INTENT(IN) :: comm - TYPE(MPI_Group), INTENT(IN) :: group - INTEGER, INTENT(IN) :: tag - TYPE(MPI_Comm), INTENT(OUT) :: newcomm - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Input Parameters - -* `comm` : Communicator (handle). -* `group` : Group, which is a subset of the group of comm (handle). -* `tag` : Tag (integer). - -# Output Parameters - -* `newcomm` : New communicator (handle). -* `IERROR` : Fortran only: Error status (integer). - -# Description - -`MPI_Comm_create_group` is similar to `MPI_Comm_create`; however, -`MPI_Comm_create` must be called by all processes in the `group` of `comm`, -whereas `MPI_Comm_create_group` must be called by all processes in `group`, -which is a subgroup of the `group` of `comm`. In addition, -`MPI_Comm_create_group` requires that `comm` is an intracommunicator. -`MPI_Comm_create_group` returns a new intracommunicator, `newcomm`, for -which the `group` argument defines the communication `group`. No cached -information propagates from `comm` to `newcomm`. -Each process must provide a `group` argument that is a sub`group` of the -`group` associated with `comm`; this could be `MPI_GROUP_EMPTY`. If a -non-empty `group` is specified, then all processes in that `group` must call -the function, and each of these processes must provide the same -arguments, including a `group` that contains the same members with the -same ordering. Otherwise the call is erroneous. If the calling process -is a member of the `group` given as the `group` argument, then `newcomm` is -a communicator with `group` as its associated `group`. 
If the calling -process is not a member of `group`, e.g., `group` is `MPI_GROUP_EMPTY`, then -the call is a local operation and `MPI_COMM_NULL` is returned as -`newcomm`. - -# Notes - -`MPI_Comm_create_group` provides a means of making a subset of processes -for the purpose of separate MIMD computation, with separate -communication space. `newcomm`, which is created by -`MPI_Comm_create_group`, can be used in subsequent calls to -`MPI_Comm_create_group` (or other communicator constructors) to further -subdivide a computation into parallel sub-computations. A more general -service is provided by `MPI_Comm_split`. - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. - -# See Also - -[`MPI_Comm_create`(3)](MPI_Comm_create.html) diff --git a/ompi/mpi/man/man3/MPI_Comm_create_keyval.3.md b/ompi/mpi/man/man3/MPI_Comm_create_keyval.3.md deleted file mode 100644 index 260f9e3b632..00000000000 --- a/ompi/mpi/man/man3/MPI_Comm_create_keyval.3.md +++ /dev/null @@ -1,124 +0,0 @@ -# Name - -`MPI_Comm_create_keyval` - Generates a new attribute key. - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Comm_create_keyval(MPI_Comm_copy_attr_function - *comm_copy_attr_fn, MPI_Comm_delete_attr_function - *comm_delete_attr_fn, int *comm_keyval, - void *extra_state) -``` - -## Fortran Syntax (See Fortran 77 Notes) - -```fortran -USE MPI -! 
or the older form: INCLUDE 'mpif.h' - -MPI_COMM_CREATE_KEYVAL(COMM_COPY_ATTR_FN, COMM_DELETE_ATTR_FN, - COMM_KEYVAL, EXTRA_STATE, IERROR) - EXTERNAL COMM_COPY_ATTR_FN, COMM_DELETE_ATTR_FN - INTEGER COMM_KEYVAL, IERROR - - INTEGER(KIND=MPI_ADDRESS_KIND) EXTRA_STATE -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Comm_create_keyval(comm_copy_attr_fn, comm_delete_attr_fn, comm_keyval, - extra_state, ierror) - PROCEDURE(MPI_Comm_copy_attr_function) :: comm_copy_attr_fn - PROCEDURE(MPI_Comm_delete_attr_function) :: comm_delete_attr_fn - INTEGER, INTENT(OUT) :: comm_keyval - INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: extra_state - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Input Parameters - -* `comm_copy_attr_fn` : Copy callback function for `comm_keyval` (function). -* `comm_delete_attr_fn` : Delete callback function for `comm_keyval` (function). -* `extra_state` : Extra state for callback functions. - -# Output Parameter - -* `comm_keyval` : Key value for future access (integer). -* `IERROR` : Fortran only: Error status (integer). - -# Description - -This function replaces `MPI_Keyval_create`, the use of which is -deprecated. The C binding is identical. The Fortran binding differs in -that `extra_state` is an address-sized integer. Also, the copy and -delete callback functions have Fortran bindings that are consistent with -address-sized attributes. -The argument `comm_copy_attr_fn` may be specified as -`MPI_COMM_NULL_COPY_FN` or `MPI_COMM_DUP_FN` from C or Fortran. -`MPI_COMM_NULL_COPY_FN` is a function that does nothing more than -returning `flag = 0` and `MPI_SUCCESS`. `MPI_COMM_DUP_FN` is a simple-minded -copy function that sets `flag = 1`, returns the value of -`attribute_val_in` in `attribute_val_out`, and returns `MPI_SUCCESS`. -These replace the MPI-1 predefined callbacks `MPI_NULL_COPY_FN` and -`MPI_DUP_FN`, the use of which is deprecated. 
-The two C callback functions are: -```c -typedef int MPI_Comm_copy_attr_function(MPI_Comm oldcomm, int comm_keyval, - void *extra_state, void *attribute_val_in, - void *attribute_val_out, int *flag); - -typedef int MPI_Comm_delete_attr_function(MPI_Comm comm, int comm_keyval, - void *attribute_val, void *extra_state); -``` -which are the same as the MPI-1.1 calls but with a new name. The old -names are deprecated. -The two Fortran callback functions are: -```fortran -SUBROUTINE COMM_COPY_ATTR_FN(OLDCOMM, COMM_KEYVAL, EXTRA_STATE, - ATTRIBUTE_VAL_IN, ATTRIBUTE_VAL_OUT, FLAG, IERROR) - INTEGER OLDCOMM, COMM_KEYVAL, IERROR - - INTEGER(KIND=MPI_ADDRESS_KIND) EXTRA_STATE, ATTRIBUTE_VAL_IN, - ATTRIBUTE_VAL_OUT - LOGICAL FLAG - -SUBROUTINE COMM_DELETE_ATTR_FN(COMM, COMM_KEYVAL, ATTRIBUTE_VAL, EXTRA_STATE, - IERROR) - INTEGER COMM, COMM_KEYVAL, IERROR - - INTEGER(KIND=MPI_ADDRESS_KIND) ATTRIBUTE_VAL, EXTRA_STATE -``` - -## Fortran 77 Notes - -The MPI standard prescribes portable Fortran syntax for the -`EXTRA_STATE` argument only for Fortran 90. FORTRAN 77 users may use the -non-portable syntax -```fortran -INTEGER*MPI_ADDRESS_KIND EXTRA_STATE -``` -where `MPI_ADDRESS_KIND` is a constant defined in mpif.h and gives the -length of the declared integer in bytes. - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. -See the MPI man page for a full list of MPI error codes. 
-SEE ALSO - -# See Also diff --git a/ompi/mpi/man/man3/MPI_Comm_delete_attr.3.md b/ompi/mpi/man/man3/MPI_Comm_delete_attr.3.md deleted file mode 100644 index 2b10bb1f2f6..00000000000 --- a/ompi/mpi/man/man3/MPI_Comm_delete_attr.3.md +++ /dev/null @@ -1,81 +0,0 @@ -# Name - -`MPI_Comm_delete_attr` - Deletes attribute value associated with a -key. - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Comm_delete_attr(MPI_Comm comm, int comm_keyval) -``` - -## Fortran Syntax - -```fortran -USE MPI -! or the older form: INCLUDE 'mpif.h' - -MPI_COMM_DELETE_ATTR(COMM, COMM_KEYVAL, IERROR) - INTEGER COMM, COMM_KEYVAL, IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Comm_delete_attr(comm, comm_keyval, ierror) - TYPE(MPI_Comm), INTENT(IN) :: comm - INTEGER, INTENT(IN) :: comm_keyval - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -## Input/Output Parameter - -* `comm` : Communicator from which the attribute is deleted (handle). - -# Input Parameter - -* `comm_keyval` : Key value (integer). - -# Output Parameter - -* `IERROR` : Fortran only: Error status (integer). - -# Description - -`MPI_Comm_delete_attr` deletes an attribute from cache by key. This -function invokes the attribute delete function `delete_fn` specified when -the `comm_keyval` was created. The call will fail if the `delete_fn` -function returns an error code other than `MPI_SUCCESS.` -Whenever a communicator is replicated using the function `MPI_Comm_dup`, -all callback copy functions for attributes that are currently set are -invoked (in arbitrary order). Whenever a communicator is deleted using -the function `MPI_Comm_free`, all callback delete functions for attributes -that are currently set are invoked. -This function is the same as `MPI_Attr_delete` but is needed to match the -`comm`unicator-specific functions introduced in the MPI-2 standard. The -use of `MPI_Attr_delete` is deprecated. 
- -# Notes - -Note that it is not defined by the MPI standard what happens if the -`delete_fn` callback invokes other MPI functions. In Open MPI, it is not -valid for `delete_fn` callbacks (or any of their children) to add or -delete attributes on the same object on which the `delete_fn` callback is -being invoked. - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. diff --git a/ompi/mpi/man/man3/MPI_Comm_disconnect.3.md b/ompi/mpi/man/man3/MPI_Comm_disconnect.3.md deleted file mode 100644 index a687499ebd1..00000000000 --- a/ompi/mpi/man/man3/MPI_Comm_disconnect.3.md +++ /dev/null @@ -1,80 +0,0 @@ -# Name - -`MPI_Comm_disconnect` - Deallocates communicator object and sets -handle to `MPI_COMM_NULL.` - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Comm_disconnect(MPI_Comm *comm) -``` - -## Fortran Syntax - -```fortran -USE MPI -! or the older form: INCLUDE 'mpif.h' - -MPI_COMM_DISCONNECT(COMM, IERROR) - INTEGER COMM, IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Comm_disconnect(comm, ierror) - TYPE(MPI_Comm), INTENT(INOUT) :: comm - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -## Input/Output Parameter - -* `comm` : Communicator (handle). - -# Output Parameter - -* `IERROR` : Fortran only: Error status (integer). - -# Description - -`MPI_Comm_disconnect` waits for all pending communication on `comm` to -complete internally, deallocates the communicator object, and sets the -handle to `MPI_COMM_NULL`. It is a collective operation. 
-It may not be called with the communicator `MPI_COMM_WORLD` or -`MPI_COMM_SELF.` -`MPI_Comm_disconnect` may be called only if all communication is complete -and matched, so that buffered data can be delivered to its destination. -This requirement is the same as for `MPI_Finalize.` -`MPI_Comm_disconnect` has the same action as `MPI_Comm_free`, except that it -waits for pending communication to finish internally and enables the -guarantee about the behavior of disconnected processes. - -# Notes - -To disconnect two processes you may need to call `MPI_Comm_disconnect,` -`MPI_Win_free`, and `MPI_File_close` to remove all communication paths -between the two processes. Note that it may be necessary to disconnect -several communicators (or to free several windows or files) before two -processes are completely independent. - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. - -# See Also - -[`MPI_Comm_connect`(3)](MPI_Comm_connect.html) -[`MPI_Comm_accept`(3)](MPI_Comm_accept.html) diff --git a/ompi/mpi/man/man3/MPI_Comm_dup.3.md b/ompi/mpi/man/man3/MPI_Comm_dup.3.md deleted file mode 100644 index 8c0938c0e03..00000000000 --- a/ompi/mpi/man/man3/MPI_Comm_dup.3.md +++ /dev/null @@ -1,89 +0,0 @@ -# Name - -`MPI_Comm_dup` - Duplicates an existing communicator with all its -cached information. - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Comm_dup(MPI_Comm comm, MPI_Comm *newcomm) -``` - -## Fortran Syntax - -```fortran -USE MPI -! 
or the older form: INCLUDE 'mpif.h' - -MPI_COMM_DUP(COMM, NEWCOMM, IERROR) - INTEGER COMM, NEWCOMM, IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Comm_dup(comm, newcomm, ierror) - TYPE(MPI_Comm), INTENT(IN) :: comm - TYPE(MPI_Comm), INTENT(OUT) :: newcomm - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Input Parameter - -* `comm` : Communicator (handle). - -# Output Parameters - -* `newcomm` : Copy of comm (handle). -* `IERROR` : Fortran only: Error status (integer). - -# Description - -`MPI_Comm_dup` duplicates the existing communicator `comm` with associated -key values. For each key value, the respective copy callback function -determines the attribute value associated with this key in the new -communicator; one particular action that a copy callback may take is to -delete the attribute from the new communicator. Returns in newcomm a new -communicator with the same group, any copied cached information, but a -new context (see Section 5.7.1 of the MPI-1 Standard, -"Functionality"). - -# Notes - -This operation is used to provide a parallel library call with a -duplicate communication space that has the same properties as the -original communicator. This includes any attributes (see below) and -topologies (see Chapter 6, "Process Topologies," in the MPI-1 -Standard). This call is valid even if there are pending point-to-point -communications involving the communicator `comm`. A typical call might -involve an `MPI_Comm_dup` at the beginning of the parallel call, and an -`MPI_Comm_free` of that duplicated communicator at the end of the call. -Other models of communicator management are also possible. -This call applies to both intra- and intercommunicators. -Note that it is not defined by the MPI standard what happens if the -attribute copy callback invokes other MPI functions. 
In Open MPI, it is -not valid for attribute copy callbacks (or any of their children) to add -or delete attributes on the same object on which the attribute copy -callback is being invoked. - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. - -# See Also - -[`MPI_Comm_dup_with_info`(3)](MPI_Comm_dup_with_info.html) -[`MPI_Comm_idup`(3)](MPI_Comm_idup.html) diff --git a/ompi/mpi/man/man3/MPI_Comm_dup_with_info.3.md b/ompi/mpi/man/man3/MPI_Comm_dup_with_info.3.md deleted file mode 100644 index 63102673ec5..00000000000 --- a/ompi/mpi/man/man3/MPI_Comm_dup_with_info.3.md +++ /dev/null @@ -1,89 +0,0 @@ -# Name - -`MPI_Comm_dup_with_info` - Duplicates an existing communicator using -provided info. - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Comm_dup_with_info(MPI_Comm comm, MPI_Info info, MPI_Comm *newcomm) -``` - -## Fortran Syntax - -```fortran -USE MPI -! or the older form: INCLUDE 'mpif.h' - -MPI_COMM_DUP_WITH_INFO(COMM, INFO, NEWCOMM, IERROR) - INTEGER COMM, INFO, NEWCOMM, IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Comm_dup_with_info(comm, info, newcomm, ierror) - TYPE(MPI_Comm), INTENT(IN) :: comm - TYPE(MPI_Info), INTENT(IN) :: info - TYPE(MPI_Comm), INTENT(OUT) :: newcomm - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Input Parameter - -* `comm` : Communicator (handle). -* `info` : Info argument (handle). - -# Output Parameters - -* `newcomm` : Copy of comm (handle). -* `IERROR` : Fortran only: Error status (integer). 
- -# Description - -`MPI_Comm_dup_with_info` acts exactly like `MPI_Comm_dup` except that the -`info` hints associated with the communicator `comm` are not duplicated in -`newcomm`. The hints provided by the argument `info` are associated with -the output communicator `newcomm` instead. -See `MPI_Comm_set_info(3)` for the list of recognized `info` keys. - -# Notes - -This operation is used to provide a parallel library call with a -duplicate communication space that has the same properties as the -original communicator. This includes any attributes (see below) and -topologies (see Chapter 6, "Process Topologies," in the MPI-1 -Standard). This call is valid even if there are pending point-to-point -communications involving the communicator `comm`. A typical call might -involve an `MPI_Comm_dup_with_info` at the beginning of the parallel call, -and an `MPI_Comm_free` of that duplicated communicator at the end of the -call. Other models of communicator management are also possible. -This call applies to both intra- and intercommunicators. -Note that it is not defined by the MPI standard what happens if the -attribute copy callback invokes other MPI functions. In Open MPI, it is -not valid for attribute copy callbacks (or any of their children) to add -or delete attributes on the same object on which the attribute copy -callback is being invoked. - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. 
- -# See Also - -[`MPI_Comm_dup`(3)](MPI_Comm_dup.html) -[`MPI_Comm_idup`(3)](MPI_Comm_idup.html) -[`MPI_Comm_set_info`(3)](MPI_Comm_set_info.html) diff --git a/ompi/mpi/man/man3/MPI_Comm_f2c.3in b/ompi/mpi/man/man3/MPI_Comm_f2c.3in deleted file mode 100644 index 687990fb9f2..00000000000 --- a/ompi/mpi/man/man3/MPI_Comm_f2c.3in +++ /dev/null @@ -1,49 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Comm_f2c 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Comm_f2c, MPI_Comm_c2f, MPI_File_f2c, MPI_File_c2f, MPI_Info_f2c, MPI_Info_c2f, MPI_Message_f2c, MPI_Message_c2f, MPI_Op_f2c, MPI_Op_c2f, MPI_Request_f2c, MPI_Request_c2f, MPI_Type_f2c, MPI_Type_c2f, MPI_Win_f2c, MPI_Win_c2f \fP \- Translates a C handle into a Fortran handle, or vice versa. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -MPI_Comm MPI_Comm_f2c(MPI_Fint \fIcomm\fP) -MPI_Fint MPI_Comm_c2f(MPI_Comm \fIcomm\fP) - -MPI_File MPI_File_f2c(MPI_Fint \fIfile\fP) -MPI_Fint MPI_File_c2f(MPI_File \fIfile\fP) - -MPI_Group MPI_Group_f2c(MPI Fint \fIgroup\fP) -MPI_Fint MPI_Group_c2f(MPI Group \fIgroup\fP) - -MPI_Info MPI_Info_f2c(MPI_Fint \fIinfo\fP) -MPI_Fint MPI_Info_c2f(MPI_Info \fIinfo\fP) - -MPI_Message MPI_Message_f2c(MPI_Fint \fImessage\fP) -MPI_Fint MPI_Message_c2f(MPI_Message \fImessage\fP) - -MPI_Op MPI_Op_f2c(MPI_Fint \fIop\fP) -MPI_Fint MPI_Op_c2f(MPI_Op \fIop\fP) - -MPI_Request MPI_Request_f2c(MPI_Fint \fIrequest\fP) -MPI_Fint MPI_Request_c2f(MPI_Request \fIrequest\fP) - -MPI_Datatype MPI_Type_f2c(MPI_Fint \fIdatatype\fP) -MPI_Fint MPI_Type_c2f(MPI_Datatype \fIdatatype\fP) - -MPI_Win MPI_Win_f2c(MPI_Fint \fIwin\fP) -MPI_Fint MPI_Win_c2f(MPI_Win \fIwin\fP) - -.fi -.SH DESCRIPTION -.ft R -Handles are passed between Fortran and C by using an explicit C wrapper to convert Fortran handles to C handles. 
There is no direct access to C handles in Fortran. The type definition \fIMPI_Fint\fP is provided in C for an integer of the size that matches a Fortran \fIINTEGER\fP; usually, \fIMPI_Fint\fP will be equivalent to \fIint\fP. The handle translation functions are provided in C to convert from a Fortran handle (which is an integer) to a C handle, and vice versa. -.PP -For example, if \fIcomm\fP is a valid Fortran handle to a communicator, then MPI_Comm_f2c returns a valid C handle to that same communicator; if \fIcomm\fP = MPI_COMM_NULL (Fortran value), then MPI_Comm_f2c returns a null C handle; if \fIcomm\fP is an invalid Fortran handle, then MPI_Comm_f2c returns an invalid C handle. -.SH NOTE -This function does not return an error value. Consequently, the result of calling it before MPI_Init or after MPI_Finalize is undefined. diff --git a/ompi/mpi/man/man3/MPI_Comm_free.3in b/ompi/mpi/man/man3/MPI_Comm_free.3in deleted file mode 100644 index 9fa51cd0a7b..00000000000 --- a/ompi/mpi/man/man3/MPI_Comm_free.3in +++ /dev/null @@ -1,69 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2010-2014 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Comm_free 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Comm_free \fP \- Mark a communicator object for deallocation. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Comm_free(MPI_Comm *\fIcomm\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_COMM_FREE(\fICOMM, IERROR\fP) - INTEGER \fICOMM, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Comm_free(\fIcomm\fP, \fIierror\fP) - TYPE(MPI_Comm), INTENT(INOUT) :: \fIcomm\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -comm -Communicator to be destroyed (handle). 
- -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -This operation marks the communicator object for deallocation. The handle is set to MPI_COMM_NULL. Any pending operations that use this communicator will complete normally; the object is actually deallocated only if there are no other active references to it. This call applies to intracommunicators and intercommunicators. Upon actual deallocation, the delete callback functions for all cached attributes (see Section 5.7 in the MPI-1 Standard, "Caching") are called in arbitrary order. - - -.SH NOTES -Note that it is not defined by the MPI standard what happens if the -delete_fn callback invokes other MPI functions. In Open MPI, it is -not valid for delete_fn callbacks (or any of their children) to add or -delete attributes on the same object on which the delete_fn callback -is being invoked. - - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.nf -MPI_Comm_delete_attr diff --git a/ompi/mpi/man/man3/MPI_Comm_free_keyval.3in b/ompi/mpi/man/man3/MPI_Comm_free_keyval.3in deleted file mode 100644 index 38abe527116..00000000000 --- a/ompi/mpi/man/man3/MPI_Comm_free_keyval.3in +++ /dev/null @@ -1,63 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines -.\" Copyright (c) 2020 Google, LLC. All rights reserved. 
-.\" $COPYRIGHT$ -.TH MPI_Comm_free_keyval 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Comm_free_keyval\fP \- Frees attribute key for communicator cache attribute. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Comm_free_keyval(int *\fIcomm_keyval\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_COMM_FREE_KEYVAL(\fICOMM_KEYVAL, IERROR\fP) - INTEGER \fICOMM_KEYVAL, IERROR \fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Comm_free_keyval(\fIcomm_keyval\fP, \fIierror\fP) - INTEGER, INTENT(INOUT) :: \fIcomm_keyval\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -comm_keyval - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - - -.SH DESCRIPTION -.ft R -MPI_Comm_free_keyval frees an extant attribute key. This function sets the value of \fIkeyval\fP to MPI_KEYVAL_INVALID. Note that it is not erroneous to free an attribute key that is in use, because the actual free does not transpire until after all references (in other communicators on the process) to the key have been freed. These references need to be explicitly freed by the program, either via calls to MPI_Comm_delete_attr that free one attribute instance, or by calls to MPI_Comm_free that free all attribute instances associated with the freed communicator. -.sp -This call is identical to the call MPI_Keyval_free but is needed to match the communicator-specific creation function introduced in the MPI-2 standard. The use of MPI_Keyval_free is deprecated. - - -.SH NOTES -.ft R -Key values are global (they can be used with any and all communicators). - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. 
By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_Comm_get_attr.3in b/ompi/mpi/man/man3/MPI_Comm_get_attr.3in deleted file mode 100644 index 50766d01f9e..00000000000 --- a/ompi/mpi/man/man3/MPI_Comm_get_attr.3in +++ /dev/null @@ -1,86 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Comm_get_attr 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Comm_get_attr\fP \- Retrieves attribute value by key. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Comm_get_attr(MPI_Comm \fIcomm\fP, int \fIcomm_keyval\fP, - void *\fIattribute_val\fP, int *\fIflag\fP) - -.fi -.SH Fortran Syntax (see FORTRAN 77 NOTES) -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_COMM_GET_ATTR(\fICOMM, COMM_KEYVAL, ATTRIBUTE_VAL, FLAG, IERROR\fP) - INTEGER \fICOMM, COMM_KEYVAL, IERROR \fP - INTEGER(KIND=MPI_ADDRESS_KIND) \fIATTRIBUTE_VAL\fP - LOGICAL \fIFLAG\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Comm_get_attr(\fIcomm\fP, \fIcomm_keyval\fP, \fIattribute_val\fP, \fIflag\fP, \fIierror\fP) - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - INTEGER, INTENT(IN) :: \fIcomm_keyval\fP - INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(OUT) :: \fIattribute_val\fP - LOGICAL, INTENT(OUT) :: \fIflag\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -comm -Communicator to which the attribute is attached (handle). -.TP 1i -comm_keyval -Key value (integer). 
- -.SH OUTPUT PARAMETER -.ft R -.TP 1i -attribute_val -Attribute value, unless f\fIlag\fP = false. -.TP 1i -flag -False if no attribute is associated with the key (logical). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Comm_get_attr retrieves an attribute value by key. The call is erroneous if there is no key with value \fIkeyval\fP. On the other hand, the call is correct if the key value exists, but no attribute is attached on \fIcomm\fP for that key; in that case, the call returns \fIflag\fP = false. In particular, MPI_KEYVAL_INVALID is an erroneous key value. -.sp -This function replaces MPI_Attr_get, the use of which is deprecated. The C binding is identical. The Fortran binding differs in that \fIattribute_val\fP is an address-sized integer. - -.SH FORTRAN 77 NOTES -.ft R -The MPI standard prescribes portable Fortran syntax for -the \fIATTRIBUTE_VAL\fP argument only for Fortran 90. Sun FORTRAN 77 -users may use the non-portable syntax -.sp -.nf - INTEGER*MPI_ADDRESS_KIND \fIATTRIBUTE_VAL\fP -.fi -.sp -where MPI_ADDRESS_KIND is a constant defined in mpif.h -and gives the length of the declared integer in bytes. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. 
- diff --git a/ompi/mpi/man/man3/MPI_Comm_get_errhandler.3in b/ompi/mpi/man/man3/MPI_Comm_get_errhandler.3in deleted file mode 100644 index ec5894569c2..00000000000 --- a/ompi/mpi/man/man3/MPI_Comm_get_errhandler.3in +++ /dev/null @@ -1,65 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Comm_get_errhandler 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Comm_get_errhandler \fP \- Retrieves error handler associated with a communicator. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Comm_get_errhandler(MPI_Comm \fIcomm\fP, - MPI_Errhandler *\fIerrhandler\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_COMM_GET_ERRHANDLER(\fICOMM, ERRHANDLER, IERROR\fP) - INTEGER \fICOMM, ERRHANDLER, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Comm_get_errhandler(\fIcomm\fP, \fIerrhandler\fP, \fIierror\fP) - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Errhandler), INTENT(OUT) :: \fIerrhandler\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -comm -Communicator (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -errhandler -New error handler for communicator (handle). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Comm_get_errhandler retrieves the error handler currently associated with a communicator. This call is identical to MPI_Errhandler_get, the use of which is deprecated. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. 
By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. -.sp -See the MPI man page for a full list of MPI error codes. - - diff --git a/ompi/mpi/man/man3/MPI_Comm_get_info.3in b/ompi/mpi/man/man3/MPI_Comm_get_info.3in deleted file mode 100644 index 09067e00126..00000000000 --- a/ompi/mpi/man/man3/MPI_Comm_get_info.3in +++ /dev/null @@ -1,76 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines -.\" $COPYRIGHT$ -.TH MPI_Comm_get_info 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Comm_get_info\fP \- Retrieves active communicator info hints -. -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Comm_get_info(MPI_Comm \fIcomm\fP, MPI_Info \fI*info_used\fP) -. -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_COMM_GET_INFO(\fICOMM, INFO_USED, IERROR\fP) - INTEGER \fICOMM, INFO_USED, IERROR \fP -. -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Comm_get_info(\fIcomm\fP, \fIinfo_used\fP, \fIierror\fP) - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Info), INTENT(OUT) :: \fIinfo_used\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -comm -Communicator from which to receive active info hints -. -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -info_used -New info object returned with all active hints on this communicator. -.TP 1i -IERROR -Fortran only: Error status (integer). -. -.SH DESCRIPTION -.ft R -MPI_Comm_get_info returns a new info object containing the hints of -the communicator associated with -.IR comm . 
-The current setting of all hints actually used by the system related -to this communicator is returned in -.IR info_used . -If no such hints exist, a handle to a newly created info object is -returned that contains no key/value pair. The user is responsible for -freeing info_used via MPI_Info_free. -. -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -MPI_Comm_set_errhandler; the predefined error handler -MPI_ERRORS_RETURN may be used to cause error values to be -returned. Note that MPI does not guarantee that an MPI program can -continue past an error. -. -.SH SEE ALSO -MPI_Comm_get_info, -MPI_Info_free diff --git a/ompi/mpi/man/man3/MPI_Comm_get_name.3in b/ompi/mpi/man/man3/MPI_Comm_get_name.3in deleted file mode 100644 index 3ff46aab501..00000000000 --- a/ompi/mpi/man/man3/MPI_Comm_get_name.3in +++ /dev/null @@ -1,74 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Comm_get_name 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Comm_get_name\fP \- Returns the name that was most recently associated with a communicator. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Comm_get_name(MPI_Comm \fIcomm\fP, char *\fIcomm_name\fP, int *\fIresultlen\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_COMM_GET_NAME(\fICOMM, COMM_NAME, RESULTLEN, IERROR\fP) - INTEGER \fICOMM, RESULTLEN, IERROR \fP - CHARACTER*(*) \fICOMM_NAME\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Comm_get_name(\fIcomm\fP, \fIcomm_name\fP, \fIresultlen\fP, \fIierror\fP) - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - CHARACTER(LEN=MPI_MAX_OBJECT_NAME), INTENT(OUT) :: \fIcomm_name\fP - INTEGER, INTENT(OUT) :: \fIresultlen\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -comm -Communicator the name of which is to be returned (handle). -.TP 1i - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -comm_name -Name previously stored on the communicator, or an empty string if no such name exists (string). -.TP 1i -resultlen -Length of returned name (integer). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Comm_get_name returns the last name that was previously associated with the given communicator. The name may be set and retrieved from any language. The same name will be returned independent of the language used. \fIcomm_name\fP should be allocated so that it can hold a resulting string of length MPI_MAX_OBJECT_NAME characters. MPI_Comm_get_name returns a copy of the set name in \fIcomm_name\fP. -.sp -If the user has not associated a name with a communicator, or an error occurs, MPI_Comm_get_name will return an empty string (all spaces in Fortran, "" in C). The three predefined communicators will have predefined names associated with them. Thus, the names of MPI_COMM_WORLD, MPI_COMM_SELF, and MPI_COMM_PARENT will have the default of MPI_COMM_WORLD, MPI_COMM_SELF, and MPI_COMM_PARENT. The fact that the system may have chosen to give a default name to a communicator does not prevent the user from setting a name on the same communicator; doing this removes the old name and assigns the new one. 
- -.SH NOTES -.ft R -It is safe simply to print the string returned by MPI_Comm_get_name, as it is always a valid string even if there was no name. -.sp -Note that associating a name with a communicator has no effect on the semantics of an MPI program, and will (necessarily) increase the store requirement of the program, since the names must be saved. Therefore, there is no requirement that users use these functions to associate names with communicators. However debugging and profiling MPI applications may be made easier if names are associated with communicators, since the debugger or profiler should then be able to present information in a less cryptic manner. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_Comm_get_parent.3in b/ompi/mpi/man/man3/MPI_Comm_get_parent.3in deleted file mode 100644 index faf3134e378..00000000000 --- a/ompi/mpi/man/man3/MPI_Comm_get_parent.3in +++ /dev/null @@ -1,68 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Comm_get_parent 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Comm_get_parent\fP \- Returns the parent intercommunicator of current spawned process. 
- -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Comm_get_parent(MPI_Comm *\fIparent\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_COMM_GET_PARENT(\fIPARENT, IERROR\fP) - INTEGER \fIPARENT, IERROR \fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Comm_get_parent(\fIparent\fP, \fIierror\fP) - TYPE(MPI_Comm), INTENT(OUT) :: \fIparent\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -parent -The parent communicator (handle). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -If a process was started with MPI_Comm_spawn or MPI_Comm_spawn_multiple, MPI_Comm_get_parent returns the "parent" intercommunicator of the current process. This parent intercommunicator is created implicitly inside of MPI_Init and is the same intercommunicator returned by the spawn call made in the parents. -.sp -If the process was not spawned, MPI_Comm_get_parent returns MPI_COMM_NULL. -.sp -After the parent communicator is freed or disconnected, MPI_Comm_get_parent returns MPI_COMM_NULL. - -.SH NOTES -.ft R -MPI_Comm_get_parent returns a handle to a single intercommunicator. Calling MPI_Comm_get_parent a second time returns a handle to the same intercommunicator. Freeing the handle with MPI_Comm_disconnect or MPI_Comm_free will cause other references to the intercommunicator to become invalid (dangling). Note that calling MPI_Comm_free on the parent communicator is not useful. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. 
Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.sp -.nf -MPI_Comm_spawn -MPI_Comm_spawn_multiple - diff --git a/ompi/mpi/man/man3/MPI_Comm_group.3in b/ompi/mpi/man/man3/MPI_Comm_group.3in deleted file mode 100644 index 421b83d2c21..00000000000 --- a/ompi/mpi/man/man3/MPI_Comm_group.3in +++ /dev/null @@ -1,61 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Comm_group 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Comm_group \fP \- Returns the group associated with a communicator. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Comm_group(MPI_Comm \fIcomm\fP, MPI_Group *\fIgroup\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_COMM_GROUP(\fICOMM, GROUP, IERROR\fP) - INTEGER \fICOMM, GROUP, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Comm_group(\fIcomm\fP, \fIgroup\fP, \fIierror\fP) - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Group), INTENT(OUT) :: \fIgroup\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -comm -Communicator. - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -group -Group in communicator (handle). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -If the communicator is an intercommunicator (enables communication between two groups of processes), this function returns the local group. To return the remote group, use the MPI_Comm_remote_group function. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. 
By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_Comm_idup.3in b/ompi/mpi/man/man3/MPI_Comm_idup.3in deleted file mode 100644 index b13dea4066b..00000000000 --- a/ompi/mpi/man/man3/MPI_Comm_idup.3in +++ /dev/null @@ -1,85 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright (c) 2010-2014 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" $COPYRIGHT$ -.TH MPI_Comm_idup 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Comm_idup \fP \- Start the nonblocking duplication of an existing communicator with all its cached information. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Comm_idup(MPI_Comm \fIcomm\fP, MPI_Comm\fI *newcomm\fP, MPI_Request\fI *request\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_COMM_IDUP(\fICOMM, NEWCOMM, REQUEST, IERROR\fP) - INTEGER \fICOMM, NEWCOMM, REQUEST, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Comm_idup(\fIcomm\fP, \fInewcomm\fP, \fIrequest\fP, \fIierror\fP) - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Comm), INTENT(OUT) :: \fInewcomm\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -comm -Communicator (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -newcomm -Copy of comm (handle). -.ft R -.TP 1i -request -Communication request (handle). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). 
- -.SH DESCRIPTION -.ft R -MPI_Comm_idup starts the nonblocking duplication of an existing communicator comm with associated key -values. For each key value, the respective copy callback function determines the attribute value associated with this key in the new communicator; one particular action that a copy callback may take is to delete the attribute from the new communicator. Returns in newcomm a new communicator with the same group, any copied cached information, but a new context (see Section 5.7.1 of the MPI-1 Standard, "Functionality"). The communicator returned in \fInewcomm\fP will not be available until the request is complete. -.sp -The completion of a communicator duplication request can be determined by calling any of MPI_Wait, MPI_Waitany, MPI_Test, or MPI_Testany with the request returned by this function. - -.SH NOTES -This operation is used to provide a parallel -library call with a duplicate communication space that has the same properties as the original communicator. This includes any attributes (see below) and topologies (see Chapter 6, "Process Topologies," in the MPI-1 Standard). This call is valid even if there are pending point-to-point communications involving the communicator comm. A typical call might involve an MPI_Comm_idup at the beginning of the parallel call, and an MPI_Comm_free of that duplicated communicator at the end of the call. Other models of communicator management are also possible. -.sp -This call applies to both intra- and intercommunicators. - -Note that it is not defined by the MPI standard what happens if the -attribute copy callback invokes other MPI functions. In Open MPI, it -is not valid for attribute copy callbacks (or any of their children) -to add or delete attributes on the same object on which the attribute -copy callback is being invoked. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. 
-.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - - -.SH SEE ALSO -MPI_Comm_dup -MPI_Comm_dup_with_info diff --git a/ompi/mpi/man/man3/MPI_Comm_idup_with_info.3in b/ompi/mpi/man/man3/MPI_Comm_idup_with_info.3in deleted file mode 100644 index 13690bf4200..00000000000 --- a/ompi/mpi/man/man3/MPI_Comm_idup_with_info.3in +++ /dev/null @@ -1,91 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2021 Triad National Security, LLC. All rights reserved. -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright (c) 2010-2014 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2021 Triad National Security, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Comm_idup_with_info 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Comm_idup_with_info \fP \- Start the nonblocking duplication of an existing communicator with all its cached information. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Comm_idup_with_info(MPI_Comm \fIcomm\fP, MPI_Info \fIinfo\fP, MPI_Comm\fI *newcomm\fP, MPI_Request\fI *request\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_COMM_IDUP_WITH_INFO(\fICOMM, INFO, NEWCOMM, REQUEST, IERROR\fP) - INTEGER \fICOMM, INFO, NEWCOMM, REQUEST, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Comm_idup_with_info(\fIcomm\fP, \fIinfo\fP, \fInewcomm\fP, \fIrequest\fP, \fIierror\fP) - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Info), INTENT(IN) :: \fIinfo\fP - TYPE(MPI_Comm), INTENT(OUT) :: \fInewcomm\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -comm -Communicator (handle). -info -Info object (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -newcomm -Copy of comm (handle). -.ft R -.TP 1i -request -Communication request (handle). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Comm_idup_with_info starts the nonblocking duplication of an existing communicator comm with associated key -values. For each key value, the respective copy callback function determines the attribute value associated with this key in the new communicator; one particular action that a copy callback may take is to delete the attribute from the new communicator. Returns in newcomm a new communicator with the same group, any copied cached information, but a new context (see Section 5.7.1 of the MPI-1 Standard, "Functionality"). The communicator returned in \fInewcomm\fP will not be available until the request is complete. The hints provided by the supplied \fIinfo\fP argument are associated with the output communicator. -.sp -The completion of a communicator duplication request can be determined by calling any of MPI_Wait, MPI_Waitany, MPI_Test, or MPI_Testany with the request returned by this function. - -.SH NOTES -This operation is used to provide a parallel -library call with a duplicate communication space that has the same properties as the original communicator. 
This includes any attributes (see below) and topologies (see Chapter 6, "Process Topologies," in the MPI-1 Standard). This call is valid even if there are pending point-to-point communications involving the communicator comm. A typical call might involve an MPI_Comm_idup_with_info at the beginning of the parallel call, and an MPI_Comm_free of that duplicated communicator at the end of the call. Other models of communicator management are also possible. -.sp -This call applies to both intra- and intercommunicators. - -Note that it is not defined by the MPI standard what happens if the -attribute copy callback invokes other MPI functions. In Open MPI, it -is not valid for attribute copy callbacks (or any of their children) -to add or delete attributes on the same object on which the attribute -copy callback is being invoked. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - - -.SH SEE ALSO -MPI_Comm_dup -MPI_Comm_idup -MPI_Comm_dup_with_info diff --git a/ompi/mpi/man/man3/MPI_Comm_join.3in b/ompi/mpi/man/man3/MPI_Comm_join.3in deleted file mode 100644 index ae493c9c783..00000000000 --- a/ompi/mpi/man/man3/MPI_Comm_join.3in +++ /dev/null @@ -1,104 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. 
-.\" $COPYRIGHT$ -.TH MPI_Comm_join 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" - -.SH NAME -\fBMPI_Comm_join\fP \- Establishes communication between MPI jobs - -.SH SYNTAX -.ft R - -.SH C Syntax -.nf -#include -int MPI_Comm_join(int \fIfd\fP, MPI_Comm *\fIintercomm\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_COMM_JOIN(\fIFD, INTERCOMM, IERROR\fP) - INTEGER \fIFD, INTERCOMM, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Comm_join(\fIfd\fP, \fIintercomm\fP, \fIierror\fP) - INTEGER, INTENT(IN) :: \fIfd\fP - TYPE(MPI_Comm), INTENT(OUT) :: \fIintercomm\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -fd -socket file descriptor (socket). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -intercomm -Intercommunicator between processes (handle). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Comm_join creates an intercommunicator from the union of two MPI -processes that are connected by a socket. \fIfd\fP is a file -descriptor representing a socket of type SOCK_STREAM (a two-way -reliable byte-stream connection). Nonblocking I/O and asynchronous -notification via SIGIO must not be enabled for the socket. The socket -must be in a connected state, and must be quiescent when MPI_Comm_join -is called. -.sp -MPI_Comm_join must be called by the process at each end of the -socket. It does not return until both processes have called -MPI_Comm_join. - -.SH NOTES -.ft R -There are no MPI library calls for opening and manipulating a socket. -The socket \fIfd\fP can be opened using standard socket API calls. -MPI uses the socket to bootstrap creation of the intercommunicator, -and for nothing else. Upon return, the file descriptor will be open -and quiescent. -.sp -In a multithreaded process, the application must ensure that other -threads do not access the socket while one is in the midst of -calling MPI_Comm_join. 
-.sp -The returned communicator will contain the two processes connected by -the socket, and may be used to establish MPI communication with -additional processes, through the usual MPI communicator-creation -mechanisms. - -.SH ERRORS -.ft R -Almost all MPI routines return an error value; C routines as -the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. -.sp -See the MPI man page for a full list of MPI error codes. - -.SH SEE ALSO -.ft R -.nf -socket(3SOCKET) -MPI_Comm_create -MPI_Comm_group - diff --git a/ompi/mpi/man/man3/MPI_Comm_rank.3in b/ompi/mpi/man/man3/MPI_Comm_rank.3in deleted file mode 100644 index 7c8d0c0386b..00000000000 --- a/ompi/mpi/man/man3/MPI_Comm_rank.3in +++ /dev/null @@ -1,73 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Comm_rank 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Comm_rank\fP \- Determines the rank of the calling process in the communicator. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Comm_rank(MPI_Comm \fIcomm\fP, int\fI *rank\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_COMM_RANK(\fICOMM, RANK, IERROR\fP) - INTEGER \fICOMM, RANK, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Comm_rank(\fIcomm\fP, \fIrank\fP, \fIierror\fP) - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - INTEGER, INTENT(OUT) :: \fIrank\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -comm -Communicator (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -rank -Rank of the calling process in group of comm (integer). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -This function gives the rank of the process in the -particular communicator's group. It is equivalent to accessing the -communicator's group with MPI_Comm_group, computing the rank using MPI_Group_rank, and then freeing the temporary group via MPI_Group_free. -.sp -Many programs will be written with the master-slave model, where one process (such as the rank-zero process) will play a supervisory role, and the other processes will serve as compute nodes. In this framework, MPI_Comm_size and MPI_Comm_rank are useful for determining the roles of the various processes of a communicator. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. 
- -.SH SEE ALSO -.ft R -.sp -.nf -MPI_Comm_group -MPI_Comm_size -MPI_Comm_compare - diff --git a/ompi/mpi/man/man3/MPI_Comm_remote_group.3in b/ompi/mpi/man/man3/MPI_Comm_remote_group.3in deleted file mode 100644 index 6a81efc8c0a..00000000000 --- a/ompi/mpi/man/man3/MPI_Comm_remote_group.3in +++ /dev/null @@ -1,72 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Comm_remote_group 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Comm_remote_group \fP \- Accesses the remote group associated with an intercommunicator. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Comm_remote_group(MPI_Comm \fIcomm\fP, MPI_Group\fI *group\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_COMM_REMOTE_GROUP(\fICOMM, GROUP, IERROR\fP) - INTEGER \fICOMM, GROUP, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Comm_remote_group(\fIcomm\fP, \fIgroup\fP, \fIierror\fP) - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Group), INTENT(OUT) :: \fIgroup\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -comm -Communicator. - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -group -Remote group of communicator. -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Comm_remote_group accesses the remote group associated with an intercommunicator. -.sp -The intercommunicator accessors (MPI_Comm_test_inter, MPI_Comm_remote_size, -MPI_Comm_remote_group) are all local operations. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. 
By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.sp -.nf -MPI_Comm_test_inter -MPI_Comm_remote_size -MPI_Intercomm_create -MPI_Intercomm_merge - diff --git a/ompi/mpi/man/man3/MPI_Comm_remote_size.3in b/ompi/mpi/man/man3/MPI_Comm_remote_size.3in deleted file mode 100644 index 37b8e3b818f..00000000000 --- a/ompi/mpi/man/man3/MPI_Comm_remote_size.3in +++ /dev/null @@ -1,72 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Comm_remote_size 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Comm_remote_size \fP \- Determines the size of the remote group associated with an intercommunicator. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Comm_remote_size(MPI_Comm \fIcomm\fP, int\fI *size\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_COMM_REMOTE_SIZE(\fICOMM, SIZE, IERROR\fP) - INTEGER \fICOMM, SIZE, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Comm_remote_size(\fIcomm\fP, \fIsize\fP, \fIierror\fP) - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - INTEGER, INTENT(OUT) :: \fIsize\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -comm -Communicator (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -size -Number of processes in the remote group of comm (integer). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). 
- -.SH DESCRIPTION -.ft R -MPI_Comm_remote_size determines the size of the remote group associated with an intercommunicator. -.sp -The intercommunicator accessors (MPI_Comm_test_inter, MPI_Comm_remote_size, MPI_Comm_remote_group) are all local operations. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.sp -.nf -MPI_Comm_test_inter -MPI_Comm_remote_group -MPI_Intercomm_create -MPI_Intercomm_merge - diff --git a/ompi/mpi/man/man3/MPI_Comm_set_attr.3in b/ompi/mpi/man/man3/MPI_Comm_set_attr.3in deleted file mode 100644 index ec79d39e7e6..00000000000 --- a/ompi/mpi/man/man3/MPI_Comm_set_attr.3in +++ /dev/null @@ -1,92 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Comm_set_attr 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Comm_set_attr\fP \- Stores attribute value associated with a key. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Comm_set_attr(MPI_Comm \fIcomm\fP, int \fIcomm_keyval\fP, void *\fIattribute_val\fP) - -.fi -.SH Fortran Syntax (see FORTRAN 77 NOTES) -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_COMM_SET_ATTR(\fICOMM, COMM_KEYVAL, ATTRIBUTE_VAL, IERROR\fP) - INTEGER \fICOMM, COMM_KEYVAL, IERROR \fP - INTEGER(KIND=MPI_ADDRESS_KIND) \fIATTRIBUTE_VAL\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Comm_set_attr(\fIcomm\fP, \fIcomm_keyval\fP, \fIattribute_val\fP, \fIierror\fP) - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - INTEGER, INTENT(IN) :: \fIcomm_keyval\fP - INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: \fIattribute_val\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -comm -Communicator from which attribute will be attached (handle). - -.SH INPUT PARAMETERS -.ft R -.TP 1i -comm_keyval -Key value (integer). -.TP 1i -attribute_val -Attribute value. - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Comm_set_attr stores the stipulated attribute value \fIattribute_val\fP for subsequent retrieval by MPI_Comm_get_attr. If the value is already present, then the outcome is as if MPI_Comm_delete_attr was first called to delete the previous value (and the callback function delete_fn was executed), and a new value was next stored. The call is erroneous if there is no key with value \fIcomm_keyval\fP; in particular MPI_KEYVAL_INVALID is an erroneous key value. The call will fail if the delete_fn function returned an error code other than MPI_SUCCESS. -.sp -This function replaces MPI_Attr_put, the use of which is deprecated. The C binding is identical. The Fortran binding differs in that \fIattribute_val\fP is an address-sized integer. - -.SH FORTRAN 77 NOTES -.ft R -The MPI standard prescribes portable Fortran syntax for -the \fIATTRIBUTE_VAL\fP argument only for Fortran 90. 
Sun FORTRAN 77 -users may use the non-portable syntax -.sp -.nf - INTEGER*MPI_ADDRESS_KIND \fIATTRIBUTE_VAL\fP -.fi -.sp -where MPI_ADDRESS_KIND is a constant defined in mpif.h -and gives the length of the declared integer in bytes. - -.SH NOTES -.ft R -Values of the permanent attributes MPI_TAG_UB, MPI_HOST, -MPI_IO, and MPI_WTIME_IS_GLOBAL may not be changed. -.sp -The type of the attribute value depends on whether C or Fortran is being used. In C, an attribute value is a pointer (void *); in Fortran, it is a single, address-size integer system for which a pointer does not fit in an integer. -.sp -If an attribute is already present, the delete function (specified when the corresponding keyval was created) will be called. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_Comm_set_errhandler.3in b/ompi/mpi/man/man3/MPI_Comm_set_errhandler.3in deleted file mode 100644 index 63da5416b78..00000000000 --- a/ompi/mpi/man/man3/MPI_Comm_set_errhandler.3in +++ /dev/null @@ -1,62 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Comm_set_errhandler 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Comm_set_errhandler \fP \- Attaches a new error handler to a communicator. 
- -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Comm_set_errhandler(MPI_Comm \fIcomm\fP, - MPI_Errhandler \fIerrhandler\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_COMM_SET_ERRHANDLER(\fICOMM, ERRHANDLER, IERROR\fP) - INTEGER \fICOMM, ERRHANDLER, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Comm_set_errhandler(\fIcomm\fP, \fIerrhandler\fP, \fIierror\fP) - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Errhandler), INTENT(IN) :: \fIerrhandler\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -comm -Communicator (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -errhandler -New error handler for communicator (handle). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Comm_set_errhandler attaches a new error handler to a communicator. The error handler must be either a predefined error handler or an error handler created by a call to MPI_Comm_create_errhandler. This call is identical to MPI_Errhandler_set, the use of which is deprecated. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_Comm_set_info.3in b/ompi/mpi/man/man3/MPI_Comm_set_info.3in deleted file mode 100644 index 38bee95c823..00000000000 --- a/ompi/mpi/man/man3/MPI_Comm_set_info.3in +++ /dev/null @@ -1,103 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. 
-.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines -.\" $COPYRIGHT$ -.TH MPI_Comm_set_info 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Comm_set_info\fP \- Set communicator info hints -. -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Comm_set_info(MPI_Comm \fIcomm\fP, MPI_Info \fIinfo\fP) -. -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_COMM_SET_INFO(\fICOMM, INFO, IERROR\fP) - INTEGER \fICOMM, INFO, IERROR \fP -. -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Comm_set_info(\fIcomm\fP, \fIinfo\fP, \fIierror\fP) - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Info), INTENT(IN) :: \fIinfo\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -comm -Communicator on which to set info hints -.TP 1i -info -Info object containing hints to be set on -.I comm -. -.SH OUTPUT PARAMETERS -.TP 1i -IERROR -Fortran only: Error status (integer). -. -.SH DESCRIPTION -.ft R -MPI_COMM_SET_INFO sets new values for the hints of the communicator -associated with -.IR comm . -MPI_COMM_SET_INFO is a collective routine. The info object may be -different on each process, but any info entries that an implementation -requires to be the same on all processes must appear with the same -value in each process's -.I info -object. -.sp -The following info key assertions may be accepted by Open MPI: -.sp -\fImpi_assert_no_any_tag\fP (boolean): If set to true, then the -implementation may assume that the process will not use the -MPI_ANY_TAG wildcard on the given -communicator. -.sp -\fImpi_assert_no_any_source\fP (boolean): If set to true, then -the implementation may assume that the process will not use the -MPI_ANY_SOURCE wildcard on the given communicator. 
-.sp -\fImpi_assert_exact_length\fP (boolean): If set to true, then the -implementation may assume that the lengths of messages received by the -process are equal to the lengths of the corresponding receive buffers, -for point-to-point communication operations on the given communicator. -.sp -\fImpi_assert_allow_overtaking\fP (boolean): If set to true, then the -implementation may assume that point-to-point communications on the -given communicator do not rely on the non-overtaking rule specified in -MPI-3.1 Section 3.5. In other words, the application asserts that send -operations are not required to be matched at the receiver in the order -in which the send operations were performed by the sender, and receive -operations are not required to be matched in the order in which they -were performed by the receiver. -. -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -MPI_Comm_set_errhandler; the predefined error handler -MPI_ERRORS_RETURN may be used to cause error values to be -returned. Note that MPI does not guarantee that an MPI program can -continue past an error. -. -.SH SEE ALSO -MPI_Comm_get_info, -MPI_Info_create, -MPI_Info_set, -MPI_Info_free diff --git a/ompi/mpi/man/man3/MPI_Comm_set_name.3in b/ompi/mpi/man/man3/MPI_Comm_set_name.3in deleted file mode 100644 index 0605f7bf2c8..00000000000 --- a/ompi/mpi/man/man3/MPI_Comm_set_name.3in +++ /dev/null @@ -1,83 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines -.\" Copyright (c) 2020 Google, LLC. 
All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Comm_set_name 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Comm_set_name\fP \- Associates a name with a communicator. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Comm_set_name(MPI_Comm \fIcomm\fP, const char *\fIcomm_name\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_COMM_SET_NAME(\fICOMM, COMM_NAME, IERROR\fP) - INTEGER \fICOMM, IERROR \fP - CHARACTER*(*) \fICOMM_NAME\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Comm_set_name(\fIcomm\fP, \fIcomm_name\fP, \fIierror\fP) - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - CHARACTER(LEN=*), INTENT(IN) :: \fIcomm_name\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -comm -Communicator whose identifier is to be set (handle). - -.SH INPUT PARAMETER -.ft R -.TP 1i -comm_name -Character string to be used as the identifier for the communicator (string). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - - -.SH DESCRIPTION -.ft R -MPI_Comm_set_name allows a user to associate a name string with a communicator. The character string that is passed to MPI_Comm_set_name is saved inside the MPI library (so it can be freed by the caller immediately after the call, or allocated on the stack). Leading spaces in \fIname\fP are significant, but trailing ones are not. -.sp -MPI_Comm_set_name is a local (noncollective) operation, which affects only the name of the communicator as seen in the process that made the MPI_Comm_set_name call. There is no requirement that the same (or any) name be assigned to a communicator in every process where it exists. -.sp -The length of the name that can be stored is limited to the value of MPI_MAX_OBJECT_NAME in Fortran and MPI_MAX_OBJECT_NAME-1 in C (to allow for the null terminator). Attempts to set names longer than this will result in truncation of the name. 
MPI_MAX_OBJECT_NAME must have a value of at least 64. - - -.SH NOTES -.ft R -Since MPI_Comm_set_name is provided to help debug code, it is sensible to give the same name to a communicator in all of the processes where it exists, to avoid confusion. -.sp -Regarding name length, under circumstances of store exhaustion, an attempt to set a name of any length could fail; therefore, the value of MPI_MAX_OBJECT_NAME should be viewed only as a strict upper bound on the name length, not a guarantee that setting names of less than this length will always succeed. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -MPI_Comm_get_name -.sp - - diff --git a/ompi/mpi/man/man3/MPI_Comm_size.3in b/ompi/mpi/man/man3/MPI_Comm_size.3in deleted file mode 100644 index b0fc031a6a8..00000000000 --- a/ompi/mpi/man/man3/MPI_Comm_size.3in +++ /dev/null @@ -1,83 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Comm_size 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Comm_size \fP \- Returns the size of the group associated with a communicator. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Comm_size(MPI_Comm \fIcomm\fP, int *\fIsize\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_COMM_SIZE(\fICOMM, SIZE, IERROR\fP) - INTEGER \fICOMM, SIZE, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Comm_size(\fIcomm\fP, \fIsize\fP, \fIierror\fP) - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - INTEGER, INTENT(OUT) :: \fIsize\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -comm -Communicator (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -size -Number of processes in the group of comm (integer). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -This function indicates the number of processes involved in a -communicator. For MPI_COMM_WORLD, it indicates the total number of -processes available. This function is equivalent to accessing the -communicator's group with MPI_Comm_group, computing the size using -MPI_Group_size, and then freeing the temporary group via -MPI_Group_free. If the communicator is an inter-communicator (enables -communication between two groups), this function returns the size of -the local group. To return the size of the remote group, use the -MPI_Comm_remote_size function. -.sp -This call is often used with MPI_Comm_rank to determine the amount of concurrency available for a specific library or program. MPI_Comm_rank indicates the rank of the process that calls it in the range from 0 . . . size-1, where size is the return value of MPI_Comm_size. - -.SH NOTE -.ft R -MPI_COMM_NULL is not considered a valid argument to this function. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. 
The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.sp -.nf -MPI_Comm_group -MPI_Comm_rank -MPI_Comm_compare - diff --git a/ompi/mpi/man/man3/MPI_Comm_spawn.3in b/ompi/mpi/man/man3/MPI_Comm_spawn.3in deleted file mode 100644 index 8e0ced5b6e4..00000000000 --- a/ompi/mpi/man/man3/MPI_Comm_spawn.3in +++ /dev/null @@ -1,234 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright (c) 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Comm_spawn 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Comm_spawn\fP \- Spawns a number of identical binaries. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Comm_spawn(const char *\fIcommand\fP, char *\fIargv\fP[], int \fImaxprocs\fP, - MPI_Info \fIinfo\fP, int \fIroot\fP, MPI_Comm \fIcomm\fP, - MPI_Comm *\fIintercomm\fP, int \fIarray_of_errcodes\fP[]) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_COMM_SPAWN(\fICOMMAND, ARGV, MAXPROCS, INFO, ROOT, COMM, - INTERCOMM, ARRAY_OF_ERRCODES, IERROR\fP) - - CHARACTER*(*) \fICOMMAND, ARGV(*)\fP - INTEGER \fIINFO, MAXPROCS, ROOT, COMM, INTERCOMM, - ARRAY_OF_ERRCODES(*), IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Comm_spawn(\fIcommand\fP, \fIargv\fP, \fImaxprocs\fP, \fIinfo\fP, \fIroot\fP, \fIcomm\fP, \fIintercomm\fP, - \fIarray_of_errcodes\fP, \fIierror\fP) - CHARACTER(LEN=*), INTENT(IN) :: \fIcommand\fP, \fIargv(*)\fP - INTEGER, INTENT(IN) :: \fImaxprocs\fP, \fIroot\fP - TYPE(MPI_Info), INTENT(IN) :: \fIinfo\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Comm), INTENT(OUT) :: \fIintercomm\fP - INTEGER :: \fIarray_of_errcodes(*)\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -command -Name of program to be spawned (string, significant only at \fIroot\fP). -.TP 1i -argv -Arguments to \fIcommand\fP (array of strings, significant only at \fIroot\fP). -.TP 1i -maxprocs -Maximum number of processes to start (integer, significant only at \fIroot\fP). -.TP 1i -info -A set of key-value pairs telling the runtime system where and how to start the processes (handle, significant only at \fIroot\fP). -.TP 1i -root -Rank of process in which previous arguments are examined (integer). -.TP 1i -comm -Intracommunicator containing group of spawning processes (handle). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -intercomm -Intercommunicator between original group and the newly spawned group (handle). -.TP 1i -array_of_errcodes -One code per process (array of integers). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Comm_spawn tries to start \fImaxprocs\fP identical copies of the MPI program specified by \fIcommand\fP, establishing communication with them and returning an intercommunicator. The spawned processes are referred to as children. 
The children have their own MPI_COMM_WORLD, which is separate from that of the parents. MPI_Comm_spawn is collective over \fIcomm\fP, and also may not return until MPI_Init has been called in the children. Similarly, MPI_Init in the children may not return until all parents have called MPI_Comm_spawn. In this sense, MPI_Comm_spawn in the parents and MPI_Init in the children form a collective operation over the union of parent and child processes. The intercommunicator returned by MPI_Comm_spawn contains the parent processes in the local group and the child processes in the remote group. The ordering of processes in the local and remote groups is the same as the ordering of the group of the \fIcomm\fP in the parents and of MPI_COMM_WORLD of the children, respectively. This intercommunicator can be obtained in the children through the function MPI_Comm_get_parent. -.sp -The MPI standard allows an implementation to use the MPI_UNIVERSE_SIZE attribute of MPI_COMM_WORLD to specify the number of processes that will be active in a program. Although this implementation of the MPI standard defines MPI_UNIVERSE_SIZE, it does not allow the user to set its value. If you try to set the value of MPI_UNIVERSE_SIZE, you will get an error message. -.sp -The \fIcommand\fP Argument -.sp -The \fIcommand\fP argument is a string containing the name of a program to be spawned. The string is null-terminated in C. In Fortran, leading and trailing spaces are stripped. MPI looks for the file first in the working directory of the spawning process. -.sp -The \fIargv\fP Argument -.sp -\fIargv\fP is an array of strings containing arguments that are passed -to the program. The first element of \fIargv\fP is the first argument -passed to \fIcommand\fP, not, as is conventional in some contexts, the -command itself. 
The argument list is terminated by NULL in C -and an empty string in Fortran (note that it is the MPI application's -responsibility to ensure that the last entry of the -.I argv -array is an empty string; the compiler will not automatically insert -it). In Fortran, leading and trailing spaces are always stripped, so -that a string consisting of all spaces is considered an empty -string. The constant MPI_ARGV_NULL may be used in C and Fortran -to indicate an empty argument list. In C, this constant is the -same as NULL. -.sp -In C, the MPI_Comm_spawn argument \fIargv\fP differs from the \fIargv\fP argument of \fImain\fP in two respects. First, it is shifted by one element. Specifically, \fIargv\fP[0] of \fImain\fP contains the name of the program (given by \fIcommand\fP). \fIargv\fP[1] of \fImain\fP corresponds to \fIargv\fP[0] in MPI_Comm_spawn, \fIargv\fP[2] of \fImain\fP to \fIargv\fP[1] of MPI_Comm_spawn, and so on. Second, \fIargv\fP of MPI_Comm_spawn must be null-terminated, so that its length can be determined. Passing an \fIargv\fP of MPI_ARGV_NULL to MPI_Comm_spawn results in \fImain\fP receiving \fIargc\fP of 1 and an \fIargv\fP whose element 0 is the name of the program. -.sp -The \fImaxprocs\fP Argument -.sp -Open MPI tries to spawn \fImaxprocs\fP processes. If it is unable to spawn \fImaxprocs\fP processes, it raises an error of class MPI_ERR_SPAWN. If MPI is able to spawn the specified number of processes, MPI_Comm_spawn returns successfully and the number of spawned processes, \fIm\fP, is given by the size of the remote group of \fIintercomm\fP. -.sp -A spawn call with the default behavior is called hard. A spawn call for which fewer than \fImaxprocs\fP processes may be returned is called soft. -.sp -The \fIinfo\fP Argument -.sp -The \fIinfo\fP argument is an opaque handle of type MPI_Info in C and INTEGER in Fortran. It is a container for a number of user-specified (\fIkey,value\fP) pairs. 
\fIkey\fP and \fIvalue\fP are strings (null-terminated char* in C, character*(*) in Fortran). Routines to create and manipulate the \fIinfo\fP argument are described in Section 4.10 of the MPI-2 standard. -.sp -For the SPAWN calls, \fIinfo\fP provides additional, implementation-dependent instructions to MPI and the runtime system on how to start processes. An application may pass MPI_INFO_NULL in C or Fortran. Portable programs not requiring detailed control over process locations should use MPI_INFO_NULL. -.sp -The following keys for \fIinfo\fP are recognized in Open MPI. (The reserved values mentioned in Section 5.3.4 of the MPI-2 standard are not implemented.) -.sp -.nf -Key Type Description ---- ---- ----------- - -host char * Host on which the process should be - spawned. See the \fIorte_host\fP man - page for an explanation of how this - will be used. -hostfile char * Hostfile containing the hosts on which - the processes are to be spawned. See - the \fIorte_hostfile\fP man page for - an explanation of how this will be - used. -add-host char * Add the specified host to the list of - hosts known to this job and use it for - the associated process. This will be - used similarly to the -host option. -add-hostfile char * Hostfile containing hosts to be added - to the list of hosts known to this job - and use it for the associated - process. This will be used similarly - to the -hostfile option. -wdir char * Directory where the executable is - located. If files are to be - pre-positioned, then this location is - the desired working directory at time - of execution - if not specified, then - it will automatically be set to - \fIompi_preload_files_dest_dir\fP. -ompi_prefix char * Same as the --prefix command line - argument to mpirun. -ompi_preload_binary bool If set to true, pre-position the - specified executable onto the remote - host. A destination directory must - also be provided. 
-ompi_preload_files char * A comma-separated list of files that - are to be pre-positioned in addition - to the executable. Note that this - option does not depend upon - \fIompi_preload_binary\fP - files can - be moved to the target even if an - executable is not moved. -ompi_stdin_target char * Comma-delimited list of ranks to - receive stdin when forwarded. -ompi_non_mpi bool If set to true, launching a non-MPI - application; the returned communicator - will be MPI_COMM_NULL. Failure to set - this flag when launching a non-MPI - application will cause both the child - and parent jobs to "hang". -ompi_param char * Pass an OMPI MCA parameter to the - child job. If that parameter already - exists in the environment, the value - will be overwritten by the provided - value. -mapper char * Mapper to be used for this job -map_by char * Mapping directive indicating how - processes are to be mapped (slot, - node, socket, etc.). -rank_by char * Ranking directive indicating how - processes are to be ranked (slot, - node, socket, etc.). -bind_to char * Binding directive indicating how - processes are to be bound (core, slot, - node, socket, etc.). -path char * List of directories to search for - the executable -npernode char * Number of processes to spawn on - each node of the allocation -pernode bool Equivalent to npernode of 1 -ppr char * Spawn specified number of processes - on each of the identified object type -env char * Newline-delimited list of envars to - be passed to the spawned procs -.fi - -\fIbool\fP info keys are actually strings but are evaluated as -follows: if the string value is a number, it is converted to an -integer and cast to a boolean (meaning that zero integers are false -and non-zero values are true). If the string value is -(case-insensitive) "yes" or "true", the boolean is true. If the -string value is (case-insensitive) "no" or "false", the boolean is -false. All other string values are unrecognized, and therefore false. 
- -.sp -The \fIroot\fP Argument -.sp -All arguments before the \fIroot\fP argument are examined only on the process whose rank in \fIcomm\fP is equal to \fIroot\fP. The value of these arguments on other processes is ignored. -.sp -The \fIarray_of_errcodes\fP Argument -.sp -The \fIarray_of_errcodes\fP is an array of length \fImaxprocs\fP in which MPI reports the status of the processes that MPI was requested to start. If all \fImaxprocs\fP processes were spawned, \fIarray_of_errcodes\fP is filled in with the value MPI_SUCCESS. If any of the processes are \fInot\fP spawned, \fIarray_of_errcodes\fP is filled in with the value MPI_ERR_SPAWN. In C or Fortran, an application may pass MPI_ERRCODES_IGNORE if it is not interested in the error codes. - -.SH NOTES -.ft R -Completion of MPI_Comm_spawn in the parent does not necessarily mean that MPI_Init has been called in the children (although the returned intercommunicator can be used immediately). - - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.sp -.nf -MPI_Comm_spawn_multiple(3) -MPI_Comm_get_parent(3) -mpirun(1) - diff --git a/ompi/mpi/man/man3/MPI_Comm_spawn_multiple.3in b/ompi/mpi/man/man3/MPI_Comm_spawn_multiple.3in deleted file mode 100644 index 21414043bde..00000000000 --- a/ompi/mpi/man/man3/MPI_Comm_spawn_multiple.3in +++ /dev/null @@ -1,268 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright (c) 2010-2018 Cisco Systems, Inc. 
All rights reserved -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Comm_spawn_multiple 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Comm_spawn_multiple\fP \- Spawns multiple binaries, or the same binary with multiple sets of arguments. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Comm_spawn_multiple(int \fIcount\fP, char *\fIarray_of_commands\fP[], - char **\fIarray_of_argv\fP[], const int \fIarray_of_maxprocs\fP[], const MPI_Info - \fIarray_of_info\fP[], int \fIroot\fP, MPI_Comm \fIcomm\fP, MPI_Comm *\fIintercomm\fP, - int \fIarray_of_errcodes\fP[]) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_COMM_SPAWN_MULTIPLE(\fICOUNT, ARRAY_OF_COMMANDS, ARRAY_OF_ARGV, - ARRAY_OF_MAXPROCS, ARRAY_OF_INFO, ROOT, COMM, INTERCOMM, - ARRAY_OF_ERRCODES, IERROR\fP) - INTEGER \fICOUNT, ARRAY_OF_INFO(*), ARRAY_OF_MAXPROCS(*), ROOT, - COMM, INTERCOMM, ARRAY_OF_ERRCODES(*), IERROR\fP - CHARACTER*(*) \fIARRAY_OF_COMMANDS\fP(*), \fIARRAY_OF_ARGV\fP(\fICOUNT\fP, *) - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Comm_spawn_multiple(\fIcount\fP, \fIarray_of_commands\fP, \fIarray_of_argv\fP, - \fIarray_of_maxprocs\fP, \fIarray_of_info\fP, \fIroot\fP, \fIcomm\fP, \fIintercomm,\fP - \fIarray_of_errcodes\fP, \fIierror\fP) - INTEGER, INTENT(IN) :: \fIcount\fP, \fIarray_of_maxprocs(*)\fP, \fIroot\fP - CHARACTER(LEN=*), INTENT(IN) :: \fIarray_of_commands(*)\fP - CHARACTER(LEN=*), INTENT(IN) :: \fIarray_of_argv(count\fP, \fI*)\fP - TYPE(MPI_Info), INTENT(IN) :: \fIarray_of_info(*)\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Comm), INTENT(OUT) :: \fIintercomm\fP - INTEGER :: \fIarray_of_errcodes(*)\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -count -Number of commands (positive integer, significant to MPI only at 
\fIroot\fP -- see NOTES). -.TP 1i -array_of_commands -Programs to be executed (array of strings, significant only at \fIroot\fP). -.TP 1i -array_of_argv -Arguments for \fIcommands\fP (array of array of strings, significant only at \fIroot\fP). -.TP 1i -array_of_maxprocs -Maximum number of processes to start for each command (array of integers, significant only at \fIroot\fP). -.TP 1i -array_of_info -Info objects telling the runtime system where and how to start processes (array of handles, significant only at \fIroot\fP). -.TP 1i -root -Rank of process in which previous arguments are examined (integer). -.TP 1i -comm -Intracommunicator containing group of spawning processes (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -intercomm -Intercommunicator between original group and the newly spawned group (handle). -.TP 1i -array_of_errcodes -One code per process (array of integers). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Comm_spawn_multiple is identical to MPI_Comm_spawn(3) except that -it can specify multiple executables. The first argument, \fIcount\fP, -indicates the number of executables. The next three arguments are -arrays of the corresponding arguments in MPI_Comm_spawn(3). The next -argument, \fIarray_of_info\fP, is an array of \fIinfo\fP arguments, one -for each executable. See the INFO ARGUMENTS section for more information. -.sp -For the Fortran version of \fIarray_of_argv\fP, the element \fIarray_of_argv\fP(i,j) is the jth argument to command number i. -.sp -In any language, an application may use the constant MPI_ARGVS_NULL (which is likely to be (char ***)0 in C) to specify that no arguments should be passed to any commands. The effect of setting individual elements of \fIarray_of_argv\fP to MPI_ARGV_NULL is not defined. 
To specify arguments for some commands but not others, the commands without arguments should have a corresponding \fIargv\fP whose first element is null ((char *)0 in C and empty string in Fortran). -.sp -All of the spawned processes have the same MPI_COMM_WORLD. Their ranks in MPI_COMM_WORLD correspond directly to the order in which the commands are specified in MPI_Comm_spawn_multiple. Assume that m1 processes are generated by the first command, m2 by the second, etc. The processes corresponding to the first command have ranks 0, 1,..., m1-1. The processes in the second command have ranks m1, m1+1, ..., m1+m2-1. The processes in the third have ranks m1+m2, m1+m2+1, ..., m1+m2+m3-1, etc. -.sp -The \fIarray_of_errcodes\fP argument is 1-dimensional array of size -.sp -.nf - _ count - \\ n , - /_ i=1 i -.fi -.sp -where i is the ith element of \fIarray_of_maxprocs\fP. Command number \fIi\fP corresponds to the i contiguous slots in this array from element -.sp -.nf - _ _ - _ \fIi\fP-1 | _ \fIi\fP | - \\ n , to | \\ n | -1 - /_ \fIj\fP=1 i | /_ \fIj\fP=1 j | - |_ _| -.fi -.sp -Error codes are treated as for MPI_Comm_spawn(3). - - -.SH INFO ARGUMENTS -The following keys for \fIinfo\fP are recognized in "#PACKAGE_NAME#". (The reserved values mentioned in Section 5.3.4 of the MPI-2 standard are not implemented.) -.sp -.sp -.nf -Key Type Description ---- ---- ----------- - -host char * Comma-separated list of hosts on which - the processes should be spawned. See - the \fIorte_host\fP man page for an - explanation of how this will be used. -hostfile char * Hostfile containing the hosts on which - the processes are to be spawned. See - the \fIorte_hostfile\fP man page for - an explanation of how this will be - used. -add-host char * Add the specified hosts to the list of - hosts known to this job and use it for - the associated processes. This will be - used similarly to the -host option. 
-add-hostfile char * Hostfile containing hosts to be added - to the list of hosts known to this job - and use it for the associated - process. This will be used similarly - to the -hostfile option. -wdir char * Directory where the executable is - located. If files are to be - pre-positioned, then this location is - the desired working directory at time - of execution - if not specified, then - it will automatically be set to - \fIompi_preload_files_dest_dir\fP. -ompi_prefix char * Same as the --prefix command line - argument to mpirun. -ompi_preload_binary bool If set to true, pre-position the - specified executable onto the remote - host. A destination directory must - also be provided. -ompi_preload_files char * A comma-separated list of files that - are to be pre-positioned in addition - to the executable. Note that this - option does not depend upon - \fIompi_preload_binary\fP - files can - be moved to the target even if an - executable is not moved. -ompi_stdin_target char * Comma-delimited list of ranks to - receive stdin when forwarded. -ompi_non_mpi bool If set to true, launching a non-MPI - application; the returned communicator - will be MPI_COMM_NULL. Failure to set - this flag when launching a non-MPI - application will cause both the child - and parent jobs to "hang". -ompi_param char * Pass an OMPI MCA parameter to the - child job. If that parameter already - exists in the environment, the value - will be overwritten by the provided - value. -mapper char * Mapper to be used for this job -map_by char * Mapping directive indicating how - processes are to be mapped (slot, - node, socket, etc.). -rank_by char * Ranking directive indicating how - processes are to be ranked (slot, - node, socket, etc.). -bind_to char * Binding directive indicating how - processes are to be bound (core, slot, - node, socket, etc.). 
-path char * List of directories to search for - the executable -npernode char * Number of processes to spawn on - each node of the allocation -pernode bool Equivalent to npernode of 1 -ppr char * Spawn specified number of processes - on each of the identified object type -env char * Newline-delimited list of envars to - be passed to the spawned procs -.fi - -.sp -\fIbool\fP info keys are actually strings but are evaluated as -follows: if the string value is a number, it is converted to an -integer and cast to a boolean (meaning that zero integers are false -and non-zero values are true). If the string value is -(case-insensitive) "yes" or "true", the boolean is true. If the -string value is (case-insensitive) "no" or "false", the boolean is -false. All other string values are unrecognized, and therefore false. - -.sp -Note that if any of the info handles have \fIompi_non_mpi\fP set to -true, then all info handles must have it set to true. If some are set -to true, but others are set to false (or are unset), MPI_ERR_INFO will -be returned. - -.sp -Note that in "#PACKAGE_NAME#", the first array location in \fIarray_of_info\fP is applied to all the commands in \fIarray_of_commands\fP. - -.SH NOTES -The argument \fIcount\fP is interpreted by MPI only at the root, as is \fIarray_of_argv\fP. Since the leading dimension of \fIarray_of_argv\fP is \fIcount\fP, a nonpositive value of \fIcount\fP at a nonroot node could theoretically cause a runtime bounds check error, even though \fIarray_of_argv\fP should be ignored by the subroutine. If this happens, you should explicitly supply a reasonable value of \fIcount\fP on the nonroot nodes. -.sp -Similar to MPI_Comm_spawn(3), it is the application's responsibility -to terminate each individual set of argv in the -.I array_of_argv -argument. In C, each argv array is terminated by a NULL pointer. 
In -Fortran, each argv array is terminated by an empty string (note that -compilers will not automatically insert this blank string; the -application must ensure to have enough space for an empty string entry -as the last element of the array). -.sp -Other restrictions apply to the -.I array_of_argv -parameter; see MPI_Comm_spawn(3)'s description of the -.I argv -parameter for more details. -.sp -MPI-3.1 implies (but does not directly state) that the argument -\fIarray_of_commands\fP must be an array of strings of length -\fIcount\fP. Unlike the \fIarray_of_argv\fP parameter, -\fIarray_of_commands\fP does not need to be terminated with a NULL -pointer in C or a blank string in Fortran. Older versions of Open MPI -required that \fIarray_of_commands\fP be terminated with a blank -string in Fortran; that is no longer required in this version of Open -MPI. -.sp -Calling MPI_Comm_spawn(3) many times would create many sets of -children with different MPI_COMM_WORLDs, whereas -MPI_Comm_spawn_multiple creates children with a single MPI_COMM_WORLD, -so the two methods are not completely equivalent. Also if you need to -spawn multiple executables, you may get better performance by using -MPI_Comm_spawn_multiple instead of calling MPI_Comm_spawn(3) several -times. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. 
- -.SH SEE ALSO -.ft R -.sp -.nf -MPI_Comm_spawn(3) -MPI_Comm_get_parent(3) -mpirun(1) diff --git a/ompi/mpi/man/man3/MPI_Comm_split.3in b/ompi/mpi/man/man3/MPI_Comm_split.3in deleted file mode 100644 index 3dd360e680a..00000000000 --- a/ompi/mpi/man/man3/MPI_Comm_split.3in +++ /dev/null @@ -1,101 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Comm_split 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Comm_split \fP \- Creates new communicators based on colors and keys. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Comm_split(MPI_Comm \fIcomm\fP, int\fI color\fP, int\fI key\fP, - MPI_Comm *\fInewcomm\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_COMM_SPLIT(\fICOMM, COLOR, KEY, NEWCOMM, IERROR\fP) - INTEGER \fICOMM, COLOR, KEY, NEWCOMM, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Comm_split(\fIcomm\fP, \fIcolor\fP, \fIkey\fP, \fInewcomm\fP, \fIierror\fP) - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - INTEGER, INTENT(IN) :: \fIcolor\fP, \fIkey\fP - TYPE(MPI_Comm), INTENT(OUT) :: \fInewcomm\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -comm -Communicator (handle). -.TP 1i -color -Control of subset assignment (nonnegative integer). -.TP 1i -key -Control of rank assignment (integer). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -newcomm -New communicator (handle). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -This function partitions the group associated with comm into disjoint subgroups, one for each value of color. Each subgroup contains all processes of the same color. 
Within each subgroup, the processes are ranked in the order defined by the value of the argument key, with ties broken according to their rank in the old group. A new communicator is created for each subgroup and returned in newcomm. A process may supply the color value MPI_UNDEFINED, in which case newcomm returns MPI_COMM_NULL. This is a collective call, but each process is permitted to provide different values for color and key. -.sp -When you call MPI_Comm_split on an inter-communicator, the processes on the left with the same color as those on the right combine to create a new inter-communicator. The key argument describes the relative rank of processes on each side of the inter-communicator. The function returns MPI_COMM_NULL for those colors that are specified on only one side of the inter-communicator, or for those that specify MPI_UNDEFINED as the color. -.sp -A call to MPI_Comm_create(\fIcomm\fP, \fIgroup\fP, \fInewcomm\fP) is equivalent to a call to MPI_Comm_split(\fIcomm\fP, \fIcolor\fP,\fI key\fP, \fInewcomm\fP), where all members of \fIgroup\fP provide \fIcolor\fP = 0 and \fIkey\fP = rank in group, and all processes that are not members of \fIgroup\fP provide \fIcolor\fP = MPI_UNDEFINED. The function MPI_Comm_split allows more general partitioning of a group into one or more subgroups with optional reordering. -.sp -The value of \fIcolor\fP must be nonnegative or MPI_UNDEFINED. - -.SH NOTES -.ft R -This is an extremely powerful mechanism for -dividing a single communicating group of processes into k subgroups, with k -chosen implicitly by the user (by the number of colors asserted over all -the processes). Each resulting communicator will be nonoverlapping. Such a division could be useful for defining a hierarchy of computations, such as for multigrid or linear algebra. -.sp -Multiple calls to MPI_Comm_split can be used to overcome the requirement that any call have no overlap of the resulting communicators (each process is of only one color per call). 
In this way, multiple overlapping communication structures can be created. Creative use of the color and key in such splitting operations is encouraged. -.sp -Note that, for a fixed color, the keys need not be unique. It is MPI_Comm_split's responsibility to sort processes in ascending order according to this key, and to break ties in a consistent way. If all the keys are specified in the same way, then all the processes in a given color will have the relative rank order as they did in their parent group. (In general, they will have different ranks.) -.sp -Essentially, making the key value zero for all processes of a given color -means that one needn't really pay attention to the rank-order of the processes in the new communicator. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.sp -MPI_Comm_create -.br -MPI_Intercomm_create -.br -MPI_Comm_dup -.br -MPI_Comm_free - - diff --git a/ompi/mpi/man/man3/MPI_Comm_split_type.3in b/ompi/mpi/man/man3/MPI_Comm_split_type.3in deleted file mode 100644 index 09df426faea..00000000000 --- a/ompi/mpi/man/man3/MPI_Comm_split_type.3in +++ /dev/null @@ -1,165 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright (c) 2010-2015 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. 
-.\" Copyright (c) 1996 Thinking Machines Corporation -.\" $COPYRIGHT$ -.TH MPI_Comm_split_type 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Comm_split_type \fP \- Creates new communicators based on colors and keys. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Comm_split_type(MPI_Comm \fIcomm\fP, int\fI split_type\fP, int\fI key\fP, - MPI_Info info, MPI_Comm *\fInewcomm\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_COMM_SPLIT_TYPE(\fICOMM, SPLIT_TYPE, KEY, INFO, NEWCOMM, IERROR\fP) - INTEGER \fICOMM, SPLIT_TYPE, KEY, INFO, NEWCOMM, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Comm_split_type(\fIcomm\fP, \fIsplit_type\fP, \fIkey\fP, \fIinfo\fP, \fInewcomm\fP, \fIierror\fP) - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - INTEGER, INTENT(IN) :: \fIsplit_type\fP, \fIkey\fP - TYPE(MPI_Info), INTENT(IN) :: \fIinfo\fP - TYPE(MPI_Comm), INTENT(OUT) :: \fInewcomm\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -comm -Communicator (handle). -.TP 1i -split_type -Type of processes to be grouped together (integer). -.TP 1i -key -Control of rank assignment (integer). -.TP 1i -info -Info argument (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -newcomm -New communicator (handle). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -This function partitions the group associated with \fIcomm\fP into disjoint subgroups, based on -the type specied by \fIsplit_type\fP. Each subgroup contains all processes of the same type. -Within each subgroup, the processes are ranked in the order defined by the value of the -argument \fIkey\fP, with ties broken according to their rank in the old group. A new communicator -is created for each subgroup and returned in newcomm. 
This is a collective call; -all processes must provide the same \fIsplit_type\fP, but each process is permitted to provide -different values for key. An exception to this rule is that a process may supply the type -value MPI_UNDEFINED, in which case newcomm returns MPI_COMM_NULL. - -.SH SPLIT TYPES -.ft R -.TP 1i -MPI_COMM_TYPE_SHARED -This type splits the communicator into subcommunicators, each of which can create a shared memory region. - -.ft R -.TP 1i -OMPI_COMM_TYPE_NODE -Synonym for MPI_COMM_TYPE_SHARED. -.ft R -.TP 1i -OMPI_COMM_TYPE_HWTHREAD -This type splits the communicator into subcommunicators, each of which belongs to the same hardware thread. -.ft R -.TP 1i -OMPI_COMM_TYPE_CORE -This type splits the communicator into subcommunicators, each of which belongs to the same core/processing unit. -.ft R -.TP 1i -OMPI_COMM_TYPE_L1CACHE -This type splits the communicator into subcommunicators, each of which belongs to the same L1 cache. -.ft R -.TP 1i -OMPI_COMM_TYPE_L2CACHE -This type splits the communicator into subcommunicators, each of which belongs to the same L2 cache. -.ft R -.TP 1i -OMPI_COMM_TYPE_L3CACHE -This type splits the communicator into subcommunicators, each of which belongs to the same L3 cache. -.ft R -.TP 1i -OMPI_COMM_TYPE_SOCKET -This type splits the communicator into subcommunicators, each of which belongs to the same socket. -.ft R -.TP 1i -OMPI_COMM_TYPE_NUMA -This type splits the communicator into subcommunicators, each of which belongs to the same NUMA-node. -.ft R -.TP 1i -OMPI_COMM_TYPE_BOARD -This type splits the communicator into subcommunicators, each of which belongs to the same board. -.ft R -.TP 1i -OMPI_COMM_TYPE_HOST -This type splits the communicator into subcommunicators, each of which belongs to the same host. -.ft R -.TP 1i -OMPI_COMM_TYPE_CU -This type splits the communicator into subcommunicators, each of which belongs to the same computational unit. 
-.ft R -.TP 1i -OMPI_COMM_TYPE_CLUSTER -This type splits the communicator into subcommunicators, each of which belongs to the same cluster. - -.SH NOTES -.sp -The communicator keys denoted with an -.I OMPI_ -prefix instead of an -.I MPI_ -prefix are specific to Open MPI, and are not part of the MPI -standard. Their use should be protected by the -.I OPEN_MPI -C preprocessor macro. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -MPI_Comm_set_errhandler; the predefined error handler -MPI_ERRORS_RETURN may be used to cause error values to be -returned. Note that MPI does not guarantee that an MPI program can -continue past an error. - -.SH SEE ALSO -.ft R -.sp -MPI_Comm_create -.br -MPI_Intercomm_create -.br -MPI_Comm_dup -.br -MPI_Comm_free -.br -MPI_Comm_split - diff --git a/ompi/mpi/man/man3/MPI_Comm_test_inter.3in b/ompi/mpi/man/man3/MPI_Comm_test_inter.3in deleted file mode 100644 index 69ce53ef97c..00000000000 --- a/ompi/mpi/man/man3/MPI_Comm_test_inter.3in +++ /dev/null @@ -1,87 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Comm_test_inter 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Comm_test_inter \fP \- Tests to see if a comm is an intercommunicator. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Comm_test_inter(MPI_Comm \fIcomm\fP, int\fI *flag\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_COMM_TEST_INTER(\fICOMM, FLAG, IERROR\fP) - INTEGER \fICOMM, IERROR\fP - LOGICAL \fIFLAG \fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Comm_test_inter(\fIcomm\fP, \fIflag\fP, \fIierror\fP) - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - LOGICAL, INTENT(OUT) :: \fIflag\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -comm -Communicator (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -flag (Logical.) -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -This local routine allows the calling process to determine the type of a communicator. It returns true for an intercommunicator, false for an intracommunicator. -.sp -The type of communicator also affects the value returned by three other functions. When dealing with an intracommunicator (enables communication within a single group), the functions listed below return the expected values, group size, group, and rank. When dealing with an inter-communicator, however, they return the following values: -.sp -.nf -MPI_Comm_size Returns the size of the local group. -MPI_Comm_group Returns the local group. -MPI_Comm_rank Returns the rank in the local group. -.fi -.sp -To return the remote group and remote group size of an inter-communicator, use the MPI_Comm_remote_group and MPI_Comm_remote_size functions. -.sp -The operation MPI_Comm_compare is valid for intercommunicators. Both communicators must be either intra- or intercommunicators, or else MPI_UNEQUAL results. Both corresponding local and remote groups must compare correctly to get the results MPI_CONGRUENT and MPI_SIMILAR. In particular, it is possible for MPI_SIMILAR to result because either the local or remote groups were similar but not identical. -.sp -The following accessors provide consistent access to the remote group of an -intercommunicator: MPI_Comm_remote_size, MPI_Comm_remote_group. 
-.sp -The intercommunicator accessors (MPI_Comm_test_inter, MPI_Comm_remote_size, MPI_Comm_remote_group) are all local operations. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.sp -.nf -MPI_Comm_remote_group -MPI_Comm_remote_size -MPI_Intercomm_create -MPI_Intercomm_merge - diff --git a/ompi/mpi/man/man3/MPI_Compare_and_swap.3in b/ompi/mpi/man/man3/MPI_Compare_and_swap.3in deleted file mode 100644 index 0a28b12420a..00000000000 --- a/ompi/mpi/man/man3/MPI_Compare_and_swap.3in +++ /dev/null @@ -1,112 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013-2015 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" $COPYRIGHT$ -.TH MPI_Compare_and_swap 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Compare_and_swap\fP \- Perform RMA compare-and-swap - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Compare_and_swap(const void *\fIorigin_addr\fP, const void *\fIcompare_addr\fP, - void *\fIresult_addr\fP, MPI_Datatype \fIdatatype\fP, int \fItarget_rank\fP, - MPI_Aint \fItarget_disp\fP, MPI_Win \fIwin\fP) - -.fi -.SH Fortran Syntax (see FORTRAN 77 NOTES) -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_COMPARE_AND_SWAP(\fIORIGIN_ADDR, COMPARE_ADDR, RESULT_ADDR, DATATYPE, TARGET_RANK, - TARGET_DISP, WIN, IERROR\fP) - \fIORIGIN_ADDR\fP, \fICOMPARE_ADDR\fP, \fIRESULT_ADDR\fP(*) - INTEGER(KIND=MPI_ADDRESS_KIND) \fITARGET_DISP\fP - INTEGER \fIDATATYPE, TARGET_RANK, WIN, IERROR \fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Compare_and_swap(\fIorigin_addr\fP, \fIcompare_addr\fP, \fIresult_addr\fP, \fIdatatype\fP, - \fItarget_rank\fP, \fItarget_disp\fP, \fIwin\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN) :: \fIorigin_addr\fP, \fIcompare_addr\fP - TYPE(*), DIMENSION(..) :: \fIresult_addr\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - INTEGER, INTENT(IN) :: \fItarget_rank\fP - INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: \fItarget_disp\fP - TYPE(MPI_Win), INTENT(IN) :: \fIwin\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -origin_addr -Initial address of buffer (choice). -.ft R -.TP -compare_addr -Initial address of compare buffer (choice). -.ft R -.TP -result_addr -Initial address of result buffer (choice). -.ft R -.TP -datatype -Data type of the entry in origin, result, and target buffers (handle). -.ft R -.TP 1i -target_rank -Rank of target (nonnegative integer). -.ft R -.TP 1i -target_disp -Displacement from start of window to beginning of target buffer (nonnegative integer). -.ft R -.TP 1i -win -Window object (handle). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -This function compares one element of type \fIdatatype\fP in the compare buffer \fIcompare_addr\fP with the buffer at offset \fItarget_disp\fP in the target window specified by \fItarget_rank\fP and \fIwin\fP and replaces the value at the target with the value in the origin buffer \fIorigin_addr\fP if the compare buffer and the target buffer are identical. 
The original value at the target is returned in the buffer \fIresult_addr\fP. The parameter \fIdatatype\fP must belong to one of the following categories of predefined datatypes: C integer, Fortran integer, Logical, Multi-language types, or Byte as specified in MPI-3 § 5.9.2 on page 176. -.sp -The origin and result buffers (\fIorigin_addr\fP and \fIresult_addr\fP) must be disjoint. - -.SH FORTRAN 77 NOTES -.ft R -The MPI standard prescribes portable Fortran syntax for -the \fITARGET_DISP\fP argument only for Fortran 90. FORTRAN 77 -users may use the non-portable syntax -.sp -.nf - INTEGER*MPI_ADDRESS_KIND \fITARGET_DISP\fP -.fi -.sp -where MPI_ADDRESS_KIND is a constant defined in mpif.h -and gives the length of the declared integer in bytes. - -.SH NOTES -It is the user's responsibility to guarantee that, when -using the accumulate functions, the target displacement argument is such -that accesses to the window are properly aligned according to the data -type arguments in the call to the \fBMPI_Compare_and_swap\fP function. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler -may be changed with \fBMPI_Comm_set_errhandler\fP; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. diff --git a/ompi/mpi/man/man3/MPI_Dims_create.3in b/ompi/mpi/man/man3/MPI_Dims_create.3in deleted file mode 100644 index 3d2b02be2df..00000000000 --- a/ompi/mpi/man/man3/MPI_Dims_create.3in +++ /dev/null @@ -1,86 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. 
-.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Dims_create 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Dims_create \fP \- Creates a division of processors in a Cartesian grid. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Dims_create(int \fInnodes\fP, int\fI ndims\fP, int\fI dims\fP[]) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_DIMS_CREATE(\fINNODES, NDIMS, DIMS, IERROR\fP) - INTEGER \fINNODES, NDIMS, DIMS(*), IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Dims_create(\fInnodes\fP, \fIndims\fP, \fIdims\fP, \fIierror\fP) - INTEGER, INTENT(IN) :: \fInnodes\fP, \fIndims\fP - INTEGER, INTENT(INOUT) :: \fIdims(ndims)\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -nnodes -Number of nodes in a grid (integer). -.TP 1i -ndims -Number of Cartesian dimensions (integer). - -.SH IN/OUT PARAMETER -.TP 1i -dims -Integer array of size ndims specifying the number of nodes in each dimension. - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -For Cartesian topologies, the function MPI_Dims_create helps the user select a balanced distribution of processes per coordinate direction, depending on the number of processes in the group to be balanced and optional constraints that can be specified by the user. One use is to partition all the processes (the size of MPI_COMM_WORLD's group) into an n-dimensional topology. -.sp -The entries in the array \fIdims\fP are set to describe a Cartesian grid with \fIndims\fP dimensions and a total of \fInnodes\fP nodes. The dimensions are set to be as close to each other as possible, using an appropriate divisibility algorithm. The caller may further constrain the operation of this routine by specifying elements of array dims. 
If dims[i] is set to a positive number, the routine will not modify the number of nodes in dimension i; only those entries where dims[i] = 0 are modified by the call. -.sp -Negative input values of dims[i] are erroneous. An error will occur if -nnodes is not a multiple of ((pi) over (i, dims[i] != 0)) dims[i]. -.sp -For dims[i] set by the call, dims[i] will be ordered in nonincreasing order. Array dims is suitable for use as input to routine MPI_Cart_create. MPI_Dims_create is local. -.sp -\fBExample:\fP -.nf - -dims -before dims -call function call on return ------------------------------------------------------ -(0,0) MPI_Dims_create(6, 2, dims) (3,2) -(0,0) MPI_Dims_create(7, 2, dims) (7,1) -(0,3,0) MPI_Dims_create(6, 3, dims) (2,3,1) -(0,3,0) MPI_Dims_create(7, 3, dims) erroneous call ------------------------------------------------------- - -.fi -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_Dist_graph_create.3in b/ompi/mpi/man/man3/MPI_Dist_graph_create.3in deleted file mode 100644 index 3f48eedf901..00000000000 --- a/ompi/mpi/man/man3/MPI_Dist_graph_create.3in +++ /dev/null @@ -1,139 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 FUJITSU LIMITED. All rights reserved. 
-.\" $COPYRIGHT$ -.TH MPI_Dist_graph_create 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Dist_graph_create \fP \- Makes a new communicator to which topology information has been attached. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Dist_graph_create(MPI_Comm \fIcomm_old\fP, int\fI n\fP, const int\fI sources[]\fP, - const int\fI degrees[]\fP, const int\fI destinations\fP[], const int\fI weights\fP[], - MPI_Info info, int\fI reorder\fP, MPI_Comm\fI *comm_dist_graph\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_DIST_GRAPH_CREATE(\fICOMM_OLD, N, SOURCES, DEGREES, DESTINATIONS, WEIGHTS, - INFO, REORDER, COMM_DIST_GRAPH, IERROR\fP) - INTEGER \fICOMM_OLD, N, SOURCES(*), DEGRES(*), WEIGHTS(*), INFO\fP - INTEGER \fICOMM_DIST_GRAPH, IERROR\fP - LOGICAL \fIREORDER\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Dist_Graph_create(\fIcomm_old\fP, \fIn\fP, \fIsources\fP, \fIdegrees\fP, \fIdestinations\fP, \fIweights\fP, - \fIinfo\fP, \fIreorder\fP, \fIcomm_dist_graph\fP, \fIierror\fP) - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm_old\fP - INTEGER, INTENT(IN) :: \fIn\fP, \fIsources(n)\fP, \fIdegrees(n)\fP, \fIdestinations(*)\fP - INTEGER, INTENT(IN) :: \fIweights(*)\fP - TYPE(MPI_Info), INTENT(IN) :: \fIinfo\fP - LOGICAL, INTENT(IN) :: reorder - TYPE(MPI_Comm), INTENT(OUT) :: \fIcomm_dist_graph\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -comm_old -Input communicator without topology (handle). -.TP 1i -n -Number of source nodes for which this process specifies edges (non-negative integer). -.TP 1i -sources -Array containing the \fIn\fP source nodes for which this process specifies edges (array of non-negative integers). -.TP 1i -degrees -Array specifying the number of destinations for each source node in the source node array (array of non-negative integers). 
-.TP 1i -destinations -Destination nodes for the source nodes in the source node array (array of non-negative integers). -.TP 1i -weights -Weights for source to destination edges (array of non-negative integers). -.TP 1i -info -Hints on optimization and interpretation of weights (handle). -.TP 1i -reorder -Ranking may be reordered (true) or not (false) (logical). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -comm_dist_graph -Communicator with distributed graph topology added (handle). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Dist_graph_create creates a new communicator \fIcomm_dist_graph\fP with distrubuted -graph topology and returns a handle to the new communicator. The number of processes in -\fIcomm_dist_graph\fP is identical to the number of processes in \fIcomm_old\fP. Concretely, each process calls the -constructor with a set of directed (source,destination) communication edges as described below. -Every process passes an array of \fIn\fP source nodes in the \fIsources\fP array. For each source node, a -non-negative number of destination nodes is specied in the \fIdegrees\fP array. The destination -nodes are stored in the corresponding consecutive segment of the \fIdestinations\fP array. More -precisely, if the i-th node in sources is s, this specifies \fIdegrees\fP[i] \fIedges\fP (s,d) with d of the j-th -such edge stored in \fIdestinations\fP[\fIdegrees\fP[0]+...+\fIdegrees\fP[i-1]+j]. The weight of this edge is -stored in \fIweights\fP[\fIdegrees\fP[0]+...+\fIdegrees\fP[i-1]+j]. Both the \fIsources\fP and the \fIdestinations\fP arrays -may contain the same node more than once, and the order in which nodes are listed as -destinations or sources is not signicant. Similarly, different processes may specify edges -with the same source and destination nodes. Source and destination nodes must be process -ranks of comm_old. 
Different processes may specify different numbers of source and -destination nodes, as well as different source to destination edges. This allows a fully distributed -specification of the communication graph. Isolated processes (i.e., processes with -no outgoing or incoming edges, that is, processes that do not occur as source or destination -node in the graph specication) are allowed. The call to MPI_Dist_graph_create is collective. - -If reorder = false, all processes will have the same rank in comm_dist_graph as in -comm_old. If reorder = true then the MPI library is free to remap to other processes (of -comm_old) in order to improve communication on the edges of the communication graph. -The weight associated with each edge is a hint to the MPI library about the amount or -intensity of communication on that edge, and may be used to compute a \"best\" reordering. - -.SH WEIGHTS -.ft R -Weights are specied as non-negative integers and can be used to influence the process -remapping strategy and other internal MPI optimizations. For instance, approximate count -arguments of later communication calls along specic edges could be used as their edge -weights. Multiplicity of edges can likewise indicate more intense communication between -pairs of processes. However, the exact meaning of edge weights is not specied by the MPI -standard and is left to the implementation. An application can supply the special value -MPI_UNWEIGHTED for the weight array to indicate that all edges have the same (effectively no) -weight. It is erroneous to supply MPI_UNWEIGHTED for some but not -all processes of comm_old. If the graph is weighted but \fIn\fP = 0, then MPI_WEIGHTS_EMPTY -or any arbitrary array may be passed to weights. Note that MPI_UNWEIGHTED and -MPI_WEIGHTS_EMPTY are not special weight values; rather they are special values for the -total array argument. 
In Fortran, MPI_UNWEIGHTED and MPI_WEIGHTS_EMPTY are objects -like MPI_BOTTOM (not usable for initialization or assignment). See MPI-3 § 2.5.4. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.sp -MPI_Dist_graph_create_adjacent -MPI_Dist_graph_neighbors -MPI_Dist_graph_neighbors_count - diff --git a/ompi/mpi/man/man3/MPI_Dist_graph_create_adjacent.3in b/ompi/mpi/man/man3/MPI_Dist_graph_create_adjacent.3in deleted file mode 100644 index 91da9628dbe..00000000000 --- a/ompi/mpi/man/man3/MPI_Dist_graph_create_adjacent.3in +++ /dev/null @@ -1,134 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 FUJITSU LIMITED. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Dist_graph_create_adjacent 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Dist_graph_create_adjacent \fP \- Makes a new communicator to which topology information has been attached. 
- -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Dist_graph_create_adjacent(MPI_Comm \fIcomm_old\fP, int\fI indegree\fP, const int\fI sources[]\fP, - const int\fI sourceweights[]\fP, int\fI outdegree\fP, const int\fI destinations\fP[], const int\fI destweights\fP[], - MPI_Info info, int\fI reorder\fP, MPI_Comm\fI *comm_dist_graph\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_DIST_GRAPH_CREATE_ADJACENT(\fICOMM_OLD, INDEGREE, SOURCES, SOURCEWEIGHTS, OUTDEGREE, - DESTINATIONS, DESTWEIGHTS, INFO, REORDER, COMM_DIST_GRAPH, IERROR\fP) - INTEGER \fICOMM_OLD, INDEGREE, SOURCES(*), SOURCEWEIGHTS(*), OUTDEGREE, DESTINATIONS(*), DESTWEIGHTS(*), INFO\fP - INTEGER \fICOMM_DIST_GRAPH, IERROR\fP - LOGICAL \fIREORDER\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Dist_Graph_create_adjacent(\fIcomm_old\fP, \fIndegree\fP, \fIsources\fP, \fIsourceweights\fP, - \fIoutdegree\fP, \fIdestinations\fP, \fIdestweights\fP, \fIinfo\fP, \fIreorder\fP, - \fIcomm_dist_graph\fP, \fIierror\fP) - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm_old\fP - INTEGER, INTENT(IN) :: \fIindegree\fP, \fIsources(indegree)\fP, \fIoutdegree\fP, \fIdestinations(outdegree)\fP - INTEGER, INTENT(IN) :: \fIsourceweights(*)\fP, \fIdestweights(*)\fP - TYPE(MPI_Info), INTENT(IN) :: \fIinfo\fP - LOGICAL, INTENT(IN) :: reorder - TYPE(MPI_Comm), INTENT(OUT) :: \fIcomm_dist_graph\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -comm_old -Input communicator without topology (handle). -.TP 1i -indegree -Size of \fIsources\fP and \fIsourceweights\fP arrays (non-negative integer). -.TP 1i -sources -Ranks of processes for which the calling process is a destination (array of non-negative integers). -.TP 1i -sourceweights -Weights of the edges into the calling process (array of non-negative integers). -.TP 1i -outdegree -Size of \fIdestinations\fP and \fIdestweights\fP arrays (non-negative integer). 
-.TP 1i -destinations -Ranks of processes for which the calling process is a source (array of non-negative integers). -.TP 1i -destweights -Weights of the edges out of the calling process (array of non-negative integers). -.TP 1i -info -Hints on optimization and interpretation of weights (handle). -.TP 1i -reorder -Ranking may be reordered (true) or not (false) (logical). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -comm_dist_graph -Communicator with distributed graph topology added (handle). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Dist_graph_create_adjacent creates a new communicator \fIcomm_dist_graph\fP with distributed -graph topology and returns a handle to the new communicator. The number of processes in -\fIcomm_dist_graph\fP is identical to the number of processes in \fIcomm_old\fP. Each process passes all -information about its incoming and outgoing edges in the virtual distributed graph topology. -The calling processes must ensure that each edge of the graph is described in the source -and in the destination process with the same weights. If there are multiple edges for a given -(source,dest) pair, then the sequence of the weights of these edges does not matter. The -complete communication topology is the combination of all edges shown in the \fIsources\fP arrays -of all processes in comm_old, which must be identical to the combination of all edges shown -in the \fIdestinations\fP arrays. Source and destination ranks must be process ranks of comm_old. -This allows a fully distributed specification of the communication graph. Isolated processes -(i.e., processes with no outgoing or incoming edges, that is, processes that have specified -indegree and outdegree as zero and thus do not occur as source or destination rank in the -graph specification) are allowed. The call to MPI_Dist_graph_create_adjacent is collective.
- -.SH WEIGHTS -.ft R -Weights are specified as non-negative integers and can be used to influence the process -remapping strategy and other internal MPI optimizations. For instance, approximate count -arguments of later communication calls along specific edges could be used as their edge -weights. Multiplicity of edges can likewise indicate more intense communication between -pairs of processes. However, the exact meaning of edge weights is not specified by the MPI -standard and is left to the implementation. An application can supply the special value -MPI_UNWEIGHTED for the weight array to indicate that all edges have the same (effectively -no) weight. It is erroneous to supply MPI_UNWEIGHTED for some but not all processes of -comm_old. If the graph is weighted but \fIindegree\fP or \fIoutdegree\fP is zero, then -MPI_WEIGHTS_EMPTY or any arbitrary array may be passed to sourceweights or destweights -respectively. Note that MPI_UNWEIGHTED and MPI_WEIGHTS_EMPTY are not special weight values; -rather they are special values for the total array argument. In Fortran, MPI_UNWEIGHTED -and MPI_WEIGHTS_EMPTY are objects like MPI_BOTTOM (not usable for initialization or -assignment). See MPI-3 § 2.5.4. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error.
- -.SH SEE ALSO -.ft R -.sp -MPI_Dist_graph_create -MPI_Dist_graph_neighbors -MPI_Dist_graph_neighbors_count - diff --git a/ompi/mpi/man/man3/MPI_Dist_graph_neighbors.3in b/ompi/mpi/man/man3/MPI_Dist_graph_neighbors.3in deleted file mode 100644 index 2ad96f625e2..00000000000 --- a/ompi/mpi/man/man3/MPI_Dist_graph_neighbors.3in +++ /dev/null @@ -1,95 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" $COPYRIGHT$ -.TH MPI_Dist_graph_neighbors 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Dist_graph_neighbors \fP \- Returns the neighbors of the calling process in a distributed graph topology. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Dist_graph_neighbors(MPI_Comm \fIcomm\fP, int \fImaxindegree\fP, int \fIsources\fP[], int \fIsourceweights\fP[], - int \fImaxoutdegree\fP, int \fIdestinations\fP[], int \fIdestweights\fP[]) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_DIST_GRAPH_NEIGHBORS(COMM, MAXINDEGREE, SOURCES, SOURCEWEIGHTS, - MAXOUTDEGREE, DESTINATIONS, DESTWEIGHTS, IERROR) - INTEGER COMM, MAXINDEGREE, SOURCES(*), SOURCEWEIGHTS(*), MAXOUTDEGREE, - DESTINATIONS(*), DESTWEIGHTS(*), IERROR - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Dist_Graph_neighbors(\fIcomm\fP, \fImaxindegree\fP, \fIsources\fP, \fIsourceweights\fP, - \fImaxoutdegree\fP, \fIdestinations\fP, \fIdestweights\fP, \fIierror\fP) - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - INTEGER, INTENT(IN) :: \fImaxindegree\fP, \fImaxoutdegree\fP - INTEGER, INTENT(OUT) :: \fIsources(maxindegree)\fP, \fIdestinations(maxoutdegree)\fP - INTEGER :: sourceweights(*), destweights(*) - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -comm -Communicator with distributed graph topology (handle). -.TP 1i -maxindegree -Size of \fIsources\fP and \fIsourceweights\fP arrays (non-negative integer). -.TP 1i -maxoutdegree -Size of \fIdestinations\fP and \fIdestweights\fP arrays (non-negative integer). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -sources -Processes for which the calling process is a destination (array of non-negative integers). -.TP 1i -sourceweights -Weights of the edges into the calling process (array of non-negative integers). -.TP 1i -destinations -Processes for which the calling process is a source (array of non-negative integers). -.TP 1i -destweights -Weights of the edges out of the calling process (array of non-negative integers). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Dist_graph_neighbors returns the source and destination ranks in a distributed graph topology -for the calling process. This call will return up to \fImaxindegree\fP source ranks in the \fIsources\fP array -and up to \fImaxoutdegree\fP destination ranks in the \fIdestinations\fP array. 
If weights were -specified at the time of the communicator's creation then the associated weights -are returned in the \fIsourceweights\fP and \fI destweights\fP arrays. If the communicator -was created with MPI_Dist_graph_create_adjacent then the order of the values in \fIsources\fP and -\fIdestinations\fP is identical to the input that was used by the process with the same rank in -comm_old in the creation call. - -.fi -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.sp -MPI_Dist_graph_neighbors_count - diff --git a/ompi/mpi/man/man3/MPI_Dist_graph_neighbors_count.3in b/ompi/mpi/man/man3/MPI_Dist_graph_neighbors_count.3in deleted file mode 100644 index 7a24fe2c04a..00000000000 --- a/ompi/mpi/man/man3/MPI_Dist_graph_neighbors_count.3in +++ /dev/null @@ -1,74 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" $COPYRIGHT$ -.TH MPI_Dist_graph_neighbors_count 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Dist_graph_neighbors_count \fP \- Returns the number of in and out edges for the calling processes in a distributed graph topology and a flag indicating whether the distributed graph is weighted. 
- -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Dist_graph_neighbors_count(MPI_Comm \fIcomm\fP, int\fI *indegree\fP, - int\fI *outdegree\fP, int\fI *weighted\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_DIST_GRAPH_NEIGHBORS_COUNT(\fICOMM, INDEGREE, OUTDEGREE, WEIGHTED, IERROR\fP) - INTEGER \fICOMM, INDEGREE, OUTDEGREE, IERROR\fP - LOGICAL \fIWEIGHTED\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Dist_graph_neighbors_count(\fIcomm\fP, \fIindegree\fP, \fIoutdegree\fP, \fIweighted\fP, \fIierror\fP) - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - INTEGER, INTENT(IN) :: \fIindegree\fP, \fIoutdegree\fP - INTEGER, INTENT(OUT) :: \fIweighted\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -comm -Communicator with distributed graph topology (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -indegree -Number of edges into this process (non-negative integer). -.TP 1i -outdegree -Number of edges out of this process (non-negative integer). -.TP 1i -weighted -False if MPI_UNWEIGHTED was supplied during creation, true otherwise (logical). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Dist_graph_neighbors_count and MPI_Graph_neighbors provide adjacency information for a distributed graph topology. MPI_Dist_graph_neighbors_count returns the number of sources and destinations for the calling process. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. 
Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.sp -MPI_Dist_graph_neighbors diff --git a/ompi/mpi/man/man3/MPI_Errhandler_create.3in b/ompi/mpi/man/man3/MPI_Errhandler_create.3in deleted file mode 100644 index 806c4f3999b..00000000000 --- a/ompi/mpi/man/man3/MPI_Errhandler_create.3in +++ /dev/null @@ -1,79 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Errhandler_create 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Errhandler_create \fP \- Creates an MPI-style error handler -- use of this routine is deprecated. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Errhandler_create(MPI_Handler_function *\fIfunction\fP, - MPI_Errhandler *\fIerrhandler\fP) - -.fi -.SH Fortran Syntax -.nf -INCLUDE 'mpif.h' -MPI_ERRHANDLER_CREATE(\fIFUNCTION, ERRHANDLER, IERROR\fP) - EXTERNAL \fIFUNCTION\fP - INTEGER \fIERRHANDLER, IERROR\fP - - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -function -User-defined error handling procedure. - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -errhandler -MPI error handler (handle). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -Note that use of this routine is \fIdeprecated\fP as of MPI-2. Please use MPI_Comm_create_errhandler instead. -.sp -Registers the user routine function for use as an MPI error handler. Returns in errhandler a handle to the registered error handler. -.sp -In the C language, the user routine should be a C function of type MPI_Handler_function, which is defined as -.sp -.nf - typedef void (MPI_Handler_function)(MPI_Comm *, int *, \&...); -.fi -.sp -The first argument is the communicator in use. The second is the error code -to be returned by the MPI routine that raised the error. 
If the routine would have returned MPI_ERR_IN_STATUS, it is the error code returned in the status for the request that caused the error handler to be invoked. The remaining arguments are stdargs arguments whose number and meaning is implementation-dependent. An implementation should clearly document these arguments. Addresses are used so that the handler may be written in Fortran. - -.SH NOTE -.ft R -The MPI-1 Standard states that an implementation may make the output value (errhandler) simply the address of the function. However, the action of MPI_Errhandler_ free makes this impossible, since it is required to set the value of the argument to MPI_ERRHANDLER_NULL. In addition, the actual error handler must remain until all communicators that use it are freed. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.br -MPI_Comm_create_errhandler -.br -MPI_Comm_get_errhandler -.br -MPI_Comm_set_errhandler - - - diff --git a/ompi/mpi/man/man3/MPI_Errhandler_free.3in b/ompi/mpi/man/man3/MPI_Errhandler_free.3in deleted file mode 100644 index b7accb78894..00000000000 --- a/ompi/mpi/man/man3/MPI_Errhandler_free.3in +++ /dev/null @@ -1,68 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. 
-.\" $COPYRIGHT$ -.TH MPI_Errhandler_free 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Errhandler_free \fP \- Frees an MPI-style error handler. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Errhandler_free(MPI_Errhandler *\fIerrhandler\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_ERRHANDLER_FREE(\fIERRHANDLER, IERROR\fP) - INTEGER \fIERRHANDLER, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Errhandler_free(\fIerrhandler\fP, \fIierror\fP) - TYPE(MPI_Errhandler), INTENT(INOUT) :: \fIerrhandler\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -errhandler -MPI error handler (handle). Set to MPI_ERRHANDLER_NULL on exit. - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -Marks the error handler associated with errhandler for deallocation and sets errhandler to MPI_ERRHANDLER_NULL. The error handler will be deallocated after all communicators associated with it have been deallocated. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. 
- -.SH SEE ALSO -.ft R -.sp -MPI_Comm_create_errhandler -.br -MPI_Comm_get_errhandler -.br -MPI_Comm_set_errhandler - - - - diff --git a/ompi/mpi/man/man3/MPI_Errhandler_get.3in b/ompi/mpi/man/man3/MPI_Errhandler_get.3in deleted file mode 100644 index 8a23805dce9..00000000000 --- a/ompi/mpi/man/man3/MPI_Errhandler_get.3in +++ /dev/null @@ -1,66 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Errhandler_get 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Errhandler_get \fP \- Gets the error handler for a communicator -- use of this routine is deprecated. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Errhandler_get(MPI_Comm \fIcomm\fP, MPI_Errhandler\fI *errhandler\fP) - -.fi -.SH Fortran Syntax -.nf -INCLUDE 'mpif.h' -MPI_ERRHANDLER_GET(\fICOMM, ERRHANDLER, IERROR\fP) - INTEGER \fICOMM, ERRHANDLER, IERROR\fP - - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -comm -Communicator to get the error handler from (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -errhandler -MPI error handler currently associated with communicator (handle). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -Note that use of this routine is \fIdeprecated\fP as of MPI-2. Please use MPI_Comm_get_errhandler instead. -.sp -Returns in errhandler (a handle to) the error handler that is currently associated with communicator comm. -.sp -\fBExample:\fP A library function may register at its entry point the current error handler for a communicator, set its own private error handler for this communicator, and restore before exiting the previous error handler. - - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. 
-.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.sp -MPI_Comm_create_errhandler -.br -MPI_Comm_get_errhandler -.br -MPI_Comm_set_errhandler - diff --git a/ompi/mpi/man/man3/MPI_Errhandler_set.3in b/ompi/mpi/man/man3/MPI_Errhandler_set.3in deleted file mode 100644 index 928cae994e1..00000000000 --- a/ompi/mpi/man/man3/MPI_Errhandler_set.3in +++ /dev/null @@ -1,62 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Errhandler_set 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Errhandler_set \fP \- Sets the error handler for a communicator -- use of this routine is deprecated. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Errhandler_set(MPI_Comm \fIcomm\fP, MPI_Errhandler \fIerrhandler\fP) - -.fi -.SH Fortran Syntax -.nf -INCLUDE 'mpif.h' -MPI_ERRHANDLER_SET(\fICOMM, ERRHANDLER, IERROR\fP) - INTEGER \fICOMM, ERRHANDLER, IERROR\fP - - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -comm -Communicator to set the error handler for (handle). -.TP 1i -errhandler -New MPI error handler for communicator (handle). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -Note that use of this routine is \fIdeprecated\fP as of MPI-2. Please use MPI_Comm_set_errhandler instead. -.sp -Associates the new error handler errhandler with communicator comm at the calling process. Note that an error handler is always associated with the communicator. 
- -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.sp -MPI_Comm_create_errhandler -.br -MPI_Comm_get_errhandler -.br -MPI_Comm_set_errhandler - - diff --git a/ompi/mpi/man/man3/MPI_Error_class.3in b/ompi/mpi/man/man3/MPI_Error_class.3in deleted file mode 100644 index 21397da88c1..00000000000 --- a/ompi/mpi/man/man3/MPI_Error_class.3in +++ /dev/null @@ -1,67 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Error_class 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Error_class \fP \- Converts an error code into an error class. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Error_class(int \fIerrorcode\fP, int\fI *errorclass\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_ERROR_CLASS(\fIERRORCODE, ERRORCLASS, IERROR\fP) - INTEGER \fIERRORCODE, ERRORCLASS, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Error_class(\fIerrorcode\fP, \fIerrorclass\fP, \fIierror\fP) - INTEGER, INTENT(IN) :: \fIerrorcode\fP - INTEGER, INTENT(OUT) :: \fIerrorclass\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -errorcode -Error code returned by an MPI routine. 
- -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -errorclass -Error class associated with errorcode. -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -The function MPI_Error_class maps each standard error code (error class) onto itself. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.sp -MPI_Error_string - - diff --git a/ompi/mpi/man/man3/MPI_Error_string.3in b/ompi/mpi/man/man3/MPI_Error_string.3in deleted file mode 100644 index 4bf346eea31..00000000000 --- a/ompi/mpi/man/man3/MPI_Error_string.3in +++ /dev/null @@ -1,75 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Error_string 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Error_string \fP \- Returns a string for a given error code. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Error_string(int \fIerrorcode\fP, char\fI *string\fP, int\fI *resultlen\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_ERROR_STRING(\fIERRORCODE, STRING, RESULTLEN, IERROR\fP) - INTEGER \fIERRORCODE, RESULTLEN, IERROR\fP - CHARACTER*(*) \fISTRING\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Error_string(\fIerrorcode\fP, \fIstring\fP, \fIresultlen\fP, \fIierror\fP) - INTEGER, INTENT(IN) :: \fIerrorcode\fP - CHARACTER(LEN=MPI_MAX_ERROR_STRING), INTENT(OUT) :: \fIstring\fP - INTEGER, INTENT(OUT) :: \fIresultlen\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -errorcode -Error code returned by an MPI routine or an MPI error class. - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -string -Text that corresponds to the errorcode. -.TP 1i -resultlen -Length of string. -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -Returns the error string associated with an error code or class. The argument string must represent storage that is at least MPI_MAX_ERROR_STRING characters long. -.sp -The number of characters actually written is returned in the output -argument, resultlen. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.sp -MPI_Error_class - - diff --git a/ompi/mpi/man/man3/MPI_Exscan.3in b/ompi/mpi/man/man3/MPI_Exscan.3in deleted file mode 100644 index c550f8625ea..00000000000 --- a/ompi/mpi/man/man3/MPI_Exscan.3in +++ /dev/null @@ -1,184 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. 
-.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Exscan 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" - -.SH NAME -\fBMPI_Exscan, MPI_Iexscan\fP \- Computes an exclusive scan (partial reduction) - -.SH SYNTAX -.ft R - -.SH C Syntax -.nf -#include -int MPI_Exscan(const void *\fIsendbuf\fP, void *\fIrecvbuf\fP, int \fIcount\fP, - MPI_Datatype \fIdatatype\fP, MPI_Op \fIop\fP, MPI_Comm \fIcomm\fP) - -int MPI_Iexscan(const void *\fIsendbuf\fP, void *\fIrecvbuf\fP, int \fIcount\fP, - MPI_Datatype \fIdatatype\fP, MPI_Op \fIop\fP, MPI_Comm \fIcomm\fP, - MPI_Request \fI*request\fP) - -int MPI_Exscan_init(const void *\fIsendbuf\fP, void *\fIrecvbuf\fP, int \fIcount\fP, - MPI_Datatype \fIdatatype\fP, MPI_Op \fIop\fP, MPI_Comm \fIcomm\fP, - MPI_Info \fIinfo\fP, MPI_Request \fI*request\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_EXSCAN(\fISENDBUF, RECVBUF, COUNT, DATATYPE, OP, COMM, IERROR\fP) - \fISENDBUF(*), RECVBUF(*)\fP - INTEGER \fICOUNT, DATATYPE, OP, COMM, IERROR\fP - -MPI_IEXSCAN(\fISENDBUF, RECVBUF, COUNT, DATATYPE, OP, COMM, REQUEST, IERROR\fP) - \fISENDBUF(*), RECVBUF(*)\fP - INTEGER \fICOUNT, DATATYPE, OP, COMM, REQUEST, IERROR\fP - -MPI_EXSCAN_INIT(\fISENDBUF, RECVBUF, COUNT, DATATYPE, OP, COMM, INFO, REQUEST, IERROR\fP) - \fISENDBUF(*), RECVBUF(*)\fP - INTEGER \fICOUNT, DATATYPE, OP, COMM, INFO, REQUEST, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Exscan(\fIsendbuf\fP, \fIrecvbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIop\fP, \fIcomm\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN) :: \fIsendbuf\fP - TYPE(*), DIMENSION(..) 
:: \fIrecvbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Op), INTENT(IN) :: \fIop\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -MPI_Iexscan(\fIsendbuf\fP, \fIrecvbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIop\fP, \fIcomm\fP, \fIrequest\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIsendbuf\fP - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIrecvbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Op), INTENT(IN) :: \fIop\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -MPI_Exscan_init(\fIsendbuf\fP, \fIrecvbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIop\fP, \fIcomm\fP, \fIinfo\fP, \fIrequest\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIsendbuf\fP - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIrecvbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Op), INTENT(IN) :: \fIop\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Info), INTENT(IN) :: \fIinfo\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -sendbuf -Send buffer (choice). -.TP 1i -count -Number of elements in input buffer (integer). -.TP 1i -datatype -Data type of elements of input buffer (handle). -.TP 1i -op -Operation (handle). -.TP 1i -comm -Communicator (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -recvbuf -Receive buffer (choice). -.TP 1i -request -Request (handle, non-blocking only). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Exscan is used to perform an exclusive prefix reduction on data -distributed across the calling processes. 
The operation returns, in -the \fIrecvbuf\fP of the process with rank i, the reduction -(calculated according to the function \fIop\fP) of the values in the -\fIsendbuf\fPs of processes with ranks 0, ..., i-1. Compare this with -the functionality of MPI_Scan, which calculates over the range 0, ..., -i (inclusive). The type of operations supported, their semantics, and -the constraints on send and receive buffers are as for MPI_Reduce. -.sp -The value in \fIrecvbuf\fP on process 0 is undefined and unreliable -as \fIrecvbuf\fP is not significant for process 0. The value of -\fIrecvbuf\fP on process 1 is always the value in \fIsendbuf\fP on -process 0. -.sp -.SH USE OF IN-PLACE OPTION -The `in place' option for intracommunicators is specified by passing MPI_IN_PLACE in the \fIsendbuf\fP argument. In this case, the input data is taken from the receive buffer, and replaced by the output data. -.sp -Note that MPI_IN_PLACE is a special kind of value; it has the same restrictions on its use as MPI_BOTTOM. -.sp -Because the in-place option converts the receive buffer into a send-and-receive buffer, a Fortran binding that includes INTENT must mark these as INOUT, not OUT. -.sp - -.SH NOTES -.ft R -MPI does not specify which process computes which operation. In -particular, both processes 0 and 1 may participate in the computation -even though the results for both processes' \fIrecvbuf\fP are -degenerate. Therefore, all processes, including 0 and 1, must provide -the same \fIop\fP. -.sp -It can be argued, from a mathematical perspective, that the definition -of MPI_Exscan is unsatisfactory because the output at process 0 is -undefined. The "mathematically correct" output for process 0 would be -the unit element of the reduction operation. However, such a -definition of an exclusive scan would not work with user-defined -\fIop\fP functions as there is no way for MPI to "know" the unit value -for these custom operations. 
- -.SH NOTES ON COLLECTIVE OPERATIONS -.ft R -The reduction functions of type MPI_Op do not return an error value. -As a result, if the functions detect an error, all they can do is -either call MPI_Abort or silently skip the problem. Thus, if the -error handler is changed from MPI_ERRORS_ARE_FATAL to something else -(e.g., MPI_ERRORS_RETURN), then no error may be indicated. -.sp -The reason for this is the performance problems in ensuring that -all collective routines return the same error value. - -.SH ERRORS -.ft R -Almost all MPI routines return an error value; C routines as -the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. -.sp -See the MPI man page for a full list of MPI error codes. - -.SH SEE ALSO -.ft R -.nf -MPI_Op_create -MPI_Reduce -MPI_Scan - diff --git a/ompi/mpi/man/man3/MPI_Exscan_init.3in b/ompi/mpi/man/man3/MPI_Exscan_init.3in deleted file mode 100644 index c2ff4cf3254..00000000000 --- a/ompi/mpi/man/man3/MPI_Exscan_init.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Exscan.3 diff --git a/ompi/mpi/man/man3/MPI_Fetch_and_op.3in b/ompi/mpi/man/man3/MPI_Fetch_and_op.3in deleted file mode 100644 index fc380a3aef7..00000000000 --- a/ompi/mpi/man/man3/MPI_Fetch_and_op.3in +++ /dev/null @@ -1,124 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013-2015 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. 
-.\" Copyright (c) 1996 Thinking Machines Corporation -.\" $COPYRIGHT$ -.TH MPI_Fetch_and_op 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Fetch_and_op\fP \- Combines the contents of the origin buffer with that of a target buffer and returns the target buffer value. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Fetch_and_op(const void *\fIorigin_addr\fP, void *\fIresult_addr\fP, - MPI_Datatype \fIdatatype\fP, int \fItarget_rank\fP, MPI_Aint \fItarget_disp\fP, - MPI_Op \fIop\fP, MPI_Win \fIwin\fP) - -.fi -.SH Fortran Syntax (see FORTRAN 77 NOTES) -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_FETCH_AND_OP(\fIORIGIN_ADDR, RESULT_ADDR, DATATYPE, TARGET_RANK, - TARGET_DISP, OP, WIN, IERROR\fP) - \fIORIGIN_ADDR\fP, \fIRESULT_ADDR\fP(*) - INTEGER(KIND=MPI_ADDRESS_KIND) \fITARGET_DISP\fP - INTEGER \fIDATATYPE, TARGET_RANK, OP, WIN, IERROR \fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Fetch_and_op(\fIorigin_addr\fP, \fIresult_addr\fP, \fIdatatype\fP, \fItarget_rank\fP, - \fItarget_disp\fP, \fIop\fP, \fIwin\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN) :: \fIorigin_addr\fP - TYPE(*), DIMENSION(..) :: \fIresult_addr\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - INTEGER, INTENT(IN) :: \fItarget_rank\fP - INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: \fItarget_disp\fP - TYPE(MPI_Op), INTENET(IN) :: \fIop\fP - TYPE(MPI_Win), INTENT(IN) :: \fIwin\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -origin_addr -Initial address of buffer (choice). -.ft R -.TP -result_addr -Initial address of result buffer (choice). -.ft R -.TP -datatype -Data type of the entry in origin, result, and target buffers (handle). -.ft R -.TP 1i -target_rank -Rank of target (nonnegative integer). -.ft R -.TP 1i -target_disp -Displacement from start of window to beginning of target buffer (nonnegative integer). -.ft R -.TP 1i -op -Reduce operation (handle). 
-.ft R -.TP 1i -win -Window object (handle). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -Accumulate one element of type \fIdatatype\fP from the origin buffer (\fIorigin_addr\fP) to the buffer at offset \fItarget_disp\fP, in the target window specified by \fItarget_rank\fP and \fIwin\fP, using the operation \fIop\fP and return in the result buffer \fIresult_addr\fP the contents of the target buffer before the accumulation. -.sp -The origin and result buffers (\fIorigin_addr\fP and \fIresult_addr\fP) must be disjoint. Any of the predefined operations for \fBMPI_Rreduce\fP, as well as MPI_NO_OP or MPI_REPLACE, can be specified as \fIop\fP; user-defined functions cannot be used. The \fIdatatype\fP argument must be a predefined datatype. The operation is executed atomically. -.sp -A new predefined operation, MPI_REPLACE, is defined. It corresponds to the associative function f(a, b) =b; that is, the current value in the target memory is replaced by the value supplied by the origin. -.sp -A new predefined operation, MPI_NO_OP, is defined. It corresponds to the assiciative function f(a, b) = a; that is the current value in the target memory is returned in the result buffer at the origin and no operation is performed on the target buffer. - -.SH FORTRAN 77 NOTES -.ft R -The MPI standard prescribes portable Fortran syntax for -the \fITARGET_DISP\fP argument only for Fortran 90. FORTRAN 77 -users may use the non-portable syntax -.sp -.nf - INTEGER*MPI_ADDRESS_KIND \fITARGET_DISP\fP -.fi -.sp -where MPI_ADDRESS_KIND is a constant defined in mpif.h -and gives the length of the declared integer in bytes. - -.SH NOTES -It is the user's responsibility to guarantee that, when -using the accumulate functions, the target displacement argument is such -that accesses to the window are properly aligned according to the data -type arguments in the call to the MPI_Fetch_and_op function. 
- -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler -may be changed with \fBMPI_Comm_set_errhandler\fP; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.sp -MPI_Get_accumulate -.br -MPI_Reduce diff --git a/ompi/mpi/man/man3/MPI_File_c2f.3in b/ompi/mpi/man/man3/MPI_File_c2f.3in deleted file mode 100644 index a13fce697dd..00000000000 --- a/ompi/mpi/man/man3/MPI_File_c2f.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Comm_f2c.3 diff --git a/ompi/mpi/man/man3/MPI_File_call_errhandler.3in b/ompi/mpi/man/man3/MPI_File_call_errhandler.3in deleted file mode 100644 index ecf27696f5b..00000000000 --- a/ompi/mpi/man/man3/MPI_File_call_errhandler.3in +++ /dev/null @@ -1,77 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_File_call_errhandler 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" - -.SH NAME -\fBMPI_File_call_errhandler\fP \- Passes the supplied error code to the -error handler assigned to a file - -.SH SYNTAX -.ft R - -.SH C Syntax -.nf -#include -int MPI_File_call_errhandler(MPI_File \fIfh\fP, int \fIerrorcode\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_FILE_CALL_ERRHANDLER(\fIFH, ERRORCODE, IERROR\fP) - INTEGER \fIFH, IERRORCODE, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_call_errhandler(\fIfh\fP, \fIerrorcode\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - INTEGER, INTENT(IN) :: \fIerrorcode\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1.4i -fh -file with error handler (handle). -.ft R -.TP 1.4i -errorcode -MPI error code (integer). - -.SH OUTPUT PARAMETER -.ft R -.TP 1.4i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -This function invokes the error handler assigned to the file handle -\fIfh\fP with the supplied error code \fIerrorcode\fP. If the error -handler was successfully called, the process is not aborted, and the -error handler returns, this function returns MPI_SUCCESS. -.sp -Unlike errors on communicators and windows, the default errorhandler -for files is MPI_ERRORS_RETURN. - -.SH ERRORS -.ft R -Almost all MPI routines return an error value; C routines as -the value of the function and Fortran routines in the last argument. -.sp -See the MPI man page for a full list of MPI error codes. - -.SH SEE ALSO -.ft R -.nf -MPI_File_create_errhandler -MPI_File_set_errhandler - diff --git a/ompi/mpi/man/man3/MPI_File_close.3in b/ompi/mpi/man/man3/MPI_File_close.3in deleted file mode 100644 index 0f1b2a9f9d1..00000000000 --- a/ompi/mpi/man/man3/MPI_File_close.3in +++ /dev/null @@ -1,66 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. 
-.\" $COPYRIGHT$ -.TH MPI_File_close 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_close\fP \- Closes a file (collective). - -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_File_close(MPI_File \fI*fh\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_FILE_CLOSE(\fIFH\fP, \fIIERROR\fP) - INTEGER \fIFH, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_close(\fIfh\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(INOUT) :: \fIfh\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -fh -File handle (handle). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_File_close first synchronizes file state, then closes the file -associated with -.I fh. -MPI_File_close is a collective routine. The user is responsible for -ensuring that all outstanding requests associated with -.I fh -have completed before calling MPI_File_close. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. - - diff --git a/ompi/mpi/man/man3/MPI_File_create_errhandler.3in b/ompi/mpi/man/man3/MPI_File_create_errhandler.3in deleted file mode 100644 index c5cabc644c4..00000000000 --- a/ompi/mpi/man/man3/MPI_File_create_errhandler.3in +++ /dev/null @@ -1,89 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright 2009-2010 Cisco Systems, Inc. All rights reserved. 
-.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_File_create_errhandler 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_create_errhandler \fP \- Creates an MPI-style error handler that can be attached to a file. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_File_create_errhandler(MPI_File_errhandler_function \fI*function\fP, - MPI_Errhandler \fI*errhandler\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_FILE_CREATE_ERRHANDLER(\fIFUNCTION, ERRHANDLER, IERROR\fP) - EXTERNAL \fIFUNCTION\fP - INTEGER \fIERRHANDLER, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_create_errhandler(\fIfile_errhandler_fn\fP, \fIerrhandler\fP, \fIierror\fP) - PROCEDURE(MPI_File_errhandler_function) :: \fIfile_errhandler_fn\fP - TYPE(MPI_Errhandler), INTENT(OUT) :: \fIerrhandler\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH DEPRECATED TYPE NAME NOTE -.ft R -MPI-2.2 deprecated the MPI_File_errhandler_fn and -MPI::file::Errhandler_fn types in favor of -MPI_File_errhandler_function and MPI::File::Errhandler_function, -respectively. Open MPI supports both names (indeed, the _fn names are -typedefs to the _function names). - -.SH INPUT PARAMETER -.ft R -.TP 1i -function -User-defined error handling procedure (function). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -errhandler -MPI error handler (handle). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -Registers the user routine \fIfunction\fP for use as an MPI error handler. Returns in errhandler a handle to the registered error handler. 
-.sp -In the C language, the user routine \fIfunction\fP should be a C function of type MPI_File_errhandler_function, which is defined as -.sp -.nf - typedef void (MPI_File_errhandler_function)(MPI_File *, int *, - \&...); -.fi -.sp -The first argument to \fIfunction\fP is the file in use. The second is the error code -to be returned by the MPI routine that raised the error. -.sp -In the Fortran language, the user routine should be of the form: -.sp -.nf - SUBROUTINE FILE_ERRHANDLER_FUNCTION(FILE, ERROR_CODE, ...) - INTEGER FILE, ERROR_CODE -.fi - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. - - diff --git a/ompi/mpi/man/man3/MPI_File_delete.3in b/ompi/mpi/man/man3/MPI_File_delete.3in deleted file mode 100644 index 72d9cee190f..00000000000 --- a/ompi/mpi/man/man3/MPI_File_delete.3in +++ /dev/null @@ -1,68 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_File_delete 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_delete\fP \- Deletes a file. 
- -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_File_delete(const char \fI*filename\fP, MPI_Info \fIinfo\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_FILE_DELETE(\fIFILENAME\fP, \fIINFO\fP, \fIIERROR\fP) - CHARACTER*(*) \fIFILENAME\fP - INTEGER \fIINFO, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_delete(\fIfilename\fP, \fIinfo\fP, \fIierror\fP) - CHARACTER(LEN=*), INTENT(IN) :: \fIfilename\fP - TYPE(MPI_Info), INTENT(IN) :: \fIinfo\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -filename -Name of file to delete (string). -.TP 1i -info -Info object (handle). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_File_delete deletes the file identified by the file name -\fIfilename\fP, provided it is not currently open by any process. It is an error to delete the file with MPI_File_delete if some process has it open, but MPI_File_delete does not check this. If the file does not exist, MPI_File_delete returns an error in the class MPI_ERR_NO_SUCH_FILE. -.sp - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. 
- - diff --git a/ompi/mpi/man/man3/MPI_File_f2c.3in b/ompi/mpi/man/man3/MPI_File_f2c.3in deleted file mode 100644 index f2a362310d8..00000000000 --- a/ompi/mpi/man/man3/MPI_File_f2c.3in +++ /dev/null @@ -1,2 +0,0 @@ -.\" -*- nroff -*- -.so man3/MPI_Comm_f2c.3 diff --git a/ompi/mpi/man/man3/MPI_File_get_amode.3in b/ompi/mpi/man/man3/MPI_File_get_amode.3in deleted file mode 100644 index 8100dadba93..00000000000 --- a/ompi/mpi/man/man3/MPI_File_get_amode.3in +++ /dev/null @@ -1,67 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_File_get_amode 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_get_amode\fP \- Returns access mode associated with an open file. - -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_File_get_amode(MPI_File \fIfh\fP, int \fI*amode\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_FILE_GET_AMODE(\fIFH\fP, \fIAMODE\fP, \fIIERROR\fP) - INTEGER \fIFH, AMODE, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_get_amode(\fIfh\fP, \fIamode\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - INTEGER, INTENT(OUT) :: \fIamode\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -fh -File handle (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -amode -File access mode used to open the file (integer). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R - -MPI_File_get_amode returns, in -.I amode, -the access mode associated with the open file -.I fh. 
- -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_File_get_atomicity.3in b/ompi/mpi/man/man3/MPI_File_get_atomicity.3in deleted file mode 100644 index 3d9631cd740..00000000000 --- a/ompi/mpi/man/man3/MPI_File_get_atomicity.3in +++ /dev/null @@ -1,72 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_File_get_atomicity 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_get_atomicity\fP \- Returns current consistency semantics for data-access operations. - -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_File_get_atomicity(MPI_File \fIfh\fP, int \fI*flag\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_FILE_GET_ATOMICITY(\fIFH\fP, \fIFLAG\fP, \fIIERROR\fP) - INTEGER \fIFH, IERROR\fP - LOGICAL \fIFLAG\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_get_atomicity(\fIfh\fP, \fIflag\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - LOGICAL, INTENT(OUT) :: \fIflag\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -fh -File handle (handle). 
- -.SH OUTPUT PARAMETER -.ft R -.TP 1i -flag -true if atomic mode is enabled, false if nonatomic mode is enabled (boolean). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_File_get_atomicity returns the current consistency semantics for -data access operations on the set of file handles created by one -collective MPI_File_open. If \fIflag\fP is -.I true, -atomic mode is currently enabled; if -.I flag -is -.I false, -nonatomic mode is currently enabled. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_File_get_byte_offset.3in b/ompi/mpi/man/man3/MPI_File_get_byte_offset.3in deleted file mode 100644 index 67bd05731a3..00000000000 --- a/ompi/mpi/man/man3/MPI_File_get_byte_offset.3in +++ /dev/null @@ -1,86 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_File_get_byte_offset 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_get_byte_offset\fP \- Converts a view-relative offset into an absolute byte position. 
- -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_File_get_byte_offset(MPI_File \fIfh\fP, MPI_Offset \fIoffset\fP, - MPI_Offset \fI*disp\fP) - -.fi -.SH Fortran Syntax (see FORTRAN 77 NOTES) -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_FILE_GET_BYTE_OFFSET(\fIFH\fP, \fIOFFSET\fP, \fIDISP\fP, \fIIERROR\fP) - INTEGER \fIFH, IERROR\fP - INTEGER(KIND=MPI_OFFSET_KIND) \fIOFFSET, DISP\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_get_byte_offset(\fIfh\fP, \fIoffset\fP, \fIdisp\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - INTEGER(KIND=MPI_OFFSET_KIND), INTENT(IN) :: \fIoffset\fP - INTEGER(KIND=MPI_OFFSET_KIND), INTENT(OUT) :: \fIdisp\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -fh -File handle (handle). -.ft R -.TP 1i -offset -Offset (integer). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -disp -Absolute byte position of offset (integer). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_File_get_byte_offset converts an offset specified for the current view to its corresponding displacement value, or absolute byte position, from the beginning of the file. The absolute byte position of \fIoffset\fP relative to the current view of \fIfh\fP is returned in \fIdisp\fP. - -.SH FORTRAN 77 NOTES -.ft R -The MPI standard prescribes portable Fortran syntax for -the \fIOFFSET\fP and \fIDISP\fP arguments only for Fortran 90. Sun FORTRAN 77 -users may use the non-portable syntax -.sp -.nf - INTEGER*MPI_OFFSET_KIND \fIOFFSET\fP -or - INTEGER*MPI_OFFSET_KIND \fIDISP\fP -.fi -.sp -where MPI_OFFSET_KIND is a constant defined in mpif.h -and gives the length of the declared integer in bytes. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. 
For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. - - diff --git a/ompi/mpi/man/man3/MPI_File_get_errhandler.3in b/ompi/mpi/man/man3/MPI_File_get_errhandler.3in deleted file mode 100644 index 1f26f9e5b47..00000000000 --- a/ompi/mpi/man/man3/MPI_File_get_errhandler.3in +++ /dev/null @@ -1,64 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_File_get_errhandler 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_get_errhandler \fP \- Gets the error handler for a file. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_File_get_errhandler(MPI_File \fIfile\fP, MPI_Errhandler\fI - *errhandler\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_FILE_GET_ERRHANDLER(\fIFILE, ERRHANDLER, IERROR\fP) - INTEGER \fIFILE, ERRHANDLER, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_get_errhandler(\fIfile\fP, \fIerrhandler\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfile\fP - TYPE(MPI_Errhandler), INTENT(OUT) :: \fIerrhandler\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -file -File (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -errhandler -MPI error handler currently associated with file (handle). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). 
- -.SH DESCRIPTION -.ft R -Returns in \fIerrhandler\fP (a handle to) the error handler that is currently associated with file \fIfile\fP. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_File_get_group.3in b/ompi/mpi/man/man3/MPI_File_get_group.3in deleted file mode 100644 index cd945151d98..00000000000 --- a/ompi/mpi/man/man3/MPI_File_get_group.3in +++ /dev/null @@ -1,69 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_File_get_group 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_get_group\fP \- Returns a duplicate of the process group of a file. - -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_File_get_group(MPI_File \fIfh\fP, MPI_Group \fI*group\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_FILE_GET_GROUP(\fIFH\fP, \fIGROUP\fP, \fIIERROR\fP) - INTEGER \fIFH, GROUP, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_get_group(\fIfh\fP, \fIgroup\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - TYPE(MPI_Group), INTENT(OUT) :: \fIgroup\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -fh -File handle (handle). - -.SH OUTPUT PARAMETERS -.TP 1i -group -Group that opened the file (handle). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_File_get_group returns a duplicate of the group of the communicator -used to open the file associated with -.I fh. -The group is returned in -.I group. -The user is responsible for freeing -.I group, -using MPI_Group_free. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_File_get_info.3in b/ompi/mpi/man/man3/MPI_File_get_info.3in deleted file mode 100644 index d97cefa4c7c..00000000000 --- a/ompi/mpi/man/man3/MPI_File_get_info.3in +++ /dev/null @@ -1,105 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. 
-.\" $COPYRIGHT$ -.TH MPI_File_get_info 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_get_info\fP \- Returns a new info object containing values for current hints associated with a file. - - -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_File_get_info(MPI_File \fIfh\fP, MPI_Info \fI*info_used\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_FILE_GET_INFO(\fIFH\fP, \fIINFO_USED\fP, \fIIERROR\fP) - INTEGER \fIFH, INFO_USED, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_get_info(\fIfh\fP, \fIinfo_used\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - TYPE(MPI_Info), INTENT(OUT) :: \fIinfo_used\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -fh -File handle (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -info_used -New info object (handle). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_File_get_info returns a new info object containing all the hints that the system currently associates with the file \fIfh\fP. The current setting of all hints actually used by the system related to this open file is returned in \fIinfo_used\fP. The user is responsible for freeing \fIinfo_used\fP via MPI_Info_free. - -Note that the set of hints returned in \fIinfo_used\fP may be greater or smaller than the set of hints passed in to MPI_File_open, MPI_File_set_view, and MPI_File_set_info, as the system may not recognize some hints set by the user, and may automatically set other hints that the user has not requested to be set. See the HINTS section for a list of hints that can be set. - -.SH HINTS -.ft R -The following hints can be used as values for the \fIinfo_used\fP argument. -.sp -SETTABLE HINTS: -.sp -- shared_file_timeout: Amount of time (in seconds) to wait for access to the -shared file pointer before exiting with MPI_ERR_TIMEDOUT. 
-.sp -- rwlock_timeout: Amount of time (in seconds) to wait for obtaining a read or -write lock on a contiguous chunk of a UNIX file before exiting with MPI_ERR_TIMEDOUT. -.sp -- noncoll_read_bufsize: Maximum size of the buffer used by -MPI I/O to satisfy read requests in -the noncollective data-access routines. (See NOTE, below.) -.sp -- noncoll_write_bufsize: Maximum size of the buffer used by -MPI I/O to satisfy write requests in -the noncollective data-access routines. (See NOTE, below.) -.sp -- coll_read_bufsize: Maximum size of the buffer used by MPI -I/O to satisfy read requests in the -collective data-access routines. (See NOTE, below.) -.sp -- coll_write_bufsize: Maximum size of the buffer used by MPI -I/O to satisfy write requests in the -collective data-access routines. (See NOTE, below.) -.sp -NOTE: A buffer size smaller than the distance (in bytes) in a UNIX file between the first byte and the last byte of the access request causes MPI I/O to iterate and perform multiple UNIX read() or write() calls. If the request includes multiple noncontiguous chunks of data, and the buffer size is greater than the size of those chunks, then the UNIX read() or write() (made at the MPI I/O level) will access data not requested by this process in order to reduce the total number of write() calls made. If this is not desirable behavior, you should reduce this buffer size to equal the size of the contiguous chunks within the aggregate request. -.sp -- mpiio_concurrency: (boolean) controls whether nonblocking -I/O routines can bind an extra thread to an LWP. -.sp -- mpiio_coll_contiguous: (boolean) controls whether subsequent collective data accesses will request collectively contiguous regions of the file. -.sp -NON-SETTABLE HINTS: -.sp -- filename: Access this hint to get the name of the file. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. 
-.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_File_get_position.3in b/ompi/mpi/man/man3/MPI_File_get_position.3in deleted file mode 100644 index 6405a62baf4..00000000000 --- a/ompi/mpi/man/man3/MPI_File_get_position.3in +++ /dev/null @@ -1,81 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_File_get_position 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_get_position\fP \- Returns the current position of the individual file pointer. - -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_File_get_position(MPI_File \fIfh\fP, MPI_Offset \fI*offset\fP) - -.fi -.SH Fortran Syntax (see FORTRAN 77 NOTES) -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_FILE_GET_POSITION(\fIFH\fP, \fIOFFSET\fP, \fIIERROR\fP) - INTEGER \fIFH, IERROR\fP - INTEGER(KIND=MPI_OFFSET_KIND) \fIOFFSET\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_get_position(\fIfh\fP, \fIoffset\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - INTEGER(KIND=MPI_OFFSET_KIND), INTENT(OUT) :: \fIoffset\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -fh -File handle (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -offset -Offset of the individual file pointer (integer). 
-.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_File_get_position returns, in -.I offset, -the current position of the individual file pointer in -.I etype -units relative to the current displacement and file type. - -.SH FORTRAN 77 NOTES -.ft R -The MPI standard prescribes portable Fortran syntax for -the \fIOFFSET\fP argument only for Fortran 90. Sun FORTRAN 77 -users may use the non-portable syntax -.sp -.nf - INTEGER*MPI_OFFSET_KIND \fIOFFSET\fP -.fi -.sp -where MPI_ADDRESS_KIND is a constant defined in mpif.h -and gives the length of the declared integer in bytes. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_File_get_position_shared.3in b/ompi/mpi/man/man3/MPI_File_get_position_shared.3in deleted file mode 100644 index 568ee4b3420..00000000000 --- a/ompi/mpi/man/man3/MPI_File_get_position_shared.3in +++ /dev/null @@ -1,82 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" Copyright (c) 2020 FUJITSU LIMITED. All rights reserved. 
-.\" $COPYRIGHT$ -.TH MPI_File_get_position_shared 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_get_position_shared\fP \- Returns the current position of the shared file pointer. - -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_File_get_position_shared(MPI_File \fIfh\fP, MPI_Offset \fI*offset\fP) - -.fi -.SH Fortran Syntax (see FORTRAN 77 NOTES) -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_FILE_GET_POSITION_SHARED(\fIFH\fP, \fIOFFSET\fP, \fIIERROR\fP) - INTEGER \fIFH, IERROR\fP - INTEGER(KIND=MPI_OFFSET_KIND) \fIOFFSET\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_get_position_shared(\fIfh\fP, \fIoffset\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - INTEGER(KIND=MPI_OFFSET_KIND), INTENT(OUT) :: \fIoffset\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -fh -File handle (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -offset -Offset of the shared file pointer (integer). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_File_get_position_shared returns, in -.I offset, -the current position of the shared file pointer in -.I etype -units relative to the current displacement and file type. - -.SH FORTRAN 77 NOTES -.ft R -The MPI standard prescribes portable Fortran syntax for -the \fIOFFSET\fP argument only for Fortran 90. Sun FORTRAN 77 -users may use the non-portable syntax -.sp -.nf - INTEGER*MPI_OFFSET_KIND \fIOFFSET\fP -.fi -.sp -where MPI_ADDRESS_KIND is a constant defined in mpif.h -and gives the length of the declared integer in bytes. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. 
The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_File_get_size.3in b/ompi/mpi/man/man3/MPI_File_get_size.3in deleted file mode 100644 index e13aae9823e..00000000000 --- a/ompi/mpi/man/man3/MPI_File_get_size.3in +++ /dev/null @@ -1,85 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_File_get_size 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_get_size\fP \- Returns the current size of the file. - -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_File_get_size(MPI_File \fIfh\fP, MPI_Offset \fI*size\fP) - -.fi -.SH Fortran Syntax (see FORTRAN 77 NOTES) -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_FILE_GET_SIZE(\fIFH\fP, \fISIZE\fP, \fIIERROR\fP) - INTEGER \fIFH, ERROR\fP - INTEGER(KIND=MPI_OFFSET_KIND) \fISIZE\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_get_size(\fIfh\fP, \fIsize\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - INTEGER(KIND=MPI_OFFSET_KIND), INTENT(OUT) :: \fIsize\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -fh -File handle (handle). -.TP 1i -size -Size of the file in bytes (integer). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_File_get_size returns, in -.I size -, the current size in bytes of the file associated with the file handle -\fIfh\fP. 
Note that the file size returned by Solaris may not represent the number of bytes physically allocated for the file in those cases where all bytes in this file have not been written at least once. - -.SH FORTRAN 77 NOTES -.ft R -The MPI standard prescribes portable Fortran syntax for -the \fISIZE\fP argument only for Fortran 90. Sun FORTRAN 77 -users may use the non-portable syntax -.sp -.nf - INTEGER*MPI_OFFSET_KIND \fISIZE\fP -.fi -.sp -where MPI_ADDRESS_KIND is a constant defined in mpif.h -and gives the length of the declared integer in bytes. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.br -MPI_File_preallocate -.br - diff --git a/ompi/mpi/man/man3/MPI_File_get_type_extent.3in b/ompi/mpi/man/man3/MPI_File_get_type_extent.3in deleted file mode 100644 index 2199129ed25..00000000000 --- a/ompi/mpi/man/man3/MPI_File_get_type_extent.3in +++ /dev/null @@ -1,88 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_File_get_type_extent 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_get_type_extent\fP \- Returns the extent of the data type in a file. 
- -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_File_get_type_extent(MPI_File \fIfh\fP, MPI_Datatype - \fIdatatype\fP, MPI_Aint \fI*extent\fP) - -.fi -.SH Fortran Syntax (see FORTRAN 77 NOTES) -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_FILE_GET_TYPE_EXTENT(\fIFH\fP, \fIDATATYPE\fP, \fIEXTENT\fP, \fIIERROR\fP) - INTEGER \fIFH, DATATYPE, IERROR\fP - INTEGER(KIND=MPI_ADDRESS_KIND) \fIEXTENT\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_get_type_extent(\fIfh\fP, \fIdatatype\fP, \fIextent\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(OUT) :: \fIextent\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -fh -File handle (handle). -.ft R -.TP 1i -datatype -Data type (handle). - - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -extent -Data type extent (integer). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_File_get_type_extent can be used to calculate \fIextent\fP for \fIdatatype\fP in the file. The extent is the same for all processes accessing the file associated with \fIfh\fP. If the current view uses a user-defined data representation, MPI_File_get_type_extent uses the \fIdtype_file_extent_fn\fP callback to calculate the extent. - -.SH FORTRAN 77 NOTES -.ft R -The MPI standard prescribes portable Fortran syntax for -the \fIEXTENT\fP argument only for Fortran 90. FORTRAN 77 -users may use the non-portable syntax -.sp -.nf - INTEGER*MPI_ADDRESS_KIND \fIEXTENT\fP -.fi -.sp -where MPI_ADDRESS_KIND is a constant defined in mpif.h -and gives the length of the declared integer in bytes. - -.SH NOTES -.ft R -If the file data representation is other than "native," care must be taken in constructing etypes and file types. 
Any of the data-type constructor functions may be used; however, for those functions that accept displacements in bytes, the displacements must be specified in terms of their values in the file for the file data representation being used. MPI will interpret these byte displacements as is; no scaling will be done. The function MPI_File_get_type_extent can be used to calculate the extents of data types in the file. For etypes and file types that are portable data types, MPI will scale any displacements in the data types to match the file data representation. Data types passed as arguments to read/write routines specify the data layout in memory; therefore, they must always be constructed using displacements corresponding to displacements in memory. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_File_get_view.3in b/ompi/mpi/man/man3/MPI_File_get_view.3in deleted file mode 100644 index aee619c2119..00000000000 --- a/ompi/mpi/man/man3/MPI_File_get_view.3in +++ /dev/null @@ -1,101 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. 
-.\" $COPYRIGHT$ -.TH MPI_File_get_view 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_get_view\fP \- Returns the process's view of data in the file. - -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_File_get_view(MPI_File \fIfh\fP, MPI_Offset \fI*disp\fP, - MPI_Datatype \fI*etype\fP, MPI_Datatype \fI*filetype\fP, - char \fI*datarep\fP) - -.fi -.SH Fortran Syntax (see FORTRAN 77 NOTES) -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_FILE_GET_VIEW(\fIFH\fP, \fIDISP\fP, \fIETYPE\fP, - \fIFILETYPE\fP, \fIDATAREP\fP, \fIIERROR\fP) - INTEGER \fIFH, ETYPE, FILETYPE, IERROR\fP - CHARACTER*(*) \fIDATAREP\fP - INTEGER(KIND=MPI_OFFSET_KIND) \fIDISP\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_get_view(\fIfh\fP, \fIdisp\fP, \fIetype\fP, \fIfiletype\fP, \fIdatarep\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - INTEGER(KIND=MPI_OFFSET_KIND), INTENT(OUT) :: \fIdisp\fP - TYPE(MPI_Datatype), INTENT(OUT) :: \fIetype\fP, \fIfiletype\fP - CHARACTER(LEN=*), INTENT(OUT) :: \fIdatarep\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -fh -File handle (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -disp -Displacement (integer). -.TP 1i -etype -Elementary data type (handle). -.TP 1i -filetype -File type (handle). See Restrictions, below. -.TP 1i -datarep -Data representation (string). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -The MPI_File_get_view routine returns the process's view of the data -in the file. The current values of the displacement, etype, and -filetype are returned in -.I disp, -.I etype, -and -.I filetype, -respectively. -.sp -The MPI_File_get_view interface allows the user to pass a data-representation string via the \fIdatarep\fP argument. - -.SH FORTRAN 77 NOTES -.ft R -The MPI standard prescribes portable Fortran syntax for -the \fIDISP\fP argument only for Fortran 90. 
FORTRAN 77 -users may use the non-portable syntax. -.sp -.nf - INTEGER*MPI_OFFSET_KIND \fIDISP\fP -.fi -.sp -where MPI_OFFSET_KIND is a constant defined in mpif.h -and gives the length of the declared integer in bytes. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_File_iread.3in b/ompi/mpi/man/man3/MPI_File_iread.3in deleted file mode 100644 index df4ca42536e..00000000000 --- a/ompi/mpi/man/man3/MPI_File_iread.3in +++ /dev/null @@ -1,96 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_File_iread 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_iread\fP \- Reads a file starting at the location specified by the individual file pointer (nonblocking, noncollective). - -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_File_iread(MPI_File \fIfh\fP, void \fI*buf\fP, int \fIcount\fP, - MPI_Datatype \fIdatatype\fP, MPI_Request \fI*request\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_FILE_IREAD(\fIFH\fP, \fIBUF\fP, \fICOUNT\fP, \fIDATATYPE\fP, \fIREQUEST\fP, \fIIERROR\fP) - \fIBUF(*)\fP - INTEGER \fIFH, COUNT, DATATYPE, REQUEST, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_iread(\fIfh\fP, \fIbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIrequest\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -fh -File handle (handle). - -.SH INPUT PARAMETERS -.ft R -.TP 1i -count -Number of elements in the buffer (integer). -.ft R -.TP 1i -datatype -Data type of each buffer element (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -buf -Initial address of buffer (choice). -.ft R -.TP 1i -request -Request object (handle). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_File_iread is a nonblocking version of MPI_File_read. It attempts to read from the file associated with -.I fh -at the current individual file pointer position maintained by the system in which a total number of -.I count -data items having -.I datatype -type are read into the user's buffer -.I buf. -The data is taken out of those parts of the -file specified by the current view. MPI_File_iread stores the -number of data-type elements actually read in -.I status. -All other fields of -.I status -are undefined. It is erroneous to call this function if MPI_MODE_SEQUENTIAL mode was specified when the file was opened. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. 
For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_File_iread_all.3in b/ompi/mpi/man/man3/MPI_File_iread_all.3in deleted file mode 100644 index 374604761a9..00000000000 --- a/ompi/mpi/man/man3/MPI_File_iread_all.3in +++ /dev/null @@ -1,96 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_File_iread_all 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_iread_all\fP \- Reads a file starting at the location specified by the individual file pointer (nonblocking, collective). - -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_File_iread_all(MPI_File \fIfh\fP, void \fI*buf\fP, int \fIcount\fP, - MPI_Datatype \fIdatatype\fP, MPI_Request \fI*request\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_FILE_IREAD_ALL(\fIFH\fP, \fIBUF\fP, \fICOUNT\fP, \fIDATATYPE\fP, \fIREQUEST\fP, \fIIERROR\fP) - \fIBUF(*)\fP - INTEGER \fIFH, COUNT, DATATYPE, REQUEST, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_iread_all(\fIfh\fP, \fIbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIrequest\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - TYPE(*), DIMENSION(..) 
:: \fIbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -fh -File handle (handle). - -.SH INPUT PARAMETERS -.ft R -.TP 1i -count -Number of elements in the buffer (integer). -.ft R -.TP 1i -datatype -Data type of each buffer element (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -buf -Initial address of buffer (choice). -.ft R -.TP 1i -request -Request object (handle). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_File_iread_all is a nonblocking version of MPI_File_read_all. It attempts to read from the file associated with -.I fh -at the current individual file pointer position maintained by the system in which a total number of -.I count -data items having -.I datatype -type are read into the user's buffer -.I buf. -The data is taken out of those parts of the -file specified by the current view. MPI_File_iread_all stores the -number of data-type elements actually read in -.I status. -All other fields of -.I status -are undefined. It is erroneous to call this function if MPI_MODE_SEQUENTIAL mode was specified when the file was opened. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. 
- diff --git a/ompi/mpi/man/man3/MPI_File_iread_at.3in b/ompi/mpi/man/man3/MPI_File_iread_at.3in deleted file mode 100644 index cc65fa0c679..00000000000 --- a/ompi/mpi/man/man3/MPI_File_iread_at.3in +++ /dev/null @@ -1,123 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_File_iread_at 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_iread_at\fP \- Reads a file at an explicitly specified offset (nonblocking, noncollective). - -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_File_iread_at(MPI_File \fIfh\fP, MPI_Offset \fIoffset\fP, - void \fI*buf\fP, int \fIcount\fP, MPI_Datatype \fIdatatype\fP, - MPI_Request \fI*request\fP) - -.fi -.SH Fortran Syntax (see FORTRAN 77 NOTES) -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_FILE_IREAD_AT(\fIFH\fP, \fIOFFSET\fP, \fIBUF\fP, \fICOUNT\fP, \fIDATATYPE\fP, \fIREQUEST\fP, \fIIERROR\fP) - \fIBUF\fP(*) - INTEGER \fIFH, COUNT, DATATYPE, REQUEST, IERROR\fP - INTEGER(KIND=MPI_OFFSET_KIND) \fIOFFSET\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_iread_at(\fIfh\fP, \fIoffset\fP, \fIbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIrequest\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - INTEGER(KIND=MPI_OFFSET_KIND), INTENT(IN) :: \fIoffset\fP - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -fh -File handle (handle). -.ft R -.TP 1i -offset -File offset (integer). 
-.ft R -.TP 1i -count -Number of elements in the buffer (integer). -.ft R -.TP 1i -datatype -Data type of each buffer element (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -buf -Initial address of the buffer (choice). -.ft R -.TP 1i -request -Request object (handle). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_File_iread_at is the nonblocking version of MPI_File_read_at. - -MPI_File_iread_at is a nonblocking routine that attempts to read from the file associated with -.I fh -at the -.I offset -position a total number of -.I count -data items having -.I datatype -type into the user's buffer -.I buf. -The -.I offset -is in etype units relative to the current view. That is, holes are not counted -when locating an offset. The data is taken out of those parts of the -file specified by the current view. MPI_File_iread_at stores the -number of -.I datatype -elements actually read in -.I status. -All other fields of -.I status -are undefined. - -.SH FORTRAN 77 NOTES -.ft R -The MPI standard prescribes portable Fortran syntax for -the \fIOFFSET\fP argument only for Fortran 90. Sun FORTRAN 77 -users may use the non-portable syntax -.sp -.nf - INTEGER*MPI_OFFSET_KIND \fIOFFSET\fP -.fi -.sp -where MPI_OFFSET_KIND is a constant defined in mpif.h -and gives the length of the declared integer in bytes. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. 
- diff --git a/ompi/mpi/man/man3/MPI_File_iread_at_all.3in b/ompi/mpi/man/man3/MPI_File_iread_at_all.3in deleted file mode 100644 index c0c93bd67f8..00000000000 --- a/ompi/mpi/man/man3/MPI_File_iread_at_all.3in +++ /dev/null @@ -1,123 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_File_iread_at_all 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_iread_at_all\fP \- Reads a file at an explicitly specified offset (nonblocking, collective). - -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_File_iread_at_all(MPI_File \fIfh\fP, MPI_Offset \fIoffset\fP, - void \fI*buf\fP, int \fIcount\fP, MPI_Datatype \fIdatatype\fP, - MPI_Request \fI*request\fP) - -.fi -.SH Fortran Syntax (see FORTRAN 77 NOTES) -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_FILE_IREAD_AT_ALL(\fIFH\fP, \fIOFFSET\fP, \fIBUF\fP, \fICOUNT\fP, \fIDATATYPE\fP, \fIREQUEST\fP, \fIIERROR\fP) - \fIBUF\fP(*) - INTEGER \fIFH, COUNT, DATATYPE, REQUEST, IERROR\fP - INTEGER(KIND=MPI_OFFSET_KIND) \fIOFFSET\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_iread_at_all(\fIfh\fP, \fIoffset\fP, \fIbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIrequest\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - INTEGER(KIND=MPI_OFFSET_KIND), INTENT(IN) :: offset - TYPE(*), DIMENSION(..) :: \fIbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -fh -File handle (handle). -.ft R -.TP 1i -offset -File offset (integer). 
-.ft R -.TP 1i -count -Number of elements in the buffer (integer). -.ft R -.TP 1i -datatype -Data type of each buffer element (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -buf -Initial address of the buffer (choice). -.ft R -.TP 1i -request -Request object (handle). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_File_iread_at_all is the nonblocking version of MPI_File_read_at_all. - -MPI_File_iread_at_all is a nonblocking routine that attempts to read from the file associated with -.I fh -at the -.I offset -position a total number of -.I count -data items having -.I datatype -type into the user's buffer -.I buf. -The -.I offset -is in etype units relative to the current view. That is, holes are not counted -when locating an offset. The data is taken out of those parts of the -file specified by the current view. MPI_File_iread_at_all stores the -number of -.I datatype -elements actually read in -.I status. -All other fields of -.I status -are undefined. - -.SH FORTRAN 77 NOTES -.ft R -The MPI standard prescribes portable Fortran syntax for -the \fIOFFSET\fP argument only for Fortran 90. Sun FORTRAN 77 -users may use the non-portable syntax -.sp -.nf - INTEGER*MPI_OFFSET_KIND \fIOFFSET\fP -.fi -.sp -where MPI_OFFSET_KIND is a constant defined in mpif.h -and gives the length of the declared integer in bytes. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. 
- diff --git a/ompi/mpi/man/man3/MPI_File_iread_shared.3in b/ompi/mpi/man/man3/MPI_File_iread_shared.3in deleted file mode 100644 index 0fbbdf39492..00000000000 --- a/ompi/mpi/man/man3/MPI_File_iread_shared.3in +++ /dev/null @@ -1,82 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_File_iread_shared 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_iread_shared\fP \- Reads a file using the shared file pointer (nonblocking, noncollective). - -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_File_iread_shared(MPI_File \fIfh\fP, void \fI*buf\fP, int \fIcount\fP, - MPI_Datatype \fIdatatype\fP, MPI_Request \fI*request\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_FILE_IREAD_SHARED(\fIFH\fP, \fIBUF\fP, \fICOUNT\fP, \fIDATATYPE\fP, \fIREQUEST\fP, \fIIERROR\fP) - \fIBUF(*)\fP - INTEGER \fIFH, COUNT, DATATYPE, REQUEST, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_iread_shared(\fIfh\fP, \fIbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIrequest\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -fh -File handle (handle). - -.SH INPUT PARAMETERS -.ft R -.TP 1i -count -Number of elements in buffer (integer). -.ft R -.TP 1i -datatype -Data type of each buffer element (handle). 
- -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -buf -Initial address of buffer (choice). -.ft R -.TP 1i -request -Request object (handle). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_File_iread_shared is a nonblocking version of the MPI_File_read_shared interface. It uses the shared file pointer to read files. The order of serialization among the processors is not deterministic for this noncollective routine, so you need to use other methods of synchronization to impose a particular order among processors. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_File_iwrite.3in b/ompi/mpi/man/man3/MPI_File_iwrite.3in deleted file mode 100644 index f0850bdd6ec..00000000000 --- a/ompi/mpi/man/man3/MPI_File_iwrite.3in +++ /dev/null @@ -1,101 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_File_iwrite 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_iwrite\fP \- Writes a file starting at the location specified by the individual file pointer (nonblocking, noncollective). 
- -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_File_iwrite(MPI_File \fIfh\fP, const void \fI*buf\fP, int \fIcount\fP, - MPI_Datatype \fIdatatype\fP, MPI_Request \fI*request\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_FILE_IWRITE(\fIFH\fP, \fIBUF\fP, \fICOUNT\fP, \fIDATATYPE\fP, \fIREQUEST\fP, \fIIERROR\fP) - \fIBUF(*)\fP - INTEGER \fIFH, COUNT, DATATYPE, REQUEST, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_iwrite(\fIfh\fP, \fIbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIrequest\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -fh -File handle (handle). - -.SH INPUT PARAMETERS -.ft R -.TP 1i -buf -Initial address of buffer (choice). -.ft R -.TP 1i -count -Number of elements in buffer (integer). -.ft R -.TP 1i -datatype -Data type of each buffer element (handle). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -request -Request object (handle). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_File_iwrite is a nonblocking version of the MPI_File_write interface. It attempts to write into the file associated with -.I fh -(at the current individual file pointer position maintained by the system) a total number of -.I count -data items having -.I datatype -type from the user's buffer -.I buf. -The data is written into those parts of the -file specified by the current view. MPI_File_iwrite stores the -number of -.I datatype -elements actually written in -.I status. -All other fields of -.I status -are undefined. -.sp -It is erroneous to call this function if MPI_MODE_SEQUENTIAL mode was specified when the file was open. 
- -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_File_iwrite_all.3in b/ompi/mpi/man/man3/MPI_File_iwrite_all.3in deleted file mode 100644 index 1440a502b08..00000000000 --- a/ompi/mpi/man/man3/MPI_File_iwrite_all.3in +++ /dev/null @@ -1,101 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_File_iwrite_all 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_iwrite_all\fP \- Writes a file starting at the location specified by the individual file pointer (nonblocking, collective). - -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_File_iwrite_all(MPI_File \fIfh\fP, const void \fI*buf\fP, int \fIcount\fP, - MPI_Datatype \fIdatatype\fP, MPI_Request \fI*request\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_FILE_IWRITE_ALL(\fIFH\fP, \fIBUF\fP, \fICOUNT\fP, \fIDATATYPE\fP, \fIREQUEST\fP, \fIIERROR\fP) - \fIBUF(*)\fP - INTEGER \fIFH, COUNT, DATATYPE, REQUEST, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_iwrite_all(\fIfh\fP, \fIbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIrequest\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - TYPE(*), DIMENSION(..) :: \fIbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -fh -File handle (handle). - -.SH INPUT PARAMETERS -.ft R -.TP 1i -buf -Initial address of buffer (choice). -.ft R -.TP 1i -count -Number of elements in buffer (integer). -.ft R -.TP 1i -datatype -Data type of each buffer element (handle). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -request -Request object (handle). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_File_iwrite_all is a nonblocking version of the MPI_File_write_all interface. It attempts to write into the file associated with -.I fh -(at the current individual file pointer position maintained by the system) a total number of -.I count -data items having -.I datatype -type from the user's buffer -.I buf. -The data is written into those parts of the -file specified by the current view. MPI_File_iwrite_all stores the -number of -.I datatype -elements actually written in -.I status. -All other fields of -.I status -are undefined. -.sp -It is erroneous to call this function if MPI_MODE_SEQUENTIAL mode was specified when the file was open. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. 
For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_File_iwrite_at.3in b/ompi/mpi/man/man3/MPI_File_iwrite_at.3in deleted file mode 100644 index 4adb62d77f0..00000000000 --- a/ompi/mpi/man/man3/MPI_File_iwrite_at.3in +++ /dev/null @@ -1,125 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_File_iwrite_at 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_iwrite_at\fP \- Writes a file at an explicitly specified offset (nonblocking, noncollective). - -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_File_iwrite_at(MPI_File \fIfh\fP, MPI_Offset \fIoffset\fP, - const void \fI*buf\fP, int \fIcount\fP, MPI_Datatype \fIdatatype\fP, MPI_Request \fI*request\fP) - -.fi -.SH Fortran Syntax (see FORTRAN 77 NOTES) -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_FILE_IWRITE_AT(\fIFH\fP, \fIOFFSET\fP, \fIBUF\fP, \fICOUNT\fP, \fIDATATYPE\fP, \fIREQUEST\fP, \fIIERROR\fP) - \fIBUF\fP(*) - INTEGER \fIFH, COUNT, DATATYPE, REQUEST, IERROR\fP - INTEGER(KIND=MPI_OFFSET_KIND) \fIOFFSET\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_iwrite_at(\fIfh\fP, \fIoffset\fP, \fIbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIrequest\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - INTEGER(KIND=MPI_OFFSET_KIND), INTENT(IN) :: \fIoffset\fP - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -fh -File handle (handle). - -.SH INPUT PARAMETERS -.ft R -.TP 1i -offset -File offset (integer). -.ft R -.TP 1i -buf -Initial address of buffer (choice). -.ft R -.TP 1i -count -Number of elements in buffer (integer). -.ft R -.TP 1i -datatype -Data type of each buffer element (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -request -Request object (handle). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_File_iwrite_at is a nonblocking version of MPI_File_write_at. It attempts to write into the file associated with -.I fh -(at the -.I offset -position) a total number of -.I count -data items having -.I datatype -type from the user's buffer -.I buf. -The offset is in -.I etype -units relative to the current view. That is, holes are not counted -when locating an offset. The data is written into those parts of the -file specified by the current view. MPI_File_iwrite_at stores the -number of -.I datatype -elements actually written in -.I status. -All other fields of -.I status -are undefined. 
The request structure can be passed to MPI_Wait or MPI_Test, which will return a status with the number of bytes actually accessed. -.sp -It is erroneous to call this function if MPI_MODE_SEQUENTIAL mode was specified when the file was open. - -.SH FORTRAN 77 NOTES -.ft R -The MPI standard prescribes portable Fortran syntax for -the \fIOFFSET\fP argument only for Fortran 90. FORTRAN 77 -users may use the non-portable syntax -.sp -.nf - INTEGER*MPI_OFFSET_KIND \fIOFFSET\fP -.fi -.sp -where MPI_OFFSET_KIND is a constant defined in mpif.h -and gives the length of the declared integer in bytes. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_File_iwrite_at_all.3in b/ompi/mpi/man/man3/MPI_File_iwrite_at_all.3in deleted file mode 100644 index 3b9db67c415..00000000000 --- a/ompi/mpi/man/man3/MPI_File_iwrite_at_all.3in +++ /dev/null @@ -1,125 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. 
-.\" $COPYRIGHT$ -.TH MPI_File_iwrite_at_all 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_iwrite_at_all\fP \- Writes a file at an explicitly specified offset (nonblocking, collective). - -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_File_iwrite_at_all(MPI_File \fIfh\fP, MPI_Offset \fIoffset\fP, - const void \fI*buf\fP, int \fIcount\fP, MPI_Datatype \fIdatatype\fP, MPI_Request \fI*request\fP) - -.fi -.SH Fortran Syntax (see FORTRAN 77 NOTES) -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_FILE_IWRITE_AT_ALL(\fIFH\fP, \fIOFFSET\fP, \fIBUF\fP, \fICOUNT\fP, \fIDATATYPE\fP, \fIREQUEST\fP, \fIIERROR\fP) - \fIBUF\fP(*) - INTEGER \fIFH, COUNT, DATATYPE, REQUEST, IERROR\fP - INTEGER(KIND=MPI_OFFSET_KIND) \fIOFFSET\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_iwrite_at_all(\fIfh\fP, \fIoffset\fP, \fIbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIrequest\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - INTEGER(KIND=MPI_OFFSET_KIND), INTENT(IN) :: offset - TYPE(*), DIMENSION(..) :: \fIbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -fh -File handle (handle). - -.SH INPUT PARAMETERS -.ft R -.TP 1i -offset -File offset (integer). -.ft R -.TP 1i -buf -Initial address of buffer (choice). -.ft R -.TP 1i -count -Number of elements in buffer (integer). -.ft R -.TP 1i -datatype -Data type of each buffer element (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -request -Request object (handle). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_File_iwrite_at_all is a nonblocking version of MPI_File_write_at_all. 
It attempts to write into the file associated with -.I fh -(at the -.I offset -position) a total number of -.I count -data items having -.I datatype -type from the user's buffer -.I buf. -The offset is in -.I etype -units relative to the current view. That is, holes are not counted -when locating an offset. The data is written into those parts of the -file specified by the current view. MPI_File_iwrite_at_all stores the -number of -.I datatype -elements actually written in -.I status. -All other fields of -.I status -are undefined. The request structure can be passed to MPI_Wait or MPI_Test, which will return a status with the number of bytes actually accessed. -.sp -It is erroneous to call this function if MPI_MODE_SEQUENTIAL mode was specified when the file was open. - -.SH FORTRAN 77 NOTES -.ft R -The MPI standard prescribes portable Fortran syntax for -the \fIOFFSET\fP argument only for Fortran 90. FORTRAN 77 -users may use the non-portable syntax -.sp -.nf - INTEGER*MPI_OFFSET_KIND \fIOFFSET\fP -.fi -.sp -where MPI_OFFSET_KIND is a constant defined in mpif.h -and gives the length of the declared integer in bytes. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_File_iwrite_shared.3in b/ompi/mpi/man/man3/MPI_File_iwrite_shared.3in deleted file mode 100644 index 33c9f2285eb..00000000000 --- a/ompi/mpi/man/man3/MPI_File_iwrite_shared.3in +++ /dev/null @@ -1,84 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. 
All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_File_iwrite_shared 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_iwrite_shared\fP \- Writes a file using the shared file pointer (nonblocking, noncollective). - - - -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_File_iwrite_shared(MPI_File \fIfh\fP, const void \fI*buf\fP, int \fIcount\fP, MPI_Datatype - \fIdatatype\fP, MPI_Request \fI*request\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_FILE_IWRITE_SHARED(\fIFH\fP, \fIBUF\fP, \fICOUNT\fP, \fIDATATYPE\fP, \fIREQUEST\fP, \fIIERROR\fP) - \fIBUF(*)\fP - INTEGER \fIFH, COUNT, DATATYPE, REQUEST, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_iwrite_shared(\fIfh\fP, \fIbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIrequest\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -fh -File handle (handle). - -.SH INPUT PARAMETERS -.ft R -.TP 1i -count -Number of elements in buffer (integer). -.ft R -.TP 1i -datatype -Data type of each buffer element (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -buf -Initial address of buffer (choice). -.TP 1i -request -Request object (handle). -.TP 1i -IERROR -Fortran only: Error status (integer). 
- -.SH DESCRIPTION -.ft R -MPI_File_iwrite_shared is a nonblocking routine that uses the shared file pointer to write files. The order of serialization is not deterministic for this noncollective routine, so you need to use other methods of synchronization to impose a particular order. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_File_open.3in b/ompi/mpi/man/man3/MPI_File_open.3in deleted file mode 100644 index 32ac6c4c229..00000000000 --- a/ompi/mpi/man/man3/MPI_File_open.3in +++ /dev/null @@ -1,191 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright (c) 2010-2015 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_File_open 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_open\fP \- Opens a file (collective). -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_File_open(MPI_Comm \fIcomm\fP, const char \fI*filename\fP, - int \fIamode\fP, MPI_Info \fIinfo\fP, - MPI_File \fI*fh\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_FILE_OPEN(\fICOMM\fP, \fIFILENAME\fP, \fIAMODE\fP, \fIINFO\fP, \fIFH\fP, \fIIERROR\fP) - CHARACTER*(*) \fIFILENAME\fP - INTEGER \fICOMM, AMODE, INFO, FH, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_open(\fIcomm\fP, \fIfilename\fP, \fIamode\fP, \fIinfo\fP, \fIfh\fP, \fIierror\fP) - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - CHARACTER(LEN=*), INTENT(IN) :: \fIfilename\fP - INTEGER, INTENT(IN) :: \fIamode\fP - TYPE(MPI_Info), INTENT(IN) :: \fIinfo\fP - TYPE(MPI_File), INTENT(OUT) :: \fIfh\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -comm -Communicator (handle). -.TP 1i -filename -Name of file to open (string). -.TP 1i -amode -File access mode (integer). -.TP 1i -info -Info object (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -fh -New file handle (handle). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_File_open opens the file identified by the filename -.I -filename -on all processes in the -.I comm -communicator group. MPI_File_open is a collective routine; all processes -must provide the same value for -.I amode, -and all processes must provide filenames that reference the same -file which are textually identical (note: Open MPI I/O plugins may -have restrictions on characters that can be used in filenames. For -example, the ROMIO plugin may disallow the colon (":") character from -appearing in a filename). A process can open a file independently of -other processes by using the MPI_COMM_SELF communicator. The file -handle returned, -.I fh, -can be subsequently used to access the file until the file is closed -using MPI_File_close. Before calling MPI_Finalize, the user is required to -close (via MPI_File_close) all files that were opened with MPI_File_open. Note -that the communicator -.I comm -is unaffected by MPI_File_open and continues to be usable in all MPI -routines. 
Furthermore, use of -.I comm -will not interfere with I/O behavior. -.sp -Initially, all processes view the file as a linear byte stream; that is, the -.I etype -and -.I filetype -are both MPI_BYTE. The file view can be changed via the MPI_File_set_view routine. -.sp -The following access modes are supported (specified in amode, in a bit-vector OR in one of the following integer constants): -.TP .5i - o -MPI_MODE_APPEND -.TP .5i - o -MPI_MODE_CREATE -- Create the file if it does not exist. -.TP .5i - o -MPI_MODE_DELETE_ON_CLOSE -.TP .5i - o -MPI_MODE_EXCL -- Error creating a file that already exists. -.TP .5i - o -MPI_MODE_RDONLY -- Read only. -.TP .5i - o -MPI_MODE_RDWR -- Reading and writing. -.TP .5i - o -MPI_MODE_SEQUENTIAL -.TP .5i - o -MPI_MODE_WRONLY -- Write only. -.TP .5i - o -MPI_MODE_UNIQUE_OPEN -.RE -.sp -The modes MPI_MODE_RDONLY, MPI_MODE_RDWR, MPI_MODE_WRONLY, and MPI_MODE_CREATE have -identical semantics to their POSIX counterparts. It is erroneous to -specify MPI_MODE_CREATE in conjunction with MPI_MODE_RDONLY. Errors related to -the access mode are raised in the class MPI_ERR_AMODE. -.sp -On single-node clusters, files are opened by default using nonatomic mode file consistency -semantics. The more stringent atomic-mode consistency semantics, required for atomicity of overlapping accesses, are the default when processors in a communicator group reside on more than one node. -This setting can be changed using -MPI_File_set_atomicity. -.sp -The MPI_File_open interface allows the user to pass information via the \fIinfo\fP argument. It can be set to MPI_INFO_NULL. See the HINTS section for a list of hints that can be set. - -.SH HINTS -.ft R -The following hints can be used as values for the \fIinfo\fP argument. -.sp -SETTABLE HINTS: -.sp -- MPI_INFO_NULL -.sp -- shared_file_timeout: Amount of time (in seconds) to wait for access to the -shared file pointer before exiting with MPI_ERR_TIMEDOUT. 
-.sp -- rwlock_timeout: Amount of time (in seconds) to wait for obtaining a read or -write lock on a contiguous chunk of a UNIX file before exiting with MPI_ERR_TIMEDOUT. -.sp -- noncoll_read_bufsize: Maximum size of the buffer used by -MPI I/O to satisfy multiple noncontiguous read requests in -the noncollective data-access routines. (See NOTE, below.) -.sp -- noncoll_write_bufsize: Maximum size of the buffer used by -MPI I/O to satisfy multiple noncontiguous write requests in -the noncollective data-access routines. (See NOTE, below.) -.sp -- coll_read_bufsize: Maximum size of the buffer used by MPI -I/O to satisfy multiple noncontiguous read requests in the -collective data-access routines. (See NOTE, below.) -.sp -- coll_write_bufsize: Maximum size of the buffer used by MPI -I/O to satisfy multiple noncontiguous write requests in the -collective data-access routines. (See NOTE, below.) -.sp -NOTE: A buffer size smaller than the distance (in bytes) in a UNIX file between the first byte and the last byte of the access request causes MPI I/O to iterate and perform multiple UNIX read() or write() calls. If the request includes multiple noncontiguous chunks of data, and the buffer size is greater than the size of those chunks, then the UNIX read() or write() (made at the MPI I/O level) will access data not requested by this process in order to reduce the total number of write() calls made. If this is not desirable behavior, you should reduce this buffer size to equal the size of the contiguous chunks within the aggregate request. -.sp -- mpiio_concurrency: (boolean) controls whether nonblocking -I/O routines can bind an extra thread to an LWP. -.sp -- mpiio_coll_contiguous: (boolean) controls whether subsequent collective data accesses will request collectively contiguous regions of the file. -.sp -NON-SETTABLE HINTS: -.sp -- filename: Access this hint to get the name of the file. 
- -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_File_preallocate.3in b/ompi/mpi/man/man3/MPI_File_preallocate.3in deleted file mode 100644 index e6acaf3472d..00000000000 --- a/ompi/mpi/man/man3/MPI_File_preallocate.3in +++ /dev/null @@ -1,90 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_File_preallocate 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_preallocate\fP \- Preallocates a specified amount of storage space at the beginning of a file (collective). - -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_File_preallocate(MPI_File \fIfh\fP, MPI_Offset \fIsize\fP) - -.fi -.SH Fortran Syntax (see FORTRAN 77 NOTES) -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_FILE_PREALLOCATE(\fIFH\fP, \fISIZE\fP, \fIIERROR\fP) - INTEGER \fIFH, IERROR\fP - INTEGER(KIND=MPI_OFFSET_KIND) \fISIZE\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_preallocate(\fIfh\fP, \fIsize\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - INTEGER(KIND=MPI_OFFSET_KIND), INTENT(IN) :: \fIsize\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -fh -File handle (handle). - -.SH INPUT PARAMETER -.ft R -.TP 1i -size -Size to preallocate file, in bytes (integer). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_File_preallocate ensures that storage space is allocated for the first \fIsize\fP bytes of the file associated with \fIfh\fP. MPI_File_preallocate can be a very time-consuming operation. - -MPI_File_preallocate is collective; all processes in the group must pass identical values for \fIsize\fP. Regions of the file that have previously been written are unaffected. For newly allocated regions of the file, MPI_File_preallocate has the same effect as writing undefined data. If size is larger than the current file size, the file size increases to \fIsize\fP. If \fIsize\fP is less than or equal to the current file size, the file size is unchanged. - -The treatment of file pointers, pending nonblocking accesses, and file consistency is the same as with MPI_File_set_size. If MPI_MODE_SEQUENTIAL mode was specified when the file was opened, it is erroneous to call this routine. - -.SH FORTRAN 77 NOTES -.ft R -The MPI standard prescribes portable Fortran syntax for -the \fISIZE\fP argument only for Fortran 90. FORTRAN 77 -users may use the non-portable syntax -.sp -.nf - INTEGER*MPI_OFFSET_KIND \fISIZE\fP -.fi -.sp -where MPI_OFFSET_KIND is a constant defined in mpif.h -and gives the length of the declared integer in bytes. 
- -.SH NOTES -.ft R -When using the collective routine MPI_File_set_size on a UNIX file, if the size that is set is smaller than the current file size, the file is truncated at the position defined by size. If the size is set to be larger than the current file size, the file size becomes the set size. When the file size is increased this way with MPI_File_set_size, new regions are created in the file with displacements between the old file size and the larger, newly set file size. -.sp -Sun MPI I/O does not necessarily allocate file space for such new regions. You may reserve file space either by using MPI_File_preallocate or by performing a read or write to certain bytes. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_File_read.3in b/ompi/mpi/man/man3/MPI_File_read.3in deleted file mode 100644 index f8b355f3721..00000000000 --- a/ompi/mpi/man/man3/MPI_File_read.3in +++ /dev/null @@ -1,94 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. 
-.\" $COPYRIGHT$ -.TH MPI_File_read 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_read\fP \- Reads a file starting at the location specified by the individual file pointer (blocking, noncollective). - -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_File_read(MPI_File \fIfh\fP, void \fI*buf\fP, - int \fIcount\fP, MPI_Datatype \fIdatatype\fP, MPI_Status \fI*status\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_FILE_READ(\fIFH\fP, \fIBUF\fP, \fICOUNT\fP, - \fIDATATYPE\fP, \fISTATUS\fP, \fIIERROR\fP) - \fIBUF(*)\fP - INTEGER \fIFH, COUNT, DATATYPE, STATUS(MPI_STATUS_SIZE),IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_read(\fIfh\fP, \fIbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIstatus\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - TYPE(*), DIMENSION(..) :: \fIbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Status) :: \fIstatus\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -fh -File handle (handle). -.TP 1i -count -Number of elements in buffer (integer). -.TP 1i -datatype -Data type of each buffer element (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -buf -Initial address of buffer (integer). -.TP 1i -status -Status object (status). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_File_read attempts to read from the file associated with -.I fh -(at the current individual file pointer position maintained by the system) a total number of -.I count -data items having -.I datatype -type into the user's buffer -.I buf. -The data is taken out of those parts of the -file specified by the current view. MPI_File_read stores the -number of data-type elements actually read in -.I status. -All other fields of -.I status -are undefined. 
-.sp -It is erroneous to call this function if MPI_MODE_SEQUENTIAL mode was specified when the file was opened. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_File_read_all.3in b/ompi/mpi/man/man3/MPI_File_read_all.3in deleted file mode 100644 index 038060df403..00000000000 --- a/ompi/mpi/man/man3/MPI_File_read_all.3in +++ /dev/null @@ -1,95 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_File_read_all 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_read_all\fP \- Reads a file starting at the locations specified by individual file pointers (blocking, collective). - -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_File_read_all(MPI_File \fIfh\fP, void \fI*buf\fP, - int \fIcount\fP, MPI_Datatype \fIdatatype\fP, MPI_Status \fI*status\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_FILE_READ_ALL(\fIFH\fP, \fIBUF\fP, \fICOUNT\fP, - \fIDATATYPE\fP, \fISTATUS\fP, \fIIERROR\fP) - \fIBUF(*)\fP - INTEGER \fIFH, COUNT, DATATYPE, STATUS(MPI_STATUS_SIZE),IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_read_all(\fIfh\fP, \fIbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIstatus\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - TYPE(*), DIMENSION(..) :: \fIbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Status) :: \fIstatus\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -fh -File handle (handle). -.TP 1i -count -Number of elements in buffer (integer). -.TP 1i -datatype -Data type of each buffer element (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -buf -Initial address of buffer (choice). -.TP 1i -status -Status object (status). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_File_read_all is a collective routine that attempts to read from the file associated with -.I fh -(at the current individual file pointer position maintained by the system) a total number of -.I count -data items having -.I datatype -type into the user's buffer -.I buf. -The data is taken out of those parts of the -file specified by the current view. MPI_File_read_all stores the -number of data-type elements actually read in -.I status. -All other fields of -.I status -are undefined. -.sp -It is erroneous to call this function if MPI_MODE_SEQUENTIAL mode was specified when the file was opened. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. 
The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. - - diff --git a/ompi/mpi/man/man3/MPI_File_read_all_begin.3in b/ompi/mpi/man/man3/MPI_File_read_all_begin.3in deleted file mode 100644 index 0fdf627e35c..00000000000 --- a/ompi/mpi/man/man3/MPI_File_read_all_begin.3in +++ /dev/null @@ -1,90 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_File_read_all_begin 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_read_all_begin\fP \- Reads a file starting at the locations specified by individual file pointers; beginning part of a split collective routine (nonblocking). - -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_File_read_all_begin(MPI_File \fIfh\fP, void \fI*buf\fP, - int \fIcount\fP, MPI_Datatype \fIdatatype\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_FILE_READ_ALL_BEGIN(\fIFH\fP, \fIBUF\fP, \fICOUNT\fP, \fIDATATYPE\fP, \fIIERROR\fP) - \fIBUF\fP(*) - INTEGER \fIFH, COUNT, DATATYPE, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_read_all_begin(\fIfh\fP, \fIbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -fh -File handle (handle). 
- -.SH INPUT PARAMETERS -.ft R -.TP 1i -count -Number of elements in buffer (integer). -.ft R -.TP 1i -datatype -Data type of each buffer element (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -buf -Initial address of buffer (choice). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_File_read_all_begin is the beginning part of a split collective operation that attempts to read from the file associated with -.I fh -(at the current individual file pointer position maintained by the system) a total number of -.I count -data items having -.I datatype -type into the user's buffer -.I buf. -The data is taken out of those parts of the -file specified by the current view. - -.SH NOTES -.ft R -All the nonblocking collective routines for data access are "split" into two routines, each with _begin or _end as a suffix. These split collective routines are subject to the semantic rules described in Section 9.4.5 of the MPI-2 standard. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_File_read_all_end.3in b/ompi/mpi/man/man3/MPI_File_read_all_end.3in deleted file mode 100644 index 6957ab5c135..00000000000 --- a/ompi/mpi/man/man3/MPI_File_read_all_end.3in +++ /dev/null @@ -1,84 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. 
-.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_File_read_all_end 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_read_all_end\fP \- Reads a file starting at the locations specified by individual file pointers; ending part of a split collective routine (blocking). - -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_File_read_all_end(MPI_File \fIfh\fP, void \fI*buf\fP, - MPI_Status \fI*status\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_FILE_READ_ALL_END(\fIFH\fP, \fIBUF\fP, \fISTATUS\fP, \fIIERROR\fP) - \fIBUF(*)\fP - INTEGER \fIFH, STATUS(MPI_STATUS_SIZE), IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_read_all_end(\fIfh\fP, \fIbuf\fP, \fIstatus\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIbuf\fP - TYPE(MPI_Status) :: \fIstatus\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -fh -File handle (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -buf -Initial address of buffer (choice). -.ft R -.TP 1i -status -Status object (status). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_File_read_all_end is the ending part of a split collective operation that stores the number of elements actually read from the file associated with -.I fh -(at the current individual file pointer position maintained by the system) -into the user's buffer -.I buf -in -.I status. -The data is taken out of those parts of the -file specified by the current view. All other fields of -.I status -are undefined. 
- -.SH NOTES -.ft R -All the nonblocking collective routines for data access are "split" into two routines, each with _begin or _end as a suffix. These split collective routines are subject to the semantic rules described in Section 9.4.5 of the MPI-2 standard. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_File_read_at.3in b/ompi/mpi/man/man3/MPI_File_read_at.3in deleted file mode 100644 index ac1eea5583b..00000000000 --- a/ompi/mpi/man/man3/MPI_File_read_at.3in +++ /dev/null @@ -1,121 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_File_read_at 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_read_at\fP \- Reads a file at an explicitly specified offset (blocking, noncollective). - -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_File_read_at(MPI_File \fIfh\fP, MPI_Offset \fIoffset\fP, - void \fI*buf\fP, int \fIcount\fP, MPI_Datatype \fIdatatype\fP, - MPI_Status \fI*status\fP) - -.fi -.SH Fortran Syntax (see FORTRAN 77 NOTES) -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_FILE_READ_AT(\fIFH\fP, \fIOFFSET\fP, \fIBUF\fP, \fICOUNT\fP, - \fIDATATYPE\fP, \fISTATUS\fP, \fIIERROR\fP) - \fIBUF\fP(*) - INTEGER \fIFH, COUNT, DATATYPE, STATUS(MPI_STATUS_SIZE), IERROR\fP - INTEGER(KIND=MPI_OFFSET_KIND) \fIOFFSET\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_read_at(\fIfh\fP, \fIoffset\fP, \fIbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIstatus\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - INTEGER(KIND=MPI_OFFSET_KIND), INTENT(IN) :: \fIoffset\fP - TYPE(*), DIMENSION(..) :: \fIbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Status) :: \fIstatus\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -fh -File handle (handle). -.TP 1i -offset -File offset (integer). -.TP 1i -count -Number of elements in buffer (integer). -.TP 1i -datatype -Data type of each buffer element (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -buf -Initial address of buffer (choice). -.TP 1i -status -Status object (status). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R - -MPI_File_read_at attempts to read from the file associated with -.I fh -(at the -.I offset -position) a total number of -.I count -data items having -.I datatype -type into the user's buffer -.I buf. -The -.I offset -is in -.I etype -units relative to the current view. That is, holes are not counted -when locating an offset. The data is taken out of those parts of the -file specified by the current view. MPI_File_read_at stores the -number of -.I datatype -elements actually read in -.I status. -All other fields of -.I status -are undefined. It is erroneous to call this function if MPI_MODE_SEQUENTIAL mode was specified when the file was opened. - -.SH FORTRAN 77 NOTES -.ft R -The MPI standard prescribes portable Fortran syntax for -the \fIOFFSET\fP argument only for Fortran 90. 
FORTRAN 77 -users may use the non-portable syntax -.sp -.nf - INTEGER*MPI_OFFSET_KIND \fIOFFSET\fP -.fi -.sp -where MPI_OFFSET_KIND is a constant defined in mpif.h -and gives the length of the declared integer in bytes. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_File_read_at_all.3in b/ompi/mpi/man/man3/MPI_File_read_at_all.3in deleted file mode 100644 index 715c7c18d7e..00000000000 --- a/ompi/mpi/man/man3/MPI_File_read_at_all.3in +++ /dev/null @@ -1,118 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_File_read_at_all 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_read_at_all\fP \- Reads a file at explicitly specified offsets (blocking, collective). - -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_File_read_at_all(MPI_File \fIfh\fP, MPI_Offset \fIoffset\fP, - void \fI*buf\fP, int \fIcount\fP, MPI_Datatype \fIdatatype\fP, - MPI_Status \fI*status\fP) - -.fi -.SH Fortran Syntax (see FORTRAN 77 NOTES) -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_FILE_READ_AT_ALL(\fIFH\fP, \fIOFFSET\fP, \fIBUF\fP, \fICOUNT\fP, - \fIDATATYPE\fP, \fISTATUS\fP, \fIIERROR\fP) - \fIBUF\fP(*) - INTEGER \fIFH, COUNT, DATATYPE, STATUS(MPI_STATUS_SIZE), IERROR\fP - INTEGER(KIND=MPI_OFFSET_KIND) \fIOFFSET\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_read_at_all(\fIfh\fP, \fIoffset\fP, \fIbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIstatus\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - INTEGER(KIND=MPI_OFFSET_KIND), INTENT(IN) :: \fIoffset\fP - TYPE(*), DIMENSION(..) :: \fIbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Status) :: \fIstatus\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -fh -File handle (handle). -.TP 1i -offset -File offset (integer). -.TP 1i -count -Number of elements in buffer (integer). -.TP 1i -datatype -Data type of each buffer element (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -buf -Initial address of buffer (choice). -.TP 1i -status -Status object (status). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_File_read_at_all is a collective routine that attempts to read from the file associated with -.I fh -(at the -.I offset -position) a total number of -.I count -data items having -.I datatype -type into the user's buffer -.I buf. -The -.I offset -is in etype units relative to the current view. That is, holes are not counted -when locating an offset. The data is taken out of those parts of the -file specified by the current view. MPI_File_read_at_all stores the -number of -.I datatype -elements actually read in -.I status. -All other fields of -.I status -are undefined. It is erroneous to call this function if MPI_MODE_SEQUENTIAL mode was specified when the file was opened. 
- -.SH FORTRAN 77 NOTES -.ft R -The MPI standard prescribes portable Fortran syntax for -the \fIOFFSET\fP argument only for Fortran 90. FORTRAN 77 -users may use the non-portable syntax -.sp -.nf - INTEGER*MPI_OFFSET_KIND \fIOFFSET\fP -.fi -.sp -where MPI_OFFSET_KIND is a constant defined in mpif.h -and gives the length of the declared integer in bytes. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_File_read_at_all_begin.3in b/ompi/mpi/man/man3/MPI_File_read_at_all_begin.3in deleted file mode 100644 index 7fbd263dde9..00000000000 --- a/ompi/mpi/man/man3/MPI_File_read_at_all_begin.3in +++ /dev/null @@ -1,114 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_File_read_at_all_begin 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_read_at_all_begin\fP \- Reads a file at explicitly specified offsets; beginning part of a split collective routine (nonblocking). 
- -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_File_read_at_all_begin(MPI_File \fIfh\fP, MPI_Offset - \fIoffset\fP, void \fI*buf\fP, int \fIcount\fP, MPI_Datatype - \fIdatatype\fP) - -.fi -.SH Fortran Syntax (see FORTRAN 77 NOTES) -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_FILE_READ_AT_ALL_BEGIN(\fIFH\fP, \fIOFFSET\fP, \fIBUF\fP, - \fICOUNT\fP, \fIDATATYPE\fP, \fIIERROR\fP) - \fIBUF\fP(*) - INTEGER \fIFH, COUNT, DATATYPE, IERROR\fP - INTEGER(KIND=MPI_OFFSET_KIND) \fIOFFSET\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_read_at_all_begin(\fIfh\fP, \fIoffset\fP, \fIbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - INTEGER(KIND=MPI_OFFSET_KIND), INTENT(IN) :: \fIoffset\fP - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -fh -File handle (handle). -.ft R -.TP 1i -offset -File offset (integer). -.ft R -.TP 1i -count -Number of elements in buffer (integer). -.ft R -.TP 1i -datatype -Data type of each buffer element. - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -buf -Initial address of buffer (choice). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_File_read_at_all_begin is the beginning part of a split collective routine that attempts to read from the file associated with -.I fh -(at the -.I offset -position) a total number of -.I count -data items having -.I datatype -type into the user's buffer -.I buf. -The -.I offset -is in etype units relative to the current view. That is, holes are not counted -when locating an offset. The data is taken out of those parts of the -file specified by the current view. - -.SH FORTRAN 77 NOTES -.ft R -The MPI standard prescribes portable Fortran syntax for -the \fIOFFSET\fP argument only for Fortran 90. 
FORTRAN 77 -users may use the non-portable syntax -.sp -.nf - INTEGER*MPI_OFFSET_KIND \fIOFFSET\fP -.fi -.sp -where MPI_OFFSET_KIND is a constant defined in mpif.h -and gives the length of the declared integer in bytes. - -.SH NOTES -.ft R -All the nonblocking collective routines for data access are "split" into two routines, each with _begin or _end as a suffix. These split collective routines are subject to the semantic rules described in Section 9.4.5 of the MPI-2 standard. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_File_read_at_all_end.3in b/ompi/mpi/man/man3/MPI_File_read_at_all_end.3in deleted file mode 100644 index 3a6676766bd..00000000000 --- a/ompi/mpi/man/man3/MPI_File_read_at_all_end.3in +++ /dev/null @@ -1,80 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_File_read_at_all_end 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_read_at_all_end\fP \- Reads a file at explicitly specified offsets; ending part of a split collective routine (blocking). 
- -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_File_read_at_all_end(MPI_File \fIfh\fP, void \fI*buf\fP, - MPI_Status \fI*status\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_FILE_READ_AT_ALL_END(\fIFH\fP, \fIBUF\fP, \fISTATUS\fP, \fIIERROR\fP) - \fIBUF(*)\fP - INTEGER \fIFH, STATUS(MPI_STATUS_SIZE), IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_read_at_all_end(\fIfh\fP, \fIbuf\fP, \fIstatus\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIbuf\fP - TYPE(MPI_Status) :: \fIstatus\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -fh -File handle (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -buf -Initial address of buffer (choice). -.ft R -.TP 1i -status -Status object (status). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_File_read_at_all_end is a split collective routine that stores the number of elements actually read from the file associated with -.I fh -in -.I status. -MPI_File_read_at_all_end blocks until the operation initiated by MPI_File_read_at_all_begin completes. The data is taken out of those parts of the file specified by the current view. All other fields of -.I status -are undefined. - -.SH NOTES -.ft R -All the nonblocking collective routines for data access are "split" into two routines, each with _begin or _end as a suffix. These split collective routines are subject to the semantic rules described in Section 9.4.5 of the MPI-2 standard. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. 
The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_File_read_ordered.3in b/ompi/mpi/man/man3/MPI_File_read_ordered.3in deleted file mode 100644 index 1bb16a77083..00000000000 --- a/ompi/mpi/man/man3/MPI_File_read_ordered.3in +++ /dev/null @@ -1,99 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_File_read_ordered 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_read_ordered\fP \- Reads a file at a location specified by a shared file pointer (blocking, collective). - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_File_read_ordered(MPI_File \fIfh\fP, void \fI*buf\fP, - int \fIcount\fP, MPI_Datatype \fIdatatype\fP, - MPI_Status \fI*status\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_FILE_READ_ORDERED(\fIFH\fP, \fIBUF\fP, \fICOUNT\fP, \fIDATATYPE\fP, - \fISTATUS\fP, \fIIERROR\fP) - \fIBUF\fP(*) - INTEGER \fIFH, COUNT, DATATYPE, STATUS(MPI_STATUS_SIZE), IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_read_ordered(\fIfh\fP, \fIbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIstatus\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - TYPE(*), DIMENSION(..) :: \fIbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Status) :: \fIstatus\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -fh -File handle (handle). 
-.TP 1i -count -Number of elements in buffer (integer). -.TP 1i -datatype -Data type of each buffer element (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -buf -Initial address of buffer (choice). -.TP 1i -status -Status object (Status). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R - -MPI_File_read_ordered is a collective routine. This routine must be -called by all processes in the communicator group associated with the -file handle -.I fh. -Each process may pass different argument values for the -.I datatype -and -.I count -arguments. Each process attempts to read, from the file associated with -.I fh, -a total number of -.I count -data items having -.I datatype -type into the user's buffer -.I buf. -For each process, the location in the file at which data is read is the position at which the shared file pointer would be after all processes whose ranks within the group are less than that of this process had read their data. MPI_File_read_ordered returns the actual number of -.I datatype -elements read in -.I status. -The shared file pointer is updated by the amounts of data requested by all processes of the group. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. 
- diff --git a/ompi/mpi/man/man3/MPI_File_read_ordered_begin.3in b/ompi/mpi/man/man3/MPI_File_read_ordered_begin.3in deleted file mode 100644 index 276cdb9fdf5..00000000000 --- a/ompi/mpi/man/man3/MPI_File_read_ordered_begin.3in +++ /dev/null @@ -1,97 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_File_read_ordered_begin 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_read_ordered_begin\fP \- Reads a file at a location specified by a shared file pointer; beginning part of a split collective routine (nonblocking). - -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_File_read_ordered_begin(MPI_File \fIfh\fP, void \fI*buf\fP, - int \fIcount\fP, MPI_Datatype \fIdatatype\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_FILE_READ_ORDERED_BEGIN(\fIFH\fP, \fIBUF\fP, \fICOUNT\fP, \fIDATATYPE\fP, \fIIERROR\fP) - \fIBUF(*)\fP - INTEGER \fIFH, COUNT, DATATYPE, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_read_ordered_begin(\fIfh\fP, \fIbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -fh -File handle (handle). - -.SH INPUT PARAMETERS -.ft R -.TP 1i -count -Number of elements in buffer (integer). -.ft R -.TP 1i -datatype -Data type of each buffer element (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -buf -Initial address of buffer (choice). 
-.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_File_read_ordered_begin is the beginning part of a split collective, nonblocking routine that must be -called by all processes in the communicator group associated with the -file handle -.I fh. -Each process may pass different argument values for the -.I datatype -and -.I count -arguments. Each process attempts to read, from the file associated with -.I fh, -a total number of -.I count -data items having -.I datatype -type into the user's buffer -.I buf. -For each process, the location in the file at which data is read is the position at which the shared file pointer would be after all processes whose ranks within the group are less than that of this process had read their data. - -.SH NOTES -.ft R -All the nonblocking collective routines for data access are "split" into two routines, each with _begin or _end as a suffix. These split collective routines are subject to the semantic rules described in Section 9.4.5 of the MPI-2 standard. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_File_read_ordered_end.3in b/ompi/mpi/man/man3/MPI_File_read_ordered_end.3in deleted file mode 100644 index fcf80a3f562..00000000000 --- a/ompi/mpi/man/man3/MPI_File_read_ordered_end.3in +++ /dev/null @@ -1,81 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. 
-.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_File_read_ordered_end 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_read_ordered_end\fP \- Reads a file at a location specified by a shared file pointer; ending part of a split collective routine (blocking). - -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_File_read_ordered_end(MPI_File \fIfh\fP, void \fI*buf\fP, - MPI_Status \fI*status\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_FILE_READ_ORDERED_END(\fIFH\fP, \fIBUF\fP, \fISTATUS\fP, \fIIERROR\fP) - \fIBUF(*)\fP - INTEGER \fIFH, STATUS(MPI_STATUS_SIZE), IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_read_ordered_end(\fIfh\fP, \fIbuf\fP, \fIstatus\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIbuf\fP - TYPE(MPI_Status) :: \fIstatus\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -fh -File handle (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -buf -Initial address of buffer (choice). -.ft R -.TP 1i -status -Status object (status). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_File_read_ordered_end is the ending part of a split collective routine that must be called by all processes in the communicator group associated with the -file handle -.I fh. -MPI_File_rad_ordered_end blocks until the operation initiated by MPI_File_read_ordered_begin completes. It attempts to read the file associated with -.I fh -into the user's buffer -.I buf. -The shared file pointer is updated by the amounts of data requested by all processes of the group. 
For each process, the location in the file at which data is read is the position at which the shared file pointer would be after all processes whose ranks within the group are less than that of this process had read their data. - -.SH NOTES -.ft R -All the nonblocking collective routines for data access are "split" into two routines, each with _begin or _end as a suffix. These split collective routines are subject to the semantic rules described in Section 9.4.5 of the MPI-2 standard. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_File_read_shared.3in b/ompi/mpi/man/man3/MPI_File_read_shared.3in deleted file mode 100644 index e66eb158197..00000000000 --- a/ompi/mpi/man/man3/MPI_File_read_shared.3in +++ /dev/null @@ -1,83 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_File_read_shared 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_read_shared\fP \- Reads a file using the shared file pointer (blocking, noncollective). 
- -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_File_read_shared(MPI_File \fIfh\fP, void \fI*buf\fP, int \fIcount\fP, - MPI_Datatype \fIdatatype\fP, MPI_Status \fI*status\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_FILE_READ_SHARED(\fIFH\fP, \fIBUF\fP, \fICOUNT\fP, \fIDATATYPE\fP, \fISTATUS\fP, - \fIIERROR\fP) - \fIBUF(*)\fP - INTEGER \fIFH, COUNT, DATATYPE,STATUS(MPI_STATUS_SIZE), IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_read_shared(\fIfh\fP, \fIbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIstatus\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - TYPE(*), DIMENSION(..) :: \fIbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Status) :: \fIstatus\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -fh -File handle (handle). - -.SH INPUT PARAMETERS -.ft R -.TP 1i -count -Number of elements in buffer (integer) -.ft R -.TP 1i -datatype -Data type of each buffer element (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -buf -Initial address of buffer (choice). -.ft R -.TP 1i -status -Status object (status). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_File_read_shared is a blocking routine that uses the shared file pointer to read files. The order of serialization is not deterministic for this noncollective routine. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. 
Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_File_seek.3in b/ompi/mpi/man/man3/MPI_File_seek.3in deleted file mode 100644 index bead5615a0e..00000000000 --- a/ompi/mpi/man/man3/MPI_File_seek.3in +++ /dev/null @@ -1,105 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_File_seek 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_seek\fP \- Updates individual file pointers (noncollective). - -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_File_seek(MPI_File \fIfh\fP, MPI_Offset \fIoffset\fP, - int \fIwhence\fP) - -.fi -.SH Fortran Syntax (see FORTRAN 77 NOTES) -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_FILE_SEEK(\fIFH\fP, \fIOFFSET\fP, \fIWHENCE\fP, \fIIERROR\fP) - INTEGER \fIFH, WHENCE, IERROR\fP - INTEGER(KIND=MPI_OFFSET_KIND) \fIOFFSET\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_seek(\fIfh\fP, \fIoffset\fP, \fIwhence\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - INTEGER(KIND=MPI_OFFSET_KIND), INTENT(IN) :: \fIoffset\fP - INTEGER, INTENT(IN) :: \fIwhence\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -fh -File handle (handle). -.TP 1i -offset -File offset (integer). -.TP 1i -whence -Update mode (integer). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_File_seek updates the individual file pointer according to -.I whence, -which could have the following possible values: -.TP - o -MPI_SEEK_SET - The pointer is set to -.I offset. 
-.TP - o -MPI_SEEK_CUR - The pointer is set to the current pointer position plus -.I offset. -.TP - o -MPI_SEEK_END - The pointer is set to the end of the file plus -.I offset. -.sp -.RE -The -.I offset -can be negative, which allows seeking backwards. It is erroneous to -seek to a negative position in the file. The end of the file is -defined to be the location of the next elementary data item -immediately after the last accessed data item, even if that location -is a hole. - -.SH FORTRAN 77 NOTES -.ft R -The MPI standard prescribes portable Fortran syntax for -the \fIOFFSET\fP argument only for Fortran 90. FORTRAN 77 -users may use the non-portable syntax -.sp -.nf - INTEGER*MPI_OFFSET_KIND \fIOFFSET\fP -.fi -.sp -where MPI_OFFSET_KIND is a constant defined in mpif.h -and gives the length of the declared integer in bytes. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_File_seek_shared.3in b/ompi/mpi/man/man3/MPI_File_seek_shared.3in deleted file mode 100644 index 4403529b778..00000000000 --- a/ompi/mpi/man/man3/MPI_File_seek_shared.3in +++ /dev/null @@ -1,115 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. 
-.\" $COPYRIGHT$ -.TH MPI_File_seek_shared 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_seek_shared\fP \- Updates the global shared file pointer (collective). - -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_File_seek_shared(MPI_File \fIfh\fP, MPI_Offset \fIoffset\fP, - int \fIwhence\fP) - -.fi -.SH Fortran Syntax (see FORTRAN 77 NOTES) -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_FILE_SEEK_SHARED(\fIFH\fP, \fIOFFSET\fP, \fIWHENCE\fP, \fIIERROR\fP) - INTEGER \fIFH, WHENCE, IERROR\fP - INTEGER(KIND=MPI_OFFSET_KIND) \fIOFFSET\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_seek_shared(\fIfh\fP, \fIoffset\fP, \fIwhence\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - INTEGER(KIND=MPI_OFFSET_KIND), INTENT(IN) :: \fIoffset\fP - INTEGER, INTENT(IN) :: \fIwhence\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -fh -File handle (handle). -.TP 1i -offset -File offset (integer). -.TP 1i -whence -Update mode (integer). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_File_seek_shared updates the shared file pointer according to -.I whence, -which could have the following possible values: -.TP - o -MPI_SEEK_SET - The pointer is set to -.I offset. -.TP - o -MPI_SEEK_CUR - The pointer is set to the current pointer position plus -.I offset. -.TP - o -MPI_SEEK_END - The pointer is set to the end of the file plus -.I offset. -.sp -.RE -MPI_File_seek_shared is collective; all the processes in the communicator -group associated with the file handle -.I fh -must call MPI_File_seek_shared with the same -.I offset -and -.I whence. -All processes in the communicator group are synchronized before the shared file pointer is updated. - -.sp -The -.I offset -can be negative, which allows seeking backwards. It is erroneous to -seek to a negative position in the view. 
The end of the view is -defined to be the position of the next elementary data item, relative -to the current view, following the last whole elementary data item -accessible. - -.SH FORTRAN 77 NOTES -.ft R -The MPI standard prescribes portable Fortran syntax for -the \fIOFFSET\fP argument only for Fortran 90. FORTRAN 77 -users may use the non-portable syntax -.sp -.nf - INTEGER*MPI_OFFSET_KIND \fIOFFSET\fP -.fi -.sp -where MPI_OFFSET_KIND is a constant defined in mpif.h -and gives the length of the declared integer in bytes. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_File_set_atomicity.3in b/ompi/mpi/man/man3/MPI_File_set_atomicity.3in deleted file mode 100644 index 93d1f4caf7d..00000000000 --- a/ompi/mpi/man/man3/MPI_File_set_atomicity.3in +++ /dev/null @@ -1,79 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_File_set_atomicity 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_set_atomicity\fP \- Sets consistency semantics for data-access operations (collective). 
- -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_File_set_atomicity(MPI_File \fIfh\fP, int \fIflag\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_FILE_SET_ATOMICITY(\fIFH\fP, \fIFLAG\fP, \fIIERROR\fP) - INTEGER \fIFH, FLAG, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_set_atomicity(\fIfh\fP, \fIflag\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - LOGICAL, INTENT(IN) :: \fIflag\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -fh -File handle (handle). -.TP 1i -flag -\fBtrue\fP to enable atomic mode, \fBfalse\fP to enable nonatomic mode (boolean). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -The consistency semantics for data-access operations using the set of -file handles created by one collective MPI_File_open is set by collectively -calling MPI_File_set_atomicity. All processes in the group must pass identical values for -.I fh -and -.I flag. -If -.I flag -is -.I true, -atomic mode is set; if -.I flag -is -.I false, -nonatomic mode is set. -.sp -The default value on a call to MPI_File_open in Open MPI is \fItrue\fP for jobs running on more than one node, \fIfalse\fP for jobs running on a single SMP. For more information, see the MPI-2 standard. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. 
- diff --git a/ompi/mpi/man/man3/MPI_File_set_errhandler.3in b/ompi/mpi/man/man3/MPI_File_set_errhandler.3in deleted file mode 100644 index e5a15d5f9b6..00000000000 --- a/ompi/mpi/man/man3/MPI_File_set_errhandler.3in +++ /dev/null @@ -1,65 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_File_set_errhandler 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_set_errhandler \fP \- Sets the error handler for a file. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_File_set_errhandler(MPI_File \fIfile\fP, MPI_Errhandler - \fIerrhandler\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_FILE_SET_ERRHANDLER(\fIFILE, ERRHANDLER, IERROR\fP) - INTEGER \fIFILE, ERRHANDLER, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_set_errhandler(\fIfile\fP, \fIerrhandler\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfile\fP - TYPE(MPI_Errhandler), INTENT(IN) :: \fIerrhandler\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -file -File (handle). - -.SH INPUT PARAMETER -.ft R -.TP 1i -errhandler -New error handler for file (handle). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -Attaches a new error handler to a file. The error handler must be either a predefined error handler or an error handler created by a call to MPI_File_create_errhandler. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. 
For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. - - diff --git a/ompi/mpi/man/man3/MPI_File_set_info.3in b/ompi/mpi/man/man3/MPI_File_set_info.3in deleted file mode 100644 index ac63a375a18..00000000000 --- a/ompi/mpi/man/man3/MPI_File_set_info.3in +++ /dev/null @@ -1,105 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_File_set_info 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_set_info\fP \- Sets new values for hints (collective). - -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_File_set_info(MPI_File \fIfh\fP, MPI_Info \fIinfo\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_FILE_SET_INFO(\fIFH\fP, \fIINFO\fP, \fIIERROR\fP) - INTEGER \fIFH, INFO, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_set_info(\fIfh\fP, \fIinfo\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - TYPE(MPI_Info), INTENT(IN) :: \fIinfo\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -fh -File handle (handle). - -.SH INPUT PARAMETER -.ft R -.TP 1i -info -Info object (handle). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_File_set_info is a collective routine that sets new values for the hints of the file associated with \fIfh\fP. 
These hints are set for each file, using the MPI_File_open, MPI_File_delete, MPI_File_set_view, and MPI_File_set_info routines. The opaque \fIinfo\fP object, which allows you to provide hints for optimization of your code, may be different on each process, but some \fIinfo\fP entries are required to be the same on all processes: In these cases, they must appear with the same value in each process's info object. See the HINTS section for a list of hints that can be set. - -.SH HINTS -.ft R -The following hints can be used as values for the \fIinfo\fP argument. -.sp -SETTABLE HINTS: -.sp -- shared_file_timeout: Amount of time (in seconds) to wait for access to the -shared file pointer before exiting with MPI_ERR_TIMEDOUT. -.sp -- rwlock_timeout: Amount of time (in seconds) to wait for obtaining a read or -write lock on a contiguous chunk of a UNIX file before exiting with MPI_ERR_TIMEDOUT. -.sp -- noncoll_read_bufsize: Maximum size of the buffer used by -MPI I/O to satisfy read requests in -the noncollective data-access routines. (See NOTE, below.) -.sp -- noncoll_write_bufsize: Maximum size of the buffer used by -MPI I/O to satisfy write requests in -the noncollective data-access routines. (See NOTE, below.) -.sp -- coll_read_bufsize: Maximum size of the buffer used by MPI -I/O to satisfy read requests in the -collective data-access routines. (See NOTE, below.) -.sp -- coll_write_bufsize: Maximum size of the buffer used by MPI -I/O to satisfy write requests in the -collective data-access routines. (See NOTE, below.) -.sp -NOTE: A buffer size smaller than the distance (in bytes) in a UNIX file between the first byte and the last byte of the access request causes MPI I/O to iterate and perform multiple UNIX read() or write() calls. 
If the request includes multiple noncontiguous chunks of data, and the buffer size is greater than the size of those chunks, then the UNIX read() or write() (made at the MPI I/O level) will access data not requested by this process in order to reduce the total number of write() calls made. If this is not desirable behavior, you should reduce this buffer size to equal the size of the contiguous chunks within the aggregate request. -.sp -- mpiio_concurrency: (boolean) controls whether nonblocking -I/O routines can bind an extra thread to an LWP. -.sp -- mpiio_coll_contiguous: (boolean) controls whether subsequent collective data accesses will request collectively contiguous regions of the file. -.sp -NON-SETTABLE HINTS: -.sp -- filename: Access this hint to get the name of the file. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_File_set_size.3in b/ompi/mpi/man/man3/MPI_File_set_size.3in deleted file mode 100644 index 6d5639b1f14..00000000000 --- a/ompi/mpi/man/man3/MPI_File_set_size.3in +++ /dev/null @@ -1,90 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. 
-.\" $COPYRIGHT$ -.TH MPI_File_set_size 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_set_size\fP \- Resizes a file (collective). - -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_File_set_size(MPI_File \fIfh\fP, MPI_Offset \fIsize\fP) - -.fi -.SH Fortran Syntax (see FORTRAN 77 NOTES) -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_FILE_SET_SIZE(\fIFH\fP, \fISIZE\fP, \fIIERROR\fP) - INTEGER \fIFH, IERROR\fP - INTEGER(KIND=MPI_OFFSET_KIND) \fISIZE\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_set_size(\fIfh\fP, \fIsize\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - INTEGER(KIND=MPI_OFFSET_KIND), INTENT(IN) :: \fIsize\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -fh -File handle (handle). -.TP 1i -size -Size to truncate or expand file (integer). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_File_set_size resizes the file associated with the file handle -.I fh, -truncating UNIX files as necessary. MPI_File_set_size is collective; all -processes in the group must pass identical values for size. -.sp -When using MPI_File_set_size on a UNIX file, if \fIsize\fP is larger than the current file size, the file size becomes \fIsize\fP. If \fIsize\fP is smaller than the current file size, the file is truncated at the position defined by \fIsize\fP (from the beginning of the file and measured in bytes). Regions of the file which have been previously written are unaffected. -.sp -MPI_File_set_size does not affect the individual file pointers or the -shared file pointer. -.sp -Note that the actual amount of storage space cannot be allocated by MPI_File_set_size. Use MPI_File_preallocate to accomplish this. -.sp -It is erroneous to call this function if MPI_MODE_SEQUENTIAL mode was specified when the file was opened. 
- -.SH FORTRAN 77 NOTES -.ft R -The MPI standard prescribes portable Fortran syntax for -the \fISIZE\fP argument only for Fortran 90. FORTRAN 77 -users may use the non-portable syntax -.sp -.nf - INTEGER*MPI_OFFSET_KIND \fISIZE\fP -.fi -.sp -where MPI_OFFSET_KIND is a constant defined in mpif.h -and gives the length of the declared integer in bytes. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. - - diff --git a/ompi/mpi/man/man3/MPI_File_set_view.3in b/ompi/mpi/man/man3/MPI_File_set_view.3in deleted file mode 100644 index 5b6a099e39c..00000000000 --- a/ompi/mpi/man/man3/MPI_File_set_view.3in +++ /dev/null @@ -1,177 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_File_set_view 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_set_view\fP \- Changes process's view of data in file (collective). 
-.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_File_set_view(MPI_File \fIfh\fP, MPI_Offset \fIdisp\fP, - MPI_Datatype \fIetype\fP, MPI_Datatype \fIfiletype\fP, - const char \fI*datarep\fP, MPI_Info \fIinfo\fP) - -.fi -.SH Fortran Syntax (see FORTRAN 77 NOTES) -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_FILE_SET_VIEW(\fIFH\fP, \fIDISP\fP, \fIETYPE\fP, - \fIFILETYPE\fP, \fIDATAREP\fP, \fIINFO\fP, \fIIERROR\fP) - INTEGER \fIFH, ETYPE, FILETYPE, INFO, IERROR\fP - CHARACTER*(*) \fIDATAREP\fP - INTEGER(KIND=MPI_OFFSET_KIND) \fIDISP\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_set_view(\fIfh\fP, \fIdisp\fP, \fIetype\fP, \fIfiletype\fP, \fIdatarep\fP, \fIinfo\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - INTEGER(KIND=MPI_OFFSET_KIND), INTENT(IN) :: \fIdisp\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIetype\fP, \fIfiletype\fP - CHARACTER(LEN=*), INTENT(IN) :: \fIdatarep\fP - TYPE(MPI_Info), INTENT(IN) :: \fIinfo\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -fh -File handle (handle). - -.SH INPUT PARAMETERS -.ft R -.TP 1i -disp -Displacement (integer). -.TP 1i -etype -Elementary data type (handle). -.TP 1i -filetype -File type (handle). See Restrictions, below. -.TP 1i -datarep -Data representation (string). -.TP 1i -info -Info object (handle). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -The MPI_File_set_view routine changes the process's view of the data -in the file -- the beginning of the data accessible in the file through -that view is set to -.I disp; -the type of data is set to -.I etype; -and the distribution of data to processes is set to -.I filetype. -In addition, MPI_File_set_view resets the independent file pointers and -the shared file pointer to zero. 
MPI_File_set_view is collective across the -.IR fh ; -all processes in the group must pass identical values for -.IR datarep -and provide an -.I etype -with an identical extent. The values for -.IR disp , -.IR filetype , -and -.I info -may vary. It is erroneous to use the shared file pointer data-access -routines unless identical values for -.I disp -and -.I filetype -are also given. The data types passed in -.I etype -and -.I filetype -must be committed. -.sp -The -.I disp -displacement argument specifies the position (absolute offset in -bytes from the beginning of the file) where the view begins. -.sp -The MPI_File_set_view interface allows the user to pass a data-representation string to MPI I/O via the \fIdatarep\fP argument. To obtain the default value pass the value "native". The user can also pass information via the \fIinfo\fP argument. See the HINTS section for a list of hints that can be set. For more information, see the MPI-2 standard. - -.SH HINTS -.ft R -The following hints can be used as values for the \fIinfo\fP argument. -.sp -SETTABLE HINTS: -.sp -- MPI_INFO_NULL -.sp -- shared_file_timeout: Amount of time (in seconds) to wait for access to the -shared file pointer before exiting with MPI_ERR_TIMEDOUT. -.sp -- rwlock_timeout: Amount of time (in seconds) to wait for obtaining a read or -write lock on a contiguous chunk of a UNIX file before exiting with MPI_ERR_TIMEDOUT. -.sp -- noncoll_read_bufsize: Maximum size of the buffer used by -MPI I/O to satisfy read requests in -the noncollective data-access routines. (See NOTE, below.) -.sp -- noncoll_write_bufsize: Maximum size of the buffer used by -MPI I/O to satisfy write requests in -the noncollective data-access routines. (See NOTE, below.) -.sp -- coll_read_bufsize: Maximum size of the buffer used by MPI -I/O to satisfy read requests in the -collective data-access routines. (See NOTE, below.) 
-.sp -- coll_write_bufsize: Maximum size of the buffer used by MPI -I/O to satisfy write requests in the -collective data-access routines. (See NOTE, below.) -.sp -NOTE: A buffer size smaller than the distance (in bytes) in a UNIX file between the first byte and the last byte of the access request causes MPI I/O to iterate and perform multiple UNIX read() or write() calls. If the request includes multiple noncontiguous chunks of data, and the buffer size is greater than the size of those chunks, then the UNIX read() or write() (made at the MPI I/O level) will access data not requested by this process in order to reduce the total number of write() calls made. If this is not desirable behavior, you should reduce this buffer size to equal the size of the contiguous chunks within the aggregate request. -.sp -- mpiio_concurrency: (boolean) controls whether nonblocking -I/O routines can bind an extra thread to an LWP. -.sp -- mpiio_coll_contiguous: (boolean) controls whether subsequent collective data accesses will request collectively contiguous regions of the file. -.sp -NON-SETTABLE HINTS: -.sp -- filename: Access this hint to get the name of the file. - -.SH FORTRAN 77 NOTES -.ft R -The MPI standard prescribes portable Fortran syntax for -the \fIDISP\fP argument only for Fortran 90. FORTRAN 77 -users may use the non-portable syntax -.sp -.nf - INTEGER*MPI_OFFSET_KIND \fIDISP\fP -.fi -.sp -where MPI_OFFSET_KIND is a constant defined in mpif.h -and gives the length of the declared integer in bytes. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. 
Note that MPI does not guarantee that an MPI program can continue past an error. - - diff --git a/ompi/mpi/man/man3/MPI_File_sync.3in b/ompi/mpi/man/man3/MPI_File_sync.3in deleted file mode 100644 index 2400083bda9..00000000000 --- a/ompi/mpi/man/man3/MPI_File_sync.3in +++ /dev/null @@ -1,70 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_File_sync 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_sync\fP \- Makes semantics consistent for data-access operations (collective). - -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_File_sync(MPI_File \fIfh\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_FILE_SYNC(\fIFH\fP, \fIIERROR\fP) - INTEGER \fIFH, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_sync(\fIfh\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -fh -File handle (handle). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -Calling MPI_File_sync with -.I fh -causes all previous writes to -.I fh -by the calling process to be written to permanent storage. If other processes have made updates to permanent storage, then all such updates become visible to subsequent reads of -.I fh -by the calling process. -.sp -MPI_File_sync is a collective operation. The user is responsible for ensuring that all nonblocking requests on -.I fh -have been completed before calling MPI_File_sync. Otherwise, the call to MPI_File_sync is erroneous. 
- -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. - - diff --git a/ompi/mpi/man/man3/MPI_File_write.3in b/ompi/mpi/man/man3/MPI_File_write.3in deleted file mode 100644 index 91c4ca0b799..00000000000 --- a/ompi/mpi/man/man3/MPI_File_write.3in +++ /dev/null @@ -1,101 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_File_write 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_write\fP \- Writes a file starting at the location specified by the individual file pointer (blocking, noncollective). - -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include <mpi.h> -int MPI_File_write(MPI_File \fIfh\fP, const void \fI*buf\fP, - int \fIcount\fP, MPI_Datatype \fIdatatype\fP, - MPI_Status \fI*status\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -!
or the older form: INCLUDE 'mpif.h' -MPI_FILE_WRITE(\fIFH\fP, \fIBUF\fP, \fICOUNT\fP, - \fIDATATYPE\fP, \fISTATUS\fP, \fIIERROR\fP) - \fIBUF\fP(*) - INTEGER \fIFH, COUNT, DATATYPE, STATUS(MPI_STATUS_SIZE), IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_write(\fIfh\fP, \fIbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIstatus\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - TYPE(*), DIMENSION(..), INTENT(IN) :: \fIbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Status) :: \fIstatus\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -fh -File handle (handle). - -.SH INPUT PARAMETERS -.ft R -.TP 1i -buf -Initial address of buffer (choice). -.TP 1i -count -Number of elements in buffer (integer). -.TP 1i -datatype -Data type of each buffer element (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -status -Status object (status). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_File_write attempts to write into the file associated with -.I fh -(at the current individual file pointer position maintained by the system) a total number of -.I count -data items having -.I datatype -type from the user's buffer -.I buf. -The data is written into those parts of the -file specified by the current view. MPI_File_write stores the -number of -.I datatype -elements actually written in -.I status. -All other fields of -.I status -are undefined. -.sp -It is erroneous to call this function if MPI_MODE_SEQUENTIAL mode was specified when the file was opened. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. 
The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_File_write_all.3in b/ompi/mpi/man/man3/MPI_File_write_all.3in deleted file mode 100644 index ec8ce2e416e..00000000000 --- a/ompi/mpi/man/man3/MPI_File_write_all.3in +++ /dev/null @@ -1,97 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_File_write_all 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_write_all\fP \- Writes a file starting at the locations specified by individual file pointers (blocking, collective). - -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include <mpi.h> -int MPI_File_write_all(MPI_File \fIfh\fP, const void \fI*buf\fP, - int \fIcount\fP, MPI_Datatype \fIdatatype\fP, MPI_Status \fI*status\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -!
or the older form: INCLUDE 'mpif.h' -MPI_FILE_WRITE_ALL(\fIFH\fP, \fIBUF\fP, \fICOUNT\fP, - \fIDATATYPE\fP, \fISTATUS\fP, \fIIERROR\fP) - \fIBUF(*)\fP - INTEGER \fIFH, COUNT, DATATYPE, STATUS(MPI_STATUS_SIZE), IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_write_all(\fIfh\fP, \fIbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIstatus\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - TYPE(*), DIMENSION(..), INTENT(IN) :: \fIbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Status) :: \fIstatus\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -fh -File handle (handle). -.TP 1i -buf -Initial address of buffer (choice). -.TP 1i -count -Number of elements in buffer (integer). -.TP 1i -datatype -Data type of each buffer element (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -status -Status object (status). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_File_write_all is a collective routine that attempts to write into the file associated with -.I fh -(at the current individual file pointer position maintained by the system) a total number of -.I count -data items having -.I datatype -type from the user's buffer -.I buf. -The data is written into those parts of the -file specified by the current view. MPI_File_write_all stores the -number of -.I datatype -elements actually written in -.I status. -All other fields of -.I status -are undefined. -.sp -It is erroneous to call this function if MPI_MODE_SEQUENTIAL mode was specified when the file was opened. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. 
The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_File_write_all_begin.3in b/ompi/mpi/man/man3/MPI_File_write_all_begin.3in deleted file mode 100644 index 349020c5aac..00000000000 --- a/ompi/mpi/man/man3/MPI_File_write_all_begin.3in +++ /dev/null @@ -1,95 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_File_write_all_begin 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_write_all_begin\fP \- Writes a file starting at the locations specified by individual file pointers; beginning part of a split collective routine (nonblocking). - -.SH SYNTAX
.ft R -.nf -.SH C Syntax -.nf -#include <mpi.h> -int MPI_File_write_all_begin(MPI_File \fIfh\fP, const void \fI*buf\fP, - int \fIcount\fP, MPI_Datatype \fIdatatype\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -!
or the older form: INCLUDE 'mpif.h' -MPI_FILE_WRITE_ALL_BEGIN(\fIFH\fP, \fIBUF\fP, \fICOUNT\fP, \fIDATATYPE\fP, \fIIERROR\fP) - \fIBUF(*)\fP - INTEGER \fIFH, COUNT, DATATYPE, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_write_all_begin(\fIfh\fP, \fIbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -fh -File handle (handle). - -.SH INPUT PARAMETERS -.ft R -.TP 1i -buf -Initial address of buffer (choice). -.ft R -.TP 1i -count -Number of elements in buffer (integer). -.ft R -.TP 1i -datatype -Data type of each buffer element (handle). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_File_write_all_begin is the beginning part of a split collective, nonblocking routine that attempts to write into the file associated with -.I fh -(at the current individual file pointer position maintained by the system) a total number of -.I count -data items having -.I datatype -type from the user's buffer -.I buf. -The data is written into those parts of the -file specified by the current view. - -.SH NOTES -.ft R -All the nonblocking collective routines for data access are "split" into two routines, each with _begin or _end as a suffix. These split collective routines are subject to the semantic rules described in Section 9.4.5 of the MPI-2 standard. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. 
The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. - - - - diff --git a/ompi/mpi/man/man3/MPI_File_write_all_end.3in b/ompi/mpi/man/man3/MPI_File_write_all_end.3in deleted file mode 100644 index c53fb09689c..00000000000 --- a/ompi/mpi/man/man3/MPI_File_write_all_end.3in +++ /dev/null @@ -1,85 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_File_write_all_end 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_write_all_end\fP \- Writes a file starting at the locations specified by individual file pointers; ending part of a split collective routine (blocking). - -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_File_write_all_end(MPI_File \fIfh\fP, const void \fI*buf\fP, MPI_Status \fI*status\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_FILE_WRITE_ALL_END(\fIFH\fP, \fIBUF\fP, \fISTATUS\fP, \fIIERROR\fP) - \fIBUF(*)\fP - INTEGER \fIFH, STATUS, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_write_all_end(\fIfh\fP, \fIbuf\fP, \fIstatus\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIbuf\fP - TYPE(MPI_Status) :: \fIstatus\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -fh -File handle (handle). 
- -.SH INPUT PARAMETER -.ft R -.TP 1i -buf -Initial address of buffer (choice). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -status -Status object (status). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_File_write_all_end is the ending part of a split collective routine that stores the -number of elements actually written into the file associated with -.I fh -from the user's buffer -.I buf -in -.I status. -MPI_File_write_all_end blocks until the operation initiated by MPI_File_write_all_begin completes. The data is written into those parts of the -file specified by the current view. All other fields of -.I status -are undefined. - -.SH NOTES -.ft R -All the nonblocking collective routines for data access are "split" into two routines, each with _begin or _end as a suffix. These split collective routines are subject to the semantic rules described in Section 9.4.5 of the MPI-2 standard. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. diff --git a/ompi/mpi/man/man3/MPI_File_write_at.3in b/ompi/mpi/man/man3/MPI_File_write_at.3in deleted file mode 100644 index 12d37e98956..00000000000 --- a/ompi/mpi/man/man3/MPI_File_write_at.3in +++ /dev/null @@ -1,131 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. 
-.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_File_write_at 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_write_at\fP \- Writes a file at an explicitly specified offset (blocking, noncollective). - -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_File_write_at(MPI_File \fIfh\fP, MPI_Offset \fIoffset\fP, const void \fI*buf\fP, - int \fIcount\fP, MPI_Datatype \fIdatatype\fP, MPI_Status \fI*status\fP) - -.fi -.SH Fortran Syntax (see FORTRAN 77 NOTES) -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_FILE_WRITE_AT(\fIFH\fP, \fIOFFSET\fP, \fIBUF\fP, \fICOUNT\fP, - \fIDATATYPE\fP, \fISTATUS\fP, \fIIERROR\fP) - \fIBUF\fP(*) - INTEGER \fIFH, COUNT, DATATYPE, STATUS(MPI_STATUS_SIZE), IERROR\fP - INTEGER(KIND=MPI_OFFSET_KIND) \fIOFFSET\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_write_at(\fIfh\fP, \fIoffset\fP, \fIbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIstatus\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - INTEGER(KIND=MPI_OFFSET_KIND), INTENT(IN) :: \fIoffset\fP - TYPE(*), DIMENSION(..), INTENT(IN) :: \fIbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Status) :: \fIstatus\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -fh -File handle (handle). -.TP 1i -offset -File offset (integer). -.TP 1i -buf -Initial address of buffer (choice). -.TP 1i -count -Number of elements in buffer (integer). -.TP 1i -datatype -Data type of each buffer element (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -status -Status object (status). -.TP 1i -IERROR -Fortran only: Error status (integer). 
- -.SH DESCRIPTION -.ft R -MPI_File_write_at attempts to write into the file associated with -.I fh -(at the -.I offset -position) a total number of -.I count -data items having -.I datatype -type from the user's buffer -.I buf. -The offset is in -.I etype -units relative to the current view. That is, holes are not counted -when locating an offset. The data is written into those parts of the -file specified by the current view. MPI_File_write_at stores the -number of -.I datatype -elements actually written in -.I status. -All other fields of -.I status -are undefined. -.sp -It is erroneous to call this function if MPI_MODE_SEQUENTIAL mode was specified when the file was opened. - -.SH FORTRAN 77 NOTES -.ft R -The MPI standard prescribes portable Fortran syntax for -the \fIOFFSET\fP argument only for Fortran 90. FORTRAN 77 -users may use the non-portable syntax -.sp -.nf - INTEGER*MPI_OFFSET_KIND \fIOFFSET\fP -.fi -.sp -where MPI_OFFSET_KIND is a constant defined in mpif.h -and gives the length of the declared integer in bytes. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. 
- -.SH SEE ALSO -.ft R -MPI_File_iwrite_at -.br -MPI_File_write_at_all -.br -MPI_File_write_at_all_begin -.br -MPI_File_write_at_all_end -.br - diff --git a/ompi/mpi/man/man3/MPI_File_write_at_all.3in b/ompi/mpi/man/man3/MPI_File_write_at_all.3in deleted file mode 100644 index 722b520a32c..00000000000 --- a/ompi/mpi/man/man3/MPI_File_write_at_all.3in +++ /dev/null @@ -1,118 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_File_write_at_all 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_write_at_all\fP \- Writes a file at explicitly specified offsets (blocking, collective). - -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_File_write_at_all(MPI_File \fIfh\fP, MPI_Offset \fIoffset\fP, const void \fI*buf\fP, - int \fIcount\fP, MPI_Datatype \fIdatatype\fP, MPI_Status \fI*status\fP) - -.fi -.SH Fortran Syntax (see FORTRAN 77 NOTES) -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_FILE_WRITE_AT_ALL(\fIFH\fP, \fIOFFSET\fP, \fIBUF\fP, \fICOUNT\fP, - \fIDATATYPE\fP, \fISTATUS\fP, \fIIERROR\fP) - \fIBUF\fP(*) - INTEGER \fIFH, COUNT, DATATYPE, STATUS(MPI_STATUS_SIZE), IERROR\fP - INTEGER(KIND=MPI_OFFSET_KIND) \fIOFFSET\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_write_at_all(\fIfh\fP, \fIoffset\fP, \fIbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIstatus\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - INTEGER(KIND=MPI_OFFSET_KIND), INTENT(IN) :: \fIoffset\fP - TYPE(*), DIMENSION(..), INTENT(IN) :: \fIbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Status) :: \fIstatus\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -fh -File handle (handle). -.TP 1i -offset -File offset (integer). -.TP 1i -buf -Initial address of buffer (choice). -.TP 1i -count -Number of elements in buffer (integer). -.TP 1i -datatype -Data type of each buffer element (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -status -Status object (status). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_File_write_at_all is a collective routine that attempts to write into the file associated with -.I fh -(at the -.I offset -position) a total number of -.I count -data items having -.I datatype -type from the user's buffer -.I buf. -The offset is in etype units relative to the current view. That is, holes are not counted -when locating an offset. The data is written into those parts of the -file specified by the current view. MPI_File_write_at_all stores the -number of -.I datatype -elements actually written in -.I status. -All other fields of -.I status -are undefined. -.sp -It is erroneous to call this function if MPI_MODE_SEQUENTIAL mode was specified when the file was opened. 
- -.SH FORTRAN 77 NOTES -.ft R -The MPI standard prescribes portable Fortran syntax for -the \fIOFFSET\fP argument only for Fortran 90. FORTRAN 77 -users may use the non-portable syntax -.sp -.nf - INTEGER*MPI_OFFSET_KIND \fIOFFSET\fP -.fi -.sp -where MPI_OFFSET_KIND is a constant defined in mpif.h -and gives the length of the declared integer in bytes. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_File_write_at_all_begin.3in b/ompi/mpi/man/man3/MPI_File_write_at_all_begin.3in deleted file mode 100644 index 885b1ad08a8..00000000000 --- a/ompi/mpi/man/man3/MPI_File_write_at_all_begin.3in +++ /dev/null @@ -1,114 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_File_write_at_all_begin 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_write_at_all_begin\fP \- Writes a file at explicitly specified offsets; beginning part of a split collective routine (nonblocking). 
- -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_File_write_at_all_begin(MPI_File \fIfh\fP, MPI_Offset \fIoffset\fP, - const void \fI*buf\fP, int \fIcount\fP, MPI_Datatype \fIdatatype\fP) - -.fi -.SH Fortran Syntax (see FORTRAN 77 NOTES) -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_FILE_WRITE_AT_ALL_BEGIN(\fIFH\fP, \fIOFFSET\fP, \fIBUF\fP, \fICOUNT\fP, \fIDATATYPE\fP, \fIIERROR\fP) - \fIBUF\fP(*) - INTEGER \fIFH, COUNT, DATATYPE, IERROR\fP - INTEGER(KIND=MPI_OFFSET_KIND) \fIOFFSET\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_write_at_all_begin(\fIfh\fP, \fIoffset\fP, \fIbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - INTEGER(KIND=MPI_OFFSET_KIND), INTENT(IN) :: \fIoffset\fP - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -fh -File handle (handle). - -.SH INPUT PARAMETERS -.ft R -.TP 1i -offset -File offset (handle). -.ft R -.TP 1i -buf -Initial address of buffer (choice). -.ft R -.TP 1i -count -Number of elements in buffer (integer). -.ft R -.TP 1i -datatype -Data type of each buffer element (handle). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_File_write_at_all_begin is the beginning part of a split collective, that is, a nonblocking routine that attempts to write into the file associated with -.I fh -(at the -.I offset -position) a total number of -.I count -data items having -.I datatype -type from the user's buffer -.I buf. -The offset is in etype units relative to the current view. That is, holes are not counted -when locating an offset. The data is written into those parts of the -file specified by the current view. 
- -.SH FORTRAN 77 NOTES -.ft R -The MPI standard prescribes portable Fortran syntax for -the \fIOFFSET\fP argument only for Fortran 90. FORTRAN 77 -users may use the non-portable syntax -.sp -.nf - INTEGER*MPI_OFFSET_KIND \fIOFFSET\fP -.fi -.sp -where MPI_OFFSET_KIND is a constant defined in mpif.h -and gives the length of the declared integer in bytes. - -.SH NOTES -.ft R -All the nonblocking collective routines for data access are "split" into two routines, each with _begin or _end as a suffix. These split collective routines are subject to the semantic rules described in Section 9.4.5 of the MPI-2 standard. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_File_write_at_all_end.3in b/ompi/mpi/man/man3/MPI_File_write_at_all_end.3in deleted file mode 100644 index c94ce8475ed..00000000000 --- a/ompi/mpi/man/man3/MPI_File_write_at_all_end.3in +++ /dev/null @@ -1,85 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. 
-.\" $COPYRIGHT$ -.TH MPI_File_write_at_all_end 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_write_at_all_end\fP \- Writes a file at explicitly specified offsets; ending part of a split collective routine (blocking). - -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_File_write_at_all_end(MPI_File \fIfh\fP, const void \fI*buf\fP, - MPI_Status \fI*status\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_FILE_WRITE_AT_ALL_END(\fIFH\fP, \fIBUF\fP, \fISTATUS\fP, \fIIERROR\fP) - \fIBUF(*)\fP - INTEGER \fIFH, STATUS(MPI_STATUS_SIZE), IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_write_at_all_end(\fIfh\fP, \fIbuf\fP, \fIstatus\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIbuf\fP - TYPE(MPI_Status) :: \fIstatus\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -fh -File handle (handle). - -.SH INPUT PARAMETER -.ft R -.TP 1i -buf -Initial address of buffer (choice). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -status -Status object (status). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_File_write_at_all_end is the ending part of a split collective routine that stores the -number of elements actually written into the file associated with -.I fh -in -.I status. -The data is written into those parts of the -file specified by the current view. All other fields of -.I status -are undefined. - -.SH NOTES -.ft R -All the nonblocking collective routines for data access are "split" into two routines, each with _begin or _end as a suffix. These split collective routines are subject to the semantic rules described in Section 9.4.5 of the MPI-2 standard. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. 
-.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_File_write_ordered.3.md b/ompi/mpi/man/man3/MPI_File_write_ordered.3.md deleted file mode 100644 index f57cad9ec7c..00000000000 --- a/ompi/mpi/man/man3/MPI_File_write_ordered.3.md +++ /dev/null @@ -1,81 +0,0 @@ -# Name - -`MPI_File_write_ordered` - Writes a file at a location specified by a -shared file pointer (blocking, collective). - -# Syntax - -## C Syntax - -```c -#include - -int MPI_File_write_ordered(MPI_File fh, const void *buf, - int count, MPI_Datatype datatype, - MPI_Status *status) -``` - -## Fortran Syntax - -```fortran -USE MPI -! or the older form: INCLUDE 'mpif.h' - -MPI_FILE_WRITE_ORDERED(FH, BUF, COUNT, DATATYPE, - STATUS, IERROR) - BUF(*) - INTEGER FH, COUNT, DATATYPE, STATUS(MPI_STATUS_SIZE), IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_File_write_ordered(fh, buf, count, datatype, status, ierror) - TYPE(MPI_File), INTENT(IN) :: fh - TYPE(*), DIMENSION(..), INTENT(IN) :: buf - INTEGER, INTENT(IN) :: count - TYPE(MPI_Datatype), INTENT(IN) :: datatype - TYPE(MPI_Status) :: status - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Input Parameters - -* `fh` : File handle (handle). -* `buf` : Initial address of buffer (choice). -* `count` : Number of elements in buffer (integer). -* `datatype` : Data type of each buffer element (handle). - -# Output Parameters - -* `status` : Status object (Status). -* `IERROR` : Fortran only: Error status (integer). - -# Description - -`MPI_File_write_ordered` is a collective routine. 
This routine must be -called by all processes in the communicator group associated with the -file handle `fh`. Each process may pass different argument values for -the `datatype` and `count` arguments. Each process attempts to write, -into the file associated with `fh`, a total number of `count` data items -having `datatype` type contained in the user's buffer `buf`. For each -process, the location in the file at which data is written is the -position at which the shared file pointer would be after all processes -whose ranks within the group are less than that of this process had -written their data. `MPI_File_write_ordered` returns the number of -`datatype` elements written in `status`. The shared file pointer is -updated by the amounts of data requested by all processes of the group. - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. - -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to -`MPI_ERRORS_RETURN`. The error handler may be changed with -`MPI_File_set_errhandler`; the predefined error handler -`MPI_ERRORS_ARE_FATAL` may be used to make I/O errors fatal. Note that MPI -does not guarantee that an MPI program can continue past an error. diff --git a/ompi/mpi/man/man3/MPI_File_write_ordered.3in b/ompi/mpi/man/man3/MPI_File_write_ordered.3in deleted file mode 100644 index e8cc2fde28b..00000000000 --- a/ompi/mpi/man/man3/MPI_File_write_ordered.3in +++ /dev/null @@ -1,105 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. 
-.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_File_write_ordered 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_File_write_ordered\fP \- Writes a file at a location specified by a shared file pointer (blocking, collective). - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_File_write_ordered(MPI_File \fIfh\fP, const void \fI*buf\fP, - int \fIcount\fP, MPI_Datatype \fIdatatype\fP, - MPI_Status \fI*status\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_FILE_WRITE_ORDERED(\fIFH\fP, \fIBUF\fP, \fICOUNT\fP, \fIDATATYPE\fP, - \fISTATUS\fP, \fIIERROR\fP) - \fIBUF\fP(*) - INTEGER \fIFH, COUNT, DATATYPE, STATUS(MPI_STATUS_SIZE), IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_File_write_ordered(\fIfh\fP, \fIbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIstatus\fP, \fIierror\fP) - TYPE(MPI_File), INTENT(IN) :: \fIfh\fP - TYPE(*), DIMENSION(..), INTENT(IN) :: \fIbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Status) :: \fIstatus\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -fh -File handle (handle). -.TP 1i -buf -Initial address of buffer (choice). -.TP 1i -count -Number of elements in buffer (integer). -.TP 1i -datatype -Data type of each buffer element (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -status -Status object (Status). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_File_write_ordered is a collective routine. This routine must -be called by all processes in the communicator group associated with -the file handle -.I fh. -Each process may pass different argument values -for the -.I datatype -and -.I count -arguments. Each process attempts to -write, into the file associated with -.I fh, -a total number of -.I count -data items having datatype type contained in the user's buffer -.I buf. 
-For -each process, the location in the file at which data is written is the -position at which the shared file pointer would be after all processes -whose ranks within the group are less than that of this process had -written their data. MPI_File_write_ordered returns the number of -.I datatype -elements written in -.I status. -The shared file pointer is -updated by the amounts of data requested by all processes of the -group. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_File_write_ordered_begin.3.md b/ompi/mpi/man/man3/MPI_File_write_ordered_begin.3.md deleted file mode 100644 index dfa7d2af061..00000000000 --- a/ompi/mpi/man/man3/MPI_File_write_ordered_begin.3.md +++ /dev/null @@ -1,88 +0,0 @@ -# Name - -`MPI_File_write_ordered_begin` - Writes a file at a location specified -by a shared file pointer; beginning part of a split collective routine -(nonblocking). - -# Syntax - -## C Syntax - -```c -#include - -int MPI_File_write_ordered_begin(MPI_File fh, const void *buf, - int count, MPI_Datatype datatype) -``` - -## Fortran Syntax - -```fortran -USE MPI -! 
or the older form: INCLUDE 'mpif.h' - -MPI_FILE_WRITE_ORDERED_BEGIN(FH, BUF, COUNT, DATATYPE, IERROR) - BUF(*) - INTEGER FH, COUNT, DATATYPE, IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_File_write_ordered_begin(fh, buf, count, datatype, ierror) - TYPE(MPI_File), INTENT(IN) :: fh - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: buf - INTEGER, INTENT(IN) :: count - TYPE(MPI_Datatype), INTENT(IN) :: datatype - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Input/Output Parameter - -* `fh` : File handle (handle). - -# Input Parameters - -* `buf` : Initial address of buffer (choice). -* `count` : Number of elements in buffer (integer). -* `datatype` : Data type of each buffer element (handle). - -# Output Parameter - -* `IERROR` : Fortran only: Error status (integer). - -# Description - -`MPI_File_write_ordered_begin` is the beginning part of a split -collective, nonblocking routine that must be called by all processes in -the communicator group associated with the file handle `fh`. Each -process may pass different argument values for the `datatype` and -`count` arguments. After all processes of the group have issued their -respective calls, each process attempts to write, into the file -associated with `fh`, a total number of `count` data items having -`datatype` type contained in the user's buffer `buf`. For each process, -the location in the file at which data is written is the position at -which the shared file pointer would be after all processes whose ranks -within the group are less than that of this process had written their -data. - -# Notes - -All the nonblocking collective routines for data access are "split" -into two routines, each with _begin or _end as a suffix. These split -collective routines are subject to the semantic rules described in -Section 9.4.5 of the MPI-2 standard. 
- -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. - -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to -`MPI_ERRORS_RETURN`. The error handler may be changed with -`MPI_File_set_errhandler`; the predefined error handler -`MPI_ERRORS_ARE_FATAL` may be used to make I/O errors fatal. Note that MPI -does not guarantee that an MPI program can continue past an error. diff --git a/ompi/mpi/man/man3/MPI_File_write_ordered_end.3.md b/ompi/mpi/man/man3/MPI_File_write_ordered_end.3.md deleted file mode 100644 index 74f3745677e..00000000000 --- a/ompi/mpi/man/man3/MPI_File_write_ordered_end.3.md +++ /dev/null @@ -1,79 +0,0 @@ -# Name - -`MPI_File_write_ordered_end` - Writes a file at a location specified -by a shared file pointer; ending part of a split collective routine -(blocking). - -# Syntax - -## C Syntax - -```c -#include - -int MPI_File_write_ordered_end(MPI_File fh, const void *buf, - MPI_Status *status) -``` - -## Fortran Syntax - -```fortran -USE MPI -! or the older form: INCLUDE 'mpif.h' - -MPI_FILE_WRITE_ORDERED_END(FH, BUF, STATUS, IERROR) - BUF(*) - INTEGER FH, STATUS(MPI_STATUS_SIZE), IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_File_write_ordered_end(fh, buf, status, ierror) - TYPE(MPI_File), INTENT(IN) :: fh - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: buf - TYPE(MPI_Status) :: status - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Input/Output Parameter - -* `fh` : File handle (handle). - -# Input Parameter - -* `buf` : Initial address of buffer (choice). - -# Output Parameters - -* `status` : Status object (status). -* `IERROR` : Fortran only: Error status (integer). 
- -# Description - -`MPI_File_write_ordered_end` is the ending part of a split collective -routine that must be called by all processes in the communicator group -associated with the file handle `fh`. `MPI_File_write_ordered_end` returns -the number of elements written into the file associated with `fh` in -`status`. - -# Notes - -All the nonblocking collective routines for data access are "split" -into two routines, each with _begin or _end as a suffix. These split -collective routines are subject to the semantic rules described in -Section 9.4.5 of the MPI-2 standard. - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. - -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to -`MPI_ERRORS_RETURN`. The error handler may be changed with -`MPI_File_set_errhandler`; the predefined error handler -`MPI_ERRORS_ARE_FATAL` may be used to make I/O errors fatal. Note that MPI -does not guarantee that an MPI program can continue past an error. diff --git a/ompi/mpi/man/man3/MPI_File_write_shared.3.md b/ompi/mpi/man/man3/MPI_File_write_shared.3.md deleted file mode 100644 index 2a72daef358..00000000000 --- a/ompi/mpi/man/man3/MPI_File_write_shared.3.md +++ /dev/null @@ -1,73 +0,0 @@ -# Name - -`MPI_File_write_shared` - Writes a file using the shared file pointer -(blocking, noncollective). - -# Syntax - -## C Syntax - -```c -#include - -int MPI_File_write_shared(MPI_File fh, const void *buf, int count, - MPI_Datatype datatype, MPI_Status *status) -``` - -## Fortran Syntax - -```fortran -USE MPI -! 
or the older form: INCLUDE 'mpif.h' - -MPI_FILE_WRITE_SHARED(FH, BUF, COUNT, DATATYPE, STATUS, IERROR) - BUF(*) - INTEGER FH, COUNT, DATATYPE, STATUS(MPI_STATUS_SIZE), IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_File_write_shared(fh, buf, count, datatype, status, ierror) - TYPE(MPI_File), INTENT(IN) :: fh - TYPE(*), DIMENSION(..), INTENT(IN) :: buf - INTEGER, INTENT(IN) :: count - TYPE(MPI_Datatype), INTENT(IN) :: datatype - TYPE(MPI_Status) :: status - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Input/Output Parameter - -* `fh` : File handle (handle). - -# Input Parameters - -* `buf` : Initial address of buffer (choice). -* `count` : Number of elements in buffer (integer). -* `datatype` : Data type of each buffer element (handle). - -# Output Parameters - -* `status` : Status object (status). -* `IERROR` : Fortran only: Error status (integer). - -# Description - -`MPI_File_write_shared` is a blocking routine that uses the shared file -pointer to write files. The order of serialization is not deterministic -for this noncollective routine. - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. - -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to -`MPI_ERRORS_RETURN`. The error handler may be changed with -`MPI_File_set_errhandler`; the predefined error handler -`MPI_ERRORS_ARE_FATAL` may be used to make I/O errors fatal. Note that MPI -does not guarantee that an MPI program can continue past an error. diff --git a/ompi/mpi/man/man3/MPI_Finalize.3.md b/ompi/mpi/man/man3/MPI_Finalize.3.md deleted file mode 100644 index 350b31e9bdf..00000000000 --- a/ompi/mpi/man/man3/MPI_Finalize.3.md +++ /dev/null @@ -1,103 +0,0 @@ -# Name - -`MPI_Finalize` - Terminates MPI execution environment. 
- -# Syntax - -## C Syntax - -```c -#include - -int MPI_Finalize() -``` - -## Fortran Syntax - -```fortran -USE MPI -! or the older form: INCLUDE 'mpif.h' - -MPI_FINALIZE(IERROR) - INTEGER IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Finalize(ierror) - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Output Parameter - -* `IERROR` : Fortran only: Error status (integer). - -# Description - -This routine cleans up all MPI states. Once this routine is called, no -MPI routine (not even `MPI_Init`) may be called, except for -`MPI_Get_version`, `MPI_Initialized`, and `MPI_Finalized`. Unless there has -been a call to `MPI_Abort`, you must ensure that all pending -communications involving a process are complete before the process calls -`MPI_Finalize`. If the call returns, each process may either continue -local computations or exit without participating in further -communication with other processes. At the moment when the last process -calls `MPI_Finalize`, all pending sends must be matched by a receive, and -all pending receives must be matched by a send. - -`MPI_Finalize` is collective over all connected processes. If no processes -were spawned, accepted, or connected, then this means it is collective -over `MPI_COMM_WORLD`. Otherwise, it is collective over the union of all -processes that have been and continue to be connected. - -# Notes - -All processes must call this routine before exiting. All processes will -still exist but may not make any further MPI calls. `MPI_Finalize` -guarantees that all local actions required by communications the user -has completed will, in fact, occur before it returns. However, -`MPI_Finalize` guarantees nothing about pending communications that have -not been completed; completion is ensured only by `MPI_Wait`, `MPI_Test,` -or `MPI_Request_free` combined with some other verification of completion. 
- -For example, a successful return from a blocking communication operation -or from `MPI_Wait` or `MPI_Test` means that the communication is completed -by the user and the buffer can be reused, but does not guarantee that -the local process has no more work to do. Similarly, a successful return -from `MPI_Request_free` with a request handle generated by an `MPI_Isend` -nullifies the handle but does not guarantee that the operation has -completed. The `MPI_Isend` is complete only when a matching receive has -completed. - -If you would like to cause actions to happen when a process finishes, -attach an attribute to `MPI_COMM_SELF` with a callback function. Then, -when `MPI_Finalize` is called, it will first execute the equivalent of an -`MPI_Comm_free` on `MPI_COMM_SELF`. This will cause the delete callback -function to be executed on all keys associated with `MPI_COMM_SELF` in an -arbitrary order. If no key has been attached to `MPI_COMM_SELF`, then no -callback is invoked. This freeing of `MPI_COMM_SELF` happens before any -other parts of MPI are affected. Calling `MPI_Finalized` will thus return -"false" in any of these callback functions. Once you have done this -with `MPI_COMM_SELF`, the results of `MPI_Finalize` are not specified. - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. - -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. 
- -# See Also - -[`MPI_Init`(3)](MPI_Init.html) -[`MPI_Init_thread`(3)](MPI_Init_thread.html) -[`MPI_Initialized`(3)](MPI_Initialized.html) -[`MPI_Finalized`(3)](MPI_Finalized.html) diff --git a/ompi/mpi/man/man3/MPI_Finalized.3.md b/ompi/mpi/man/man3/MPI_Finalized.3.md deleted file mode 100644 index 46d27f5ecc6..00000000000 --- a/ompi/mpi/man/man3/MPI_Finalized.3.md +++ /dev/null @@ -1,64 +0,0 @@ -# Name - -`MPI_Finalized` - Checks whether MPI has been finalized - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Finalized(int *flag) -``` - -## Fortran Syntax - -```fortran -USE MPI -! or the older form: INCLUDE 'mpif.h' - -MPI_FINALIZED(FLAG, IERROR) - LOGICAL FLAG - INTEGER IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Finalized(flag, ierror) - LOGICAL, INTENT(OUT) :: flag - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Output Parameter - -* `flag` : True if MPI was finalized, and false otherwise (logical). -* `IERROR` : Fortran only: Error status (integer). - -# Description - -This routine may be used to determine whether MPI has been finalized. It -is one of a small number of routines that may be called before MPI is -initialized and after MPI has been finalized (`MPI_Initialized` is -another). - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. 
- -# See Also - -[`MPI_Init`(3)](MPI_Init.html) -[`MPI_Init_thread`(3)](MPI_Init_thread.html) -[`MPI_Initialized`(3)](MPI_Initialized.html) -[`MPI_Finalize`(3)](MPI_Finalize.html) diff --git a/ompi/mpi/man/man3/MPI_Free_mem.3.md b/ompi/mpi/man/man3/MPI_Free_mem.3.md deleted file mode 100644 index bdd0f19fe9c..00000000000 --- a/ompi/mpi/man/man3/MPI_Free_mem.3.md +++ /dev/null @@ -1,64 +0,0 @@ -# Name - -`MPI_Free_mem` - Frees memory that has been allocated using -`MPI_Alloc_mem`. - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Free_mem(void *base) -``` - -## Fortran Syntax - -```fortran -USE MPI -! or the older form: INCLUDE 'mpif.h' - -MPI_FREE_MEM(BASE, IERROR) - BASE(*) - INTEGER IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Free_mem(base, ierror) - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: base - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Input Parameter - -* `base` : Initial address of memory segment allocated by `MPI_Alloc_mem` -(choice). - -# Output Parameter - -* `IERROR` : Fortran only: Error status (integer). - -# Description - -`MPI_Free_mem` frees memory that has been allocated by `MPI_Alloc_mem`. - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. - -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. 
- -# See Also - -[`MPI_Alloc_mem`(3)](MPI_Alloc_mem.html) diff --git a/ompi/mpi/man/man3/MPI_Gather.3.md b/ompi/mpi/man/man3/MPI_Gather.3.md deleted file mode 100644 index 67091047c53..00000000000 --- a/ompi/mpi/man/man3/MPI_Gather.3.md +++ /dev/null @@ -1,227 +0,0 @@ -# Name - -`MPI_Gather`, `MPI_Igather`, `MPI_Gather_init` - Gathers values from a group of processes. - -# Synopsis - -## C Syntax - -```c -#include - -int MPI_Gather(const void *sendbuf, int sendcount, MPI_Datatype sendtype, - void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, - MPI_Comm comm) - -int MPI_Igather(const void *sendbuf, int sendcount, MPI_Datatype sendtype, - void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, - MPI_Comm comm, MPI_Request *request) - -int MPI_Gather_init(const void *sendbuf, int sendcount, MPI_Datatype sendtype, - void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, - MPI_Comm comm, MPI_Info info, MPI_Request *request) -``` - -## Fortran Syntax - -```fortran -USE MPI -! or the older form: INCLUDE 'mpif.h' - -MPI_GATHER(SENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNT, - RECVTYPE, ROOT, COMM, IERROR) - SENDBUF(*), RECVBUF(*) - INTEGER SENDCOUNT, SENDTYPE, RECVCOUNT, RECVTYPE, ROOT - INTEGER COMM, IERROR - -MPI_IGATHER(SENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNT, - RECVTYPE, ROOT, COMM, REQUEST, IERROR) - SENDBUF(*), RECVBUF(*) - INTEGER SENDCOUNT, SENDTYPE, RECVCOUNT, RECVTYPE, ROOT - INTEGER COMM, REQUEST, IERROR - -MPI_GATHER_INIT(SENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNT, - RECVTYPE, ROOT, COMM, INFO, REQUEST, IERROR) - SENDBUF(*), RECVBUF(*) - INTEGER SENDCOUNT, SENDTYPE, RECVCOUNT, RECVTYPE, ROOT - INTEGER COMM, INFO, REQUEST, IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Gather(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, - root, comm, ierror) - TYPE(*), DIMENSION(..), INTENT(IN) :: sendbuf - TYPE(*), DIMENSION(..) 
:: recvbuf - INTEGER, INTENT(IN) :: sendcount, recvcount, root - TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype - TYPE(MPI_Comm), INTENT(IN) :: comm - INTEGER, OPTIONAL, INTENT(OUT) :: ierror - -MPI_Igather(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, - root, comm, request, ierror) - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: sendbuf - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: recvbuf - INTEGER, INTENT(IN) :: sendcount, recvcount, root - TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype - TYPE(MPI_Comm), INTENT(IN) :: comm - TYPE(MPI_Request), INTENT(OUT) :: request - INTEGER, OPTIONAL, INTENT(OUT) :: ierror - -MPI_Gather_init(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, - root, comm, info, request, ierror) - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: sendbuf - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: recvbuf - INTEGER, INTENT(IN) :: sendcount, recvcount, root - TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype - TYPE(MPI_Comm), INTENT(IN) :: comm - TYPE(MPI_Info), INTENT(IN) :: info - TYPE(MPI_Request), INTENT(OUT) :: request - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - - -# Input Parameters - -* `sendbuf` : Starting address of send buffer (choice). -* `sendcount` : Number of elements in send buffer (integer). -* `sendtype` : Datatype of send buffer elements (handle). -* `recvcount` : Number of elements for any single receive (integer, significant only -at root). -* `recvtype` : Datatype of recvbuffer elements (handle, significant only at root). -* `root` : Rank of receiving process (integer). -* `comm` : Communicator (handle). -* `info` : Info (handle, persistent only). - - -# Output Parameters - -* `recvbuf` : Address of receive buffer (choice, significant only at root). -* `request` : Request (handle, non-blocking only). -* `IERROR` : Fortran only: Error status (integer). - -# Description - -Each process (root process included) sends the contents of its send -buffer to the root process. 
The root process receives the messages and -stores them in rank order. The outcome is as if each of the n processes -in the group (including the root process) had executed a call to - -```c -MPI_Send(sendbuf, sendcount, sendtype, root, ...) -``` - -and the root had executed n calls to - -```c -MPI_Recv(recfbuf + i * recvcount * extent(recvtype), recvcount, recvtype, i, ...) -``` - -where extent(recvtype) is the type extent obtained from a call to -`MPI_Type_extent()`. - -An alternative description is that the n messages sent by the processes -in the group are concatenated in rank order, and the resulting message -is received by the root as if by a call to `MPI_RECV(recvbuf, recvcount* -n, recvtype, ... )`. - -The receive buffer is ignored for all nonroot processes. - -General, derived datatypes are allowed for both sendtype and recvtype. -The type signature of `sendcount`, `sendtype` on process i must be equal to -the type signature of `recvcount`, `recvtype` at the root. This implies that -the amount of data sent must be equal to the amount of data received, -pairwise between each process and the root. Distinct type maps between -sender and receiver are still allowed. - -All arguments to the function are significant on process root, while on -other processes, only arguments `sendbuf`, `sendcount`, `sendtype`, `root`, `comm` -are significant. The arguments `root` and `comm` must have identical values -on all processes. - -The specification of counts and types should not cause any location on -the root to be written more than once. Such a call is erroneous. -Note that the `recvcount` argument at the root indicates the number of -items it receives from each process, not the total number of items it -receives. - -Example 1: Gather 100 ints from every process in group to root. - -```c -MPI_Comm comm; -int gsize,sendarray[100]; -int root, *rbuf; -//... 
- -MPI_Comm_size( comm, &gsize); -rbuf = (int *)malloc(gsize*100*sizeof(int)); - -MPI_Gather( sendarray, 100, MPI_INT, rbuf, 100, MPI_INT, root, comm); -``` - -Example 2: Previous example modified -- only the root allocates -memory for the receive buffer. - -```c -MPI_Comm comm; -int gsize,sendarray[100]; -int root, myrank, *rbuf; -//... - -MPI_Comm_rank( comm, myrank); -if ( myrank == root) { - MPI_Comm_size( comm, &gsize); - rbuf = (int *)malloc(gsize*100*sizeof(int)); -} -MPI_Gather( sendarray, 100, MPI_INT, rbuf, 100, MPI_INT, root, comm); -``` - -Example 3: Do the same as the previous example, but use a derived -datatype. Note that the type cannot be the entire set of gsize * 100 -ints since type matching is defined pairwise between the root and each -process in the gather. - -```c -MPI_Comm comm; -int gsize,sendarray[100]; -int root, *rbuf; -MPI_Datatype rtype; -//... - -MPI_Comm_size( comm, &gsize); -MPI_Type_contiguous( 100, MPI_INT, &rtype ); -MPI_Type_commit( &rtype ); -rbuf = (int *)malloc(gsize*100*sizeof(int)); -MPI_Gather( sendarray, 100, MPI_INT, rbuf, 1, rtype, root, comm); -``` - -# Use Of In-Place Option - -When the communicator is an intracommunicator, you can perform a gather operation in-place (the output buffer is used as the input buffer). Use the variable `MPI_IN_PLACE` as the value of the root process `sendbuf`. In this case, `sendcount` and `sendtype` are ignored, and the contribution of the root process to the gathered vector is assumed to already be in the correct place in the receive buffer. -Note that `MPI_IN_PLACE` is a special kind of value; it has the same restrictions on its use as MPI_BOTTOM. -Because the in-place option converts the receive buffer into a send-and-receive buffer, a Fortran binding that includes `INTENT` must mark these as `INOUT`, not `OUT`. 
- -# When Communicator Is An Inter-Communicator - -When the communicator is an inter-communicator, the root process in the first group gathers data from all the processes in the second group. The first group defines the root process. That process uses MPI_ROOT as the value of its `root` argument. The remaining processes use `MPI_PROC_NULL` as the value of their `root` argument. All processes in the second group use the rank of that root process in the first group as the value of their `root` argument. The send buffer argument of the processes in the first group must be consistent with the receive buffer argument of the root process in the second group. - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. -See the MPI man page for a full list of MPI error codes. 
- -# See Also - -[`MPI_Gatherv`(3)](MPI_Gatherv.html) -[`MPI_Scatter`(3)](MPI_Scatter.html) -[`MPI_Scatterv`(3)](MPI_Scatterv.html) diff --git a/ompi/mpi/man/man3/MPI_Gather_init.3in b/ompi/mpi/man/man3/MPI_Gather_init.3in deleted file mode 100644 index d15bc2d25cf..00000000000 --- a/ompi/mpi/man/man3/MPI_Gather_init.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Gather.3 diff --git a/ompi/mpi/man/man3/MPI_Gatherv.3.md b/ompi/mpi/man/man3/MPI_Gatherv.3.md deleted file mode 100644 index e9925817b4d..00000000000 --- a/ompi/mpi/man/man3/MPI_Gatherv.3.md +++ /dev/null @@ -1,402 +0,0 @@ -# Name - -`MPI_Gatherv`, `MPI_Igatherv`, `MPI_Gatherv_init` - Gathers varying amounts of data from all -processes to the root process - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Gatherv(const void *sendbuf, int sendcount, MPI_Datatype sendtype, - void *recvbuf, const int recvcounts[], const int displs[], MPI_Datatype recvtype, - int root, MPI_Comm comm) - -int MPI_Igatherv(const void *sendbuf, int sendcount, MPI_Datatype sendtype, - void *recvbuf, const int recvcounts[], const int displs[], MPI_Datatype recvtype, - int root, MPI_Comm comm, MPI_Request *request) - -int MPI_Gatherv_init(const void *sendbuf, int sendcount, MPI_Datatype sendtype, - void *recvbuf, const int recvcounts[], const int displs[], MPI_Datatype recvtype, - int root, MPI_Comm comm, MPI_Info info, MPI_Request *request) -``` - -## Fortran Syntax - -```fortran -USE MPI -! 
or the older form: INCLUDE 'mpif.h' - -MPI_GATHERV(SENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNTS, - DISPLS, RECVTYPE, ROOT, COMM, IERROR) - SENDBUF(*), RECVBUF(*) - INTEGER SENDCOUNT, SENDTYPE, RECVCOUNTS(*), DISPLS(*) - INTEGER RECVTYPE, ROOT, COMM, IERROR - -MPI_IGATHERV(SENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNTS, - DISPLS, RECVTYPE, ROOT, COMM, REQUEST, IERROR) - SENDBUF(*), RECVBUF(*) - INTEGER SENDCOUNT, SENDTYPE, RECVCOUNTS(*), DISPLS(*) - INTEGER RECVTYPE, ROOT, COMM, REQUEST, IERROR - -MPI_GATHERV_INIT(SENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNTS, - DISPLS, RECVTYPE, ROOT, COMM, INFO, REQUEST, IERROR) - SENDBUF(*), RECVBUF(*) - INTEGER SENDCOUNT, SENDTYPE, RECVCOUNTS(*), DISPLS(*) - INTEGER RECVTYPE, ROOT, COMM, INFO, REQUEST, IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Gatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, - recvtype, root, comm, ierror) - TYPE(*), DIMENSION(..), INTENT(IN) :: sendbuf - TYPE(*), DIMENSION(..) 
:: recvbuf - INTEGER, INTENT(IN) :: sendcount, recvcounts(*), displs(*), root - TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype - TYPE(MPI_Comm), INTENT(IN) :: comm - INTEGER, OPTIONAL, INTENT(OUT) :: ierror - -MPI_Igatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, - recvtype, root, comm, request, ierror) - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: sendbuf - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: recvbuf - INTEGER, INTENT(IN) :: sendcount, root - INTEGER, INTENT(IN), ASYNCHRONOUS :: recvcounts(*), displs(*) - TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype - TYPE(MPI_Comm), INTENT(IN) :: comm - TYPE(MPI_Request), INTENT(OUT) :: request - INTEGER, OPTIONAL, INTENT(OUT) :: ierror - -MPI_Gatherv_init(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, - recvtype, root, comm, info, request, ierror) - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: sendbuf - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: recvbuf - INTEGER, INTENT(IN) :: sendcount, root - INTEGER, INTENT(IN), ASYNCHRONOUS :: recvcounts(*), displs(*) - TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype - TYPE(MPI_Comm), INTENT(IN) :: comm - TYPE(MPI_Info), INTENT(IN) :: info - TYPE(MPI_Request), INTENT(OUT) :: request - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Input Parameters - -* `sendbuf` : Starting address of send buffer (choice). -* `sendcount` : Number of elements in send buffer (integer). -* `sendtype` : Datatype of send buffer elements (handle). -* `recvcounts` : Integer array (of length group size) containing the number of -elements that are received from each process (significant only at -root). -* `displs` : Integer array (of length group size). Entry i specifies the -displacement relative to recvbuf at which to place the incoming data -from process i (significant only at root). -* `recvtype` : Datatype of recv buffer elements (significant only at root) -(handle). -* `root` : Rank of receiving process (integer). 
-* `comm` : Communicator (handle). -* `info` : Info (handle, persistent only). - - - -# Output Parameters - -* `recvbuf` : Address of receive buffer (choice, significant only at root). -* `request` : Request (handle, non-blocking only). -* `IERROR` : Fortran only: Error status (integer). - -# Description - - -`MPI_Gatherv` extends the functionality of `MPI_Gather` by allowing a -varying count of data from each process, since `recvcounts` is now an -array. It also allows more flexibility as to where the data is placed on -the root, by providing the new argument, `displs`. - -The outcome is as if each process, including the root process, sends a -message to the root, - -```c -MPI_Send(sendbuf, sendcount, sendtype, root, ...) -``` - -and the root executes n receives, - -```c -MPI_Recv(recvbuf + disp[i] * extent(recvtype), - recvcounts[i], recvtype, i, ...) -``` - -Messages are placed in the receive buffer of the root process in rank -order, that is, the data sent from process j is placed in the jth -portion of the receive buffer `recvbuf` on process root. The jth portion -of `recvbuf` begins at offset displs[j] elements (in terms of `recvtype`) -into `recvbuf`. - -The receive buffer is ignored for all nonroot processes. - -The type signature implied by `sendcount`, `sendtype` on process i must be -equal to the type signature implied by `recvcounts[i]`, `recvtype` at the -root. This implies that the amount of data sent must be equal to the -amount of data received, pairwise between each process and the root. -Distinct type maps between sender and receiver are still allowed, as -illustrated in Example 2, below. - -All arguments to the function are significant on process `root`, while on -other processes, only arguments `sendbuf`, `sendcount`, `sendtype`, `root`, `comm` -are significant. The arguments `root` and `comm` must have identical values -on all processes. 
- -The specification of counts, types, and displacements should not cause -any location on the `root` to be written more than once. Such a call is -erroneous. - -Example 1: Now have each process send 100 ints to `root`, but place -each set (of 100) stride ints apart at receiving end. Use `MPI_Gatherv` -and the `displs` argument to achieve this effect. Assume stride >= 100. - -```c -MPI_Comm comm; -int gsize,sendarray[100]; -int root, *rbuf, stride; -int *displs,i,*rcounts; -// ... -MPI_Comm_size(comm, &gsize); -rbuf = (int *)malloc(gsize*stride*sizeof(int)); -displs = (int *)malloc(gsize*sizeof(int)); -rcounts = (int *)malloc(gsize*sizeof(int)); -for (i=0; i - -MPI_Get(void *origin_addr, int origin_count, MPI_Datatype - origin_datatype, int target_rank, MPI_Aint target_disp, - int target_count, MPI_Datatype target_datatype, MPI_Win win) - -MPI_Rget(void *origin_addr, int origin_count, MPI_Datatype - origin_datatype, int target_rank, MPI_Aint target_disp, - int target_count, MPI_Datatype target_datatype, MPI_Win win, - MPI_Request *request) -``` - -## Fortran Syntax (See Fortran 77 Notes) - -```fortran -USE MPI -! 
or the older form: INCLUDE 'mpif.h' - -MPI_GET(ORIGIN_ADDR, ORIGIN_COUNT, ORIGIN_DATATYPE, TARGET_RANK, - TARGET_DISP, TARGET_COUNT, TARGET_DATATYPE, WIN, IERROR) - ORIGIN_ADDR(*) - INTEGER(KIND=MPI_ADDRESS_KIND) TARGET_DISP - INTEGER ORIGIN_COUNT, ORIGIN_DATATYPE, TARGET_RANK, - TARGET_COUNT, TARGET_DATATYPE, WIN, IERROR - -MPI_RGET(ORIGIN_ADDR, ORIGIN_COUNT, ORIGIN_DATATYPE, TARGET_RANK, - TARGET_DISP, TARGET_COUNT, TARGET_DATATYPE, WIN, REQUEST, IERROR) - ORIGIN_ADDR(*) - INTEGER(KIND=MPI_ADDRESS_KIND) TARGET_DISP - INTEGER ORIGIN_COUNT, ORIGIN_DATATYPE, TARGET_RANK, - TARGET_COUNT, TARGET_DATATYPE, WIN, REQUEST, IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Get(origin_addr, origin_count, origin_datatype, target_rank, - target_disp, target_count, target_datatype, win, ierror) - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: origin_addr - INTEGER, INTENT(IN) :: origin_count, target_rank, target_count - TYPE(MPI_Datatype), INTENT(IN) :: origin_datatype, target_datatype - INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: target_disp - TYPE(MPI_Win), INTENT(IN) :: win - INTEGER, OPTIONAL, INTENT(OUT) :: ierror - -MPI_Rget(origin_addr, origin_count, origin_datatype, target_rank, - target_disp, target_count, target_datatype, win, request, - ierror) - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: origin_addr - INTEGER, INTENT(IN) :: origin_count, target_rank, target_count - TYPE(MPI_Datatype), INTENT(IN) :: origin_datatype, target_datatype - INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: target_disp - TYPE(MPI_Win), INTENT(IN) :: win - TYPE(MPI_Request), INTENT(OUT) :: request - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Input Parameters - -* `origin_addr` : Initial address of origin buffer (choice). -* `origin_count` : Number of entries in origin buffer (nonnegative integer). -* `origin_datatype` : Data type of each entry in origin buffer (handle). -* `target_rank` : Rank of target (nonnegative integer). 
-* `target_disp` : Displacement from window start to the beginning of the target buffer -(nonnegative integer). -* `target_count` : Number of entries in target buffer (nonnegative integer). -* `target datatype` : datatype of each entry in target buffer (handle) -* `win` : window object used for communication (handle) - -# Output Parameter - -* `request` : MPI_Rget: RMA request -* `IERROR` : Fortran only: Error status (integer). - -# Description - -`MPI_Get` copies data from the target memory to the origin, similar to -`MPI_Put`, except that the direction of data transfer is reversed. The -`origin_datatype` may not specify overlapping entries in the origin -buffer. The target buffer must be contained within the target window, -and the copied data must fit, without truncation, in the origin buffer. -Only processes within the same node can access the target window. - -`MPI_Rget` is similar to `MPI_Get`, except that it allocates a -communication `request` object and associates it with the `request` handle -(the argument `request`) that can be used to wait or test for -completion. The completion of an `MPI_Rget` operation indicates that the -data is available in the origin buffer. If `origin_addr` points to -memory attached to a window, then the data becomes available in the -private copy of this window. - -# Fortran 77 Notes - -The MPI standard prescribes portable Fortran syntax for the -`TARGET_DISP` argument only for Fortran 90. FORTRAN 77 users may use the -non-portable syntax - -```fortran -INTEGER*MPI_ADDRESS_KIND TARGET_DISP -``` - -where `MPI_ADDRESS_KIND` is a constant defined in mpif.h and gives the -length of the declared integer in bytes. - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. - -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. 
The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. - -# See Also - -[`MPI_Put`(3)](MPI_Put.html) diff --git a/ompi/mpi/man/man3/MPI_Get_accumulate.3.md b/ompi/mpi/man/man3/MPI_Get_accumulate.3.md deleted file mode 100644 index d18be6b41ec..00000000000 --- a/ompi/mpi/man/man3/MPI_Get_accumulate.3.md +++ /dev/null @@ -1,193 +0,0 @@ -# Name - -`MPI_Get_accumulate`, `MPI_Rget_accumulate` - Combines the contents -of the origin buffer with that of a target buffer and returns the target -buffer value. - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Get_accumulate(const void *origin_addr, int origin_count, - MPI_Datatype origin_datatype, void *result_addr, - int result_count, MPI_Datatype result_datatype, - int target_rank, MPI_Aint target_disp, int target_count, - MPI_Datatype target_datatype, MPI_Op op, MPI_Win win) - -int MPI_Rget_accumulate(const void *origin_addr, int origin_count, - MPI_Datatype origin_datatype, void *result_addr, - int result_count, MPI_Datatype result_datatype, - int target_rank, MPI_Aint target_disp, int target_count, - MPI_Datatype target_datatype, MPI_Op op, MPI_Win win, - MPI_Request *request) -``` - -## Fortran Syntax (See Fortran 77 Notes) - -```fortran -USE MPI -! 
or the older form: INCLUDE 'mpif.h' - -MPI_GET_ACCUMULATE(ORIGIN_ADDR, ORIGIN_COUNT, ORIGIN_DATATYPE, RESULT_ADDR, - RESULT_COUNT, RESULT_DATATYPE, TARGET_RANK, TARGET_DISP, TARGET_COUNT, - TARGET_DATATYPE, OP, WIN, IERROR) - ORIGIN_ADDR, RESULT_ADDR(*) - INTEGER(KIND=MPI_ADDRESS_KIND) TARGET_DISP - INTEGER ORIGIN_COUNT, ORIGIN_DATATYPE, TARGET_COUNT, TARGET_DATATYPE, - TARGET_RANK, TARGET_COUNT, TARGET_DATATYPE, OP, WIN, IERROR - -MPI_RGET_ACCUMULATE(ORIGIN_ADDR, ORIGIN_COUNT, ORIGIN_DATATYPE, RESULT_ADDR, - RESULT_COUNT, RESULT_DATATYPE, TARGET_RANK, TARGET_DISP, TARGET_COUNT, - TARGET_DATATYPE, OP, WIN, REQUEST, IERROR) - ORIGIN_ADDR, RESULT_ADDR(*) - INTEGER(KIND=MPI_ADDRESS_KIND) TARGET_DISP - INTEGER ORIGIN_COUNT, ORIGIN_DATATYPE, TARGET_COUNT, TARGET_DATATYPE, - TARGET_RANK, TARGET_COUNT, TARGET_DATATYPE, OP, WIN, REQUEST, IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Get_accumulate(origin_addr, origin_count, origin_datatype, result_addr, - result_count, result_datatype, target_rank, target_disp, - target_count, target_datatype, op, win, ierror) - TYPE(*), DIMENSION(..), INTENT(IN) :: origin_addr - TYPE(*), DIMENSION(..) :: result_addr - INTEGER, INTENT(IN) :: origin_count, result_count, target_rank, target_count - TYPE(MPI_Datatype), INTENT(IN) :: origin_datatype, target_datatype, result_datatype - INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: target_dist - TYPE(MPI_Op), INTENT(IN) :: op - TYPE(MPI_Win), INTENT(IN) :: win - INTEGER, OPTIONAL, INTENT(OUT) :: ierror - -MPI_Rget_accumulate(origin_addr, origin_count, origin_datatype, - result_addr, result_count, result_datatype, target_rank, - target_disp, target_count, target_datatype, op, win, request, - ierror) - TYPE(*), DIMENSION(..), INTENT(IN) :: origin_addr - TYPE(*), DIMENSION(..) 
:: result_addr - INTEGER, INTENT(IN) :: origin_count, result_count, target_rank, target_count - TYPE(MPI_Datatype), INTENT(IN) :: origin_datatype, target_datatype, result_datatype - INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: target_dist - TYPE(MPI_Op), INTENT(IN) :: op - TYPE(MPI_Win), INTENT(IN) :: win - TYPE(MPI_Request), INTENT(OUT) :: request - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Input Parameters - -* `origin_addr` : Initial address of buffer (choice). -* `origin_count` : Number of entries in buffer (nonnegative integer). -* `origin_datatype` : Data type of each buffer entry (handle). -* `result_addr` : Initial address of result buffer (choice). -* `result_count` : Number of entries in result buffer (nonnegative integer). -* `result_datatype` : Data type of each result buffer entry (handle). -* `target_rank` : Rank of target (nonnegative integer). -* `target_disp` : Displacement from start of window to beginning of target buffer -(nonnegative integer). -* `target_count` : Number of entries in target buffer (nonnegative integer). -* `target_datatype` : Data type of each entry in target buffer (handle). -* `op` : Reduce operation (handle). -* `win` : Window object (handle). - -# Output Parameter - -* `MPI_Rget_accumulate`: RMA request -* `IERROR` : Fortran only: Error status (integer). - -# Description - -`MPI_Get_accumulate` is a function used for one-sided MPI -communication that adds the contents of the origin buffer (as defined by -`origin_addr`, `origin_count`, and `origin_datatype`) to the buffer -specified by the arguments `target_count` and `target_datatype`, at -offset `target_disp`, in the target window specified by `target_rank` -and `win`, using the operation `op`. `MPI_Get_accumulate` returns in -the result buffer `result_addr` the contents of the target buffer before -the accumulation. - -Any of the predefined operations for `MPI_Reduce`, as well as `MPI_NO_OP,` -can be used. User-defined functions cannot be used. 
For example, if `op` -is `MPI_SUM`, each element of the origin buffer is added to the -corresponding element in the target, replacing the former value in the -target. - -Each datatype argument must be a predefined data type or a derived data -type, where all basic components are of the same predefined data type. -Both datatype arguments must be constructed from the same predefined -data type. The operation `op` applies to elements of that predefined -type. The `target_datatype` argument must not specify overlapping -entries, and the target buffer must fit in the target window. - -A new predefined operation, `MPI_REPLACE`, is defined. It corresponds to -the associative function f(a, b) =b; that is, the current value in the -target memory is replaced by the value supplied by the origin. - -A new predefined operation, `MPI_NO_OP`, is defined. It corresponds to the -assiciative function f(a, b) = a; that is the current value in the -target memory is returned in the result buffer at the origin and no -operation is performed on the target buffer. - -`MPI_Rget_accumulate` is similar to `MPI_Get_accumulate`, except -that it allocates a communication request object and associates it with -the request handle (the argument request) that can be used to wait or -test for completion. The completion of an `MPI_Rget_accumulate` -operation indicates that the data is available in the result buffer and -the origin buffer is free to be updated. It does not indicate that the -operation has been completed at the target window. - -# Fortran 77 Notes - -The MPI standard prescribes portable Fortran syntax for the -`TARGET_DISP` argument only for Fortran 90. FORTRAN 77 users may use the -non-portable syntax - -```fortran -INTEGER*MPI_ADDRESS_KIND TARGET_DISP -``` - -where MPI_ADDRESS_KIND is a constant defined in mpif.h and gives the -length of the declared integer in bytes. 
- -# Notes - -The generic functionality of `MPI_Get_accumulate` might limit the -performance of fetch-and-increment or fetch-and-add calls that might be -supported by special hardware operations. `MPI_Fetch_and_op` thus allows -for a fast implementation of a commonly used subset of the functionality -of `MPI_Get_accumulate`. - -`MPI_Get` is a special case of `MPI_Get_accumulate`, with the operation -`MPI_NO_OP`. Note, however, that `MPI_Get` and `MPI_Get_accumulate` have -different constraints on concurrent updates. - -It is the user's responsibility to guarantee that, when using the -accumulate functions, the target displacement argument is such that -accesses to the window are properly aligned according to the data type -arguments in the call to the `MPI_Get_accumulate` function. - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. - -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. - -# See Also - -[`MPI_Put`(3)](MPI_Put.html) -[`MPI_Get`(3)](MPI_Get.html) -[`MPI_Accumulate`(3)](MPI_Accumulate.html) -[`MPI_Fetch_and_op`(3)](MPI_Fetch_and_op.html) -[`MPI_Reduce`(3)](MPI_Reduce.html) diff --git a/ompi/mpi/man/man3/MPI_Get_address.3.md b/ompi/mpi/man/man3/MPI_Get_address.3.md deleted file mode 100644 index d85346425fa..00000000000 --- a/ompi/mpi/man/man3/MPI_Get_address.3.md +++ /dev/null @@ -1,84 +0,0 @@ -# Name - -`MPI_Get_address` - Gets the address of a location in memory. 
- -# Syntax - -## C Syntax - -```c -#include - -int MPI_Get_address(const void *location, MPI_Aint *address) -``` - -## Fortran Syntax - -```fortran -USE MPI -! or the older form: INCLUDE 'mpif.h' - -MPI_GET_ADDRESS(LOCATION, ADDRESS, IERROR) - LOCATION(*) - INTEGER(KIND=MPI_ADDRESS_KIND) ADDRESS - INTEGER IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Get_address(location, address, ierror) - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: location - INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(OUT) :: address - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Input Parameters - -* `location` : Location in caller memory (choice). - -# Output Parameters - -* `address` : Address of location (integer). -* `IERROR` : Fortran only: Error status (integer). - -# Description - - -`MPI_Get_address` returns the byte `address` of a location in memory. - -Example: Using `MPI_Get_address` for an array. - -```fortran -EAL A(100,100) - INTEGER I1, I2, DIFF - CALL MPI_GET_ADDRESS(A(1,1), I1, IERROR) - CALL MPI_GET_ADDRESS(A(10,10), I2, IERROR) - DIFF = I2 - I1 -! The value of DIFF is 909*sizeofreal; the values of I1 and I2 are -! implementation dependent. -``` - -# Notes - -Current Fortran MPI codes will run unmodified and will port to any -system. However, they may fail if `addresses` larger than 2^32 - 1 are -used in the program. New codes should be written so that they use the -new functions. This provides compatibility with C and avoids errors on -64-bit architectures. However, such newly written codes may need to be -(slightly) rewritten to port to old Fortran 77 environments that do not -support KIND declarations. - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. - -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. 
The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. diff --git a/ompi/mpi/man/man3/MPI_Get_count.3.md b/ompi/mpi/man/man3/MPI_Get_count.3.md deleted file mode 100644 index 6c5123848e0..00000000000 --- a/ompi/mpi/man/man3/MPI_Get_count.3.md +++ /dev/null @@ -1,87 +0,0 @@ -# Name - -`MPI_Get_count` - Gets the number of top-level elements received. - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Get_count(const MPI_Status *status, MPI_Datatype datatype, - int *count) -``` - -## Fortran Syntax - -```fortran -USE MPI -! or the older form: INCLUDE 'mpif.h' - -MPI_GET_COUNT(STATUS, DATATYPE, COUNT, IERROR) - INTEGER STATUS(MPI_STATUS_SIZE), DATATYPE, COUNT, IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Get_count(status, datatype, count, ierror) - TYPE(MPI_Status), INTENT(IN) :: status - TYPE(MPI_Datatype), INTENT(IN) :: datatype - INTEGER, INTENT(OUT) :: count - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Input Parameters - -* `status` : Return status of receive operation (status). -* `datatype` : Datatype of each receive buffer element (handle). - -# Output Parameters - -* `count` : Number of received elements (integer). -* `IERROR` : Fortran only: Error status (integer). - -# Description - - -Returns the number of entries received. (We count entries, each of type -`datatype`, not bytes.) The `datatype` argument should match the argument -provided by the receive call that set the `status` variable. (As explained -in Section 3.12.5 in the MPI-1 Standard, "Use of General Datatypes in -Communication," `MPI_Get_count` may, in certain situations, return the -value `MPI_UNDEFINED`.) - -The `datatype` argument is passed to `MPI_Get_count` to improve performance. 
-A message might be received without counting the number of elements it -contains, and the `count` value is often not needed. Also, this allows the -same function to be used after a call to `MPI_Probe`. - -# Notes - -If the size of the `datatype` is zero, this routine will return a `count` of -zero. If the amount of data in `status` is not an exact multiple of the -size of `datatype` (so that `count` would not be integral), a `count` of -`MPI_UNDEFINED` is returned instead. - -# Errors - -If the value to be returned is larger than can fit into the `count` -parameter, an `MPI_ERR_TRUNCATE` error is raised. - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. - -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. - -# See Also - -[`MPI_Get_elements`(3)](MPI_Get_elements.html) diff --git a/ompi/mpi/man/man3/MPI_Get_elements.3.md b/ompi/mpi/man/man3/MPI_Get_elements.3.md deleted file mode 100644 index 7519d11adec..00000000000 --- a/ompi/mpi/man/man3/MPI_Get_elements.3.md +++ /dev/null @@ -1,132 +0,0 @@ -# Name - -`MPI_Get_elements`, `MPI_Get_elements_x` - Returns the number of basic -elements in a data type. - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Get_elements(const MPI_Status *status, MPI_Datatype datatype, - int *count) - -int MPI_Get_elements_x(const MPI_Status *status, MPI_Datatype datatype, - MPI_Count *count) -``` - -## Fortran Syntax - -```fortran -USE MPI -! 
or the older form: INCLUDE 'mpif.h' - -MPI_GET_ELEMENTS(STATUS, DATATYPE, COUNT, IERROR) - INTEGER STATUS(MPI_STATUS_SIZE), DATATYPE, COUNT, IERROR - -MPI_GET_ELEMENTS_X(STATUS, DATATYPE, COUNT, IERROR) - INTEGER STATUS(MPI_STATUS_SIZE), DATATYPE - INTEGER(KIND=MPI_COUNT_KIND) COUNT - INTEGER IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Get_elements(status, datatype, count, ierror) - TYPE(MPI_Status), INTENT(IN) :: status - TYPE(MPI_Datatype), INTENT(IN) :: datatype - INTEGER, INTENT(OUT) :: count - INTEGER, OPTIONAL, INTENT(OUT) :: ierror - -MPI_Get_elements_x(status, datatype, count, ierror) - TYPE(MPI_Status), INTENT(IN) :: status - TYPE(MPI_Datatype), INTENT(IN) :: datatype - INTEGER(KIND = MPI_COUNT_KIND), INTENT(OUT) :: count - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Input Parameters - -* `status` : Return status of receive operation (status). -* `datatype` : Datatype used by receive operation (handle). - -# Output Parameters - -* `IERROR` : Fortran only: Error status (integer). - -# Description - -`MPI_Get_elements` and `MPI_Get_elements_x` behave different from -`MPI_Get_count`, which returns the number of "top-level entries" -received, i.e., the number of "copies" of type `datatype`. `MPI_Get_count` -may return any integer value k, where 0 =< k =< count. If -`MPI_Get_count` returns k, then the number of basic elements received (and -the value returned by `MPI_Get_elements` and `MPI_Get_elements_x`) is n -k, where n is the number of basic elements in the type map of `datatype`. -If the number of basic elements received is not a multiple of n, that -is, if the receive operation has not received an integral number of -`datatype` "copies," then `MPI_Get_count` returns the value `MPI_UNDEFINED.` -For both functions, if the count parameter cannot express the value to -be returned (e.g., if the parameter is too small to hold the output -value), it is set to `MPI_UNDEFINED`. 
- -Example: Usage of `MPI_Get_count` and `MPI_Get_element`: - -```fortran -//... -MPI_TYPE_CONTIGUOUS(2, MPI_REAL, Type2, ierr) -MPI_TYPE_COMMIT(Type2, ierr) -// ... -MPI_COMM_RANK(comm, rank, ierr) -IF(rank.EQ.0) THEN - CALL MPI_SEND(a, 2, MPI_REAL, 1, 0, comm, ierr) - CALL MPI_SEND(a, 3, MPI_REAL, 1, 0, comm, ierr) -ELSE - CALL MPI_RECV(a, 2, Type2, 0, 0, comm, stat, ierr) - CALL MPI_GET_COUNT(stat, Type2, i, ierr) ! returns i=1 - CALL MPI_GET_ELEMENTS(stat, Type2, i, ierr) ! returns i=2 - CALL MPI_RECV(a, 2, Type2, 0, 0, comm, stat, ierr) - CALL MPI_GET_COUNT(stat, Type2, i, ierr) ! returns i=MPI_UNDEFINED - - CALL MPI_GET_ELEMENTS(stat, Type2, i, ierr) ! returns i=3 -END IF -``` - -The function `MPI_Get_elements` can also be used after a probe to find the -number of elements in the probed message. Note that the two functions -`MPI_Get_count` and `MPI_Get_elements` return the same values when they are -used with primitive data types. - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. - -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. - -# Fortran 77 Notes - -The MPI standard prescribes portable Fortran syntax for the COUNT -argument of `MPI_Get_elements_x` only for Fortran 90. FORTRAN 77 users may -use the non-portable syntax - -```Fortran -INTEGER*MPI_COUNT_KIND COUNT -``` - -where `MPI_COUNT_KIND` is a constant defined in mpif.h and gives the -length of the declared integer in bytes. 
- -# See Also - -[`MPI_Get_count`(3)](MPI_Get_count.html) diff --git a/ompi/mpi/man/man3/MPI_Get_elements_x.3in b/ompi/mpi/man/man3/MPI_Get_elements_x.3in deleted file mode 100644 index 55dfa77bded..00000000000 --- a/ompi/mpi/man/man3/MPI_Get_elements_x.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Get_elements.3 diff --git a/ompi/mpi/man/man3/MPI_Get_library_version.3.md b/ompi/mpi/man/man3/MPI_Get_library_version.3.md deleted file mode 100644 index b119d22085e..00000000000 --- a/ompi/mpi/man/man3/MPI_Get_library_version.3.md +++ /dev/null @@ -1,78 +0,0 @@ -# Name - -`MPI_Get_library_version` - Returns a string of the current Open MPI -version - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Get_library_version(char *version, int *resultlen) -``` - -## Fortran Syntax - -```fortran -USE MPI -! or the older form: INCLUDE 'mpif.h' - -MPI_GET_LIBRARY_VERSION(VERSION, RESULTLEN, IERROR) - CHARACTER*(*) NAME - INTEGER RESULTLEN, IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Get_library_version(version, resulten, ierror) - CHARACTER(LEN=MPI_MAX_LIBRARY_VERSION_STRING), INTENT(OUT) :: version - INTEGER, INTENT(OUT) :: resultlen - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Output Parameters - -* `version` : A string containing the Open MPI version (string). -* `resultlen` : Length (in characters) of result returned in `version` (integer). -* `IERROR` : Fortran only: Error status (integer). - -# Description - -This routine returns a string representing the `version` of the MPI -library. The `version` argument is a character string for maximum -flexibility. - -The number of characters actually written is returned in the output -argument, `resultlen`. In C, a '0' character is additionally stored -at `version[resultlen]`. The `resultlen` cannot be larger than -(`MPI_MAX_LIBRARY_VERSION_STRING` - 1). In Fortran, `version` is padded on -the right with blank characters. 
The `resultlen` cannot be larger than `MPI_MAX_LIBRARY_VERSION_STRING`. - -# Note - -The `version` string that is passed must be at least -`MPI_MAX_LIBRARY_VERSION_STRING` characters long. - -`MPI_Get_library_version` is one of the few functions that can be called -before `MPI_Init` and after `MPI_Finalize.` - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. - -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. - -# See Also - -[`MPI_Get_version`(3)](MPI_Get_version.html) diff --git a/ompi/mpi/man/man3/MPI_Get_processor_name.3.md b/ompi/mpi/man/man3/MPI_Get_processor_name.3.md deleted file mode 100644 index 81cf542683f..00000000000 --- a/ompi/mpi/man/man3/MPI_Get_processor_name.3.md +++ /dev/null @@ -1,71 +0,0 @@ -# Name - -`MPI_Get_processor_name` - Gets the name of the processor. - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Get_processor_name(char *name, int *resultlen) -``` - -## Fortran Syntax - -```fortran -USE MPI -! or the older form: INCLUDE 'mpif.h' - -MPI_GET_PROCESSOR_NAME(NAME, RESULTLEN, IERROR) - CHARACTER*(*) NAME - INTEGER RESULTLEN, IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Get_processor_name(name, resultlen, ierror) - CHARACTER(LEN=MPI_MAX_PROCESSOR_NAME), INTENT(OUT) :: name - INTEGER, INTENT(OUT) :: resultlen - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Output Parameters - -* `name` : A unique specifier for the actual (as opposed to virtual) node. -* `resultlen` : Length (in characters) of result returned in name. 
-* `IERROR` : Fortran only: Error status (integer). - -# Description - -This routine returns the `name` of the processor on which it was called at -the moment of the call. The `name` is a character string for maximum -flexibility. From this value it must be possible to identify a specific -piece of hardware. The argument `name` must represent storage that is at -least `MPI_MAX_PROCESSOR_NAME` characters long. - -The number of characters actually written is returned in the output -argument, `resultlen`. - -# Notes - -The user must provide at least `MPI_MAX_PROCESSOR_NAME` space to write the -processor `name`; processor `name`s can be this long. The user should -examine the output argument, `resultlen`, to determine the actual length -of the `name`. - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. - -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. diff --git a/ompi/mpi/man/man3/MPI_Get_version.3.md b/ompi/mpi/man/man3/MPI_Get_version.3.md deleted file mode 100644 index f6c68d350c2..00000000000 --- a/ompi/mpi/man/man3/MPI_Get_version.3.md +++ /dev/null @@ -1,62 +0,0 @@ -# Name - -`MPI_Get_version` - Returns the version of the standard corresponding -to the current implementation. - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Get_version(int *version, int *subversion) -``` - -## Fortran Syntax - -```fortran -USE MPI -! 
or the older form: INCLUDE 'mpif.h' - -MPI_GET_VERSION(VERSION, SUBVERSION, IERROR) - INTEGER VERSION, SUBVERSION, IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Get_version(version, subversion, ierror) - INTEGER, INTENT(OUT) :: version, subversion - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Output Parameters - -* `version` : The major version number of the corresponding standard (integer). -* `subversion` : The minor version number of the corresponding standard (integer). -* `IERROR` : Fortran only: Error status (integer). - -# Description - -Since Open MPI is MPI 3.1 compliant, this function will return a `version` -value of 3 and a subversion value of 1 for this release. - -# Note - -`MPI_Get_version` is one of the few functions that can be called before -`MPI_Init` and after `MPI_Finalize`. - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. - -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. diff --git a/ompi/mpi/man/man3/MPI_Graph_create.3.md b/ompi/mpi/man/man3/MPI_Graph_create.3.md deleted file mode 100644 index cc6b302e236..00000000000 --- a/ompi/mpi/man/man3/MPI_Graph_create.3.md +++ /dev/null @@ -1,125 +0,0 @@ -# Name - -`MPI_Graph_create` - Makes a new communicator to which topology -information has been attached. - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Graph_create(MPI_Comm comm_old, int nnodes, const int index[], - const int edges[], int reorder, MPI_Comm *comm_graph) -``` - -## Fortran Syntax - -```fortran -USE MPI -! 
or the older form: INCLUDE 'mpif.h' - -MPI_GRAPH_CREATE(COMM_OLD, NNODES, INDEX, EDGES, REORDER, - COMM_GRAPH, IERROR) - INTEGER COMM_OLD, NNODES, INDEX(*), EDGES(*) - INTEGER COMM_GRAPH, IERROR - LOGICAL REORDER -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Graph_create(comm_old, nnodes, index, edges, reorder, comm_graph, - ierror) - TYPE(MPI_Comm), INTENT(IN) :: comm_old - INTEGER, INTENT(IN) :: nnodes, index(nnodes), edges(*) - LOGICAL, INTENT(IN) :: reorder - TYPE(MPI_Comm), INTENT(OUT) :: comm_graph - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - - -# Input Parameters - -* `comm_old` : Input communicator without topology (handle). -* `nnodes` : Number of nodes in graph (integer). -* `index` : Array of integers describing node degrees (see below). -* `edges` : Array of integers describing graph edges (see below). -* `reorder` : Ranking may be reordered (true) or not (false) (logical). - -# Output Parameters - -* `comm_graph` : Communicator with graph topology added (handle). -* `IERROR` : Fortran only: Error status (integer). - -# Description - -`MPI_Graph_create` returns a handle to a new communicator to which the -graph topology information is attached. If `reorder` = false then the rank -of each process in the new group is identical to its rank in the old -group. Otherwise, the function may `reorder` the processes. If the size, -`nnodes`, of the graph is smaller than the size of the group of `comm_old`, -then some processes are returned `MPI_COMM_NULL`, in analogy to -`MPI_Cart_create` and `MPI_Comm_split`. The call is erroneous if it -specifies a graph that is larger than the group size of the input -communicator. - -The three parameters `nnodes`, `index`, and `edges` define the graph -structure. `nnodes` is the number of nodes of the graph. The nodes are -numbered from 0 to `nnodes`-1. The ith entry of array `index` stores the -total number of neighbors of the first i graph nodes. 
The lists of -neighbors of nodes 0, 1, ..., `nnodes`-1 are stored in consecutive -locations in array `edges`. The array `edges` is a flattened representation -of the edge lists. The total number of entries in `index` is `nnodes` and -the total number of entries in `edges` is equal to the number of graph -`edges`. - -The definitions of the arguments `nnodes`, `index`, and `edges` are -illustrated with the following simple example. - -Example: Assume there are four processes 0, 1, 2, 3 with the -following adjacency matrix: - -| Process | Neighbors | -| ------- | --------- | -| 0 | 1, 3 | -| 1 | 0 | -| 2 | 3 | -| 3 | 0, 2 | - -Then, the input arguments are: -* `nodes` = 4 -* `index` = 2, 3, 4, 6 -* `edges` = 1, 3, 0, 3, 0, 2 - -Thus, in C, `index[0]` is the degree of `node` zero, and `index[i]` - -`index[i-1]` is the degree of `node` i, i=1, . . . , nnodes-1; the list of -neighbors of node zero is stored in `edges[j]`, for 0 <= j <= -`index[0] - 1` and the list of neighbors of `node` i, i > 0 , is stored -in `edges[j]`, `index[i-1]` <= j <= `index[i] - 1`. - -In Fortran, `index(1)` is the degree of `node` zero, and `index(i+1)` - -`index(i)` is the degree of `node` i, i=1, . . . , nnodes-1; the list of -neighbors of `node` zero is stored in `edges(j)`, for 1 <= j <= `index(1)` -and the list of neighbors of `node` i, i > 0, is stored in `edges(j)`, -`index(i) + 1` <= j <= `index(i + 1)`. - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. - -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. 
- -# See Also - -[`MPI_Graph_get`(3)](MPI_Graph_get.html) -[`MPI_Graphdims_get`(3)](MPI_Graphdims_get.html) diff --git a/ompi/mpi/man/man3/MPI_Graph_get.3.md b/ompi/mpi/man/man3/MPI_Graph_get.3.md deleted file mode 100644 index 32b438ce166..00000000000 --- a/ompi/mpi/man/man3/MPI_Graph_get.3.md +++ /dev/null @@ -1,79 +0,0 @@ -# Name - -`MPI_Graph_get` - Retrieves graph topology information associated -with a communicator. - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Graph_get(MPI_Comm comm, int maxindex, int maxedges, - int index[], int edges[]) -``` - -## Fortran Syntax - -```fortran -USE MPI -! or the older form: INCLUDE 'mpif.h' - -MPI_GRAPH_GET(COMM, MAXINDEX, MAXEDGES, INDEX, EDGES, IERROR) - INTEGER COMM, MAXINDEX, MAXEDGES, INDEX(*) - INTEGER EDGES(*), IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Graph_get(comm, maxindex, maxedges, index, edges, ierror) - TYPE(MPI_Comm), INTENT(IN) :: comm - INTEGER, INTENT(IN) :: maxindex, maxedges - INTEGER, INTENT(OUT) :: index(maxindex), edges(maxedges) - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Input Parameters - -* `comm` : Communicator with graph structure (handle). -* `maxindex` : Length of vector index in the calling program (integer). -* `maxedges` : Length of vector edges in the calling program (integer). - -# Output Parameters - -* `index` : Array of integers containing the graph structure (for details see -the definition of `MPI_Graph_create`). -* `edges` : Array of integers containing the graph structure. -* `IERROR` : Fortran only: Error status (integer). - - - -# Description - -Functions `MPI_Graphdims_get` and `MPI_Graph_get` retrieve the -graph-topology information that was associated with a communicator by -`MPI_Graph_create`. - -The information provided by `MPI_Graphdims_get` can be used to dimension -the vectors `index` and `edges` correctly for a call to `MPI_Graph_get`. 
- -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. - -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. - -# See Also - -[`MPI_Graph_create`(3)](MPI_Graph_create.html) -[`MPI_Graphdims_get`(3)](MPI_Graphdims_get.html) diff --git a/ompi/mpi/man/man3/MPI_Graph_map.3.md b/ompi/mpi/man/man3/MPI_Graph_map.3.md deleted file mode 100644 index 9142825f951..00000000000 --- a/ompi/mpi/man/man3/MPI_Graph_map.3.md +++ /dev/null @@ -1,75 +0,0 @@ -# Name - -`MPI_Graph_map` - Maps process to graph topology information. - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Graph_map(MPI_Comm comm, int nnodes, const int index[], - const int edges[], int *newrank) -``` - -## Fortran Syntax - -```fortran -USE MPI -! or the older form: INCLUDE 'mpif.h' - -MPI_GRAPH_MAP(COMM, NNODES, INDEX, EDGES, NEWRANK, IERROR) - INTEGER COMM, NNODES, INDEX(*), EDGES(*), NEWRANK, IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Graph_map(comm, nnodes, index, edges, newrank, ierror) - TYPE(MPI_Comm), INTENT(IN) :: comm - INTEGER, INTENT(IN) :: nnodes, index(nnodes), edges(*) - INTEGER, INTENT(OUT) :: newrank - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Input Parameters - -* `comm` : Input communicator (handle). -* `nnodes` : Number of graph nodes (integer). -* `index` : Integer array specifying the graph structure, see -`MPI_Graph_create`. -* `edges` : Integer array specifying the graph structure. 
- -# Output Parameters - -* `newrank` : Reordered rank of the calling process; MPI_UNDEFINED if the calling -process does not belong to graph (integer). -* `IERROR` : Fortran only: Error status (integer). - - - -# Description - -`MPI_Cart_map` and `MPI_Graph_map` can be used to implement all other -topology functions. In general they will not be called by the user -directly, unless he or she is creating additional virtual topology -capability other than that provided by MPI. - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. - -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. - -# See Also - -[`MPI_Cart_map`(3)](MPI_Cart_map.html) diff --git a/ompi/mpi/man/man3/MPI_Graph_neighbors.3in b/ompi/mpi/man/man3/MPI_Graph_neighbors.3in deleted file mode 100644 index 43f6f32cd2e..00000000000 --- a/ompi/mpi/man/man3/MPI_Graph_neighbors.3in +++ /dev/null @@ -1,106 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Graph_neighbors 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Graph_neighbors \fP \- Returns the neighbors of a node associated with a graph topology. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Graph_neighbors(MPI_Comm \fIcomm\fP, int\fI rank\fP, int\fI maxneighbors\fP, - int\fI neighbors\fP[]) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_GRAPH_NEIGHBORS(\fICOMM, RANK, MAXNEIGHBORS, NEIGHBORS, IERROR\fP) - INTEGER \fICOMM, RANK, MAXNEIGHBORS, NEIGHBORS(*), IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Graph_neighbors(\fIcomm\fP, \fIrank\fP, \fImaxneighbors\fP, \fIneighbors\fP, \fIierror\fP) - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - INTEGER, INTENT(IN) :: \fIrank\fP, \fImaxneighbors\fP - INTEGER, INTENT(OUT) :: \fIneighbors(maxneighbors)\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -comm -Communicator with graph topology (handle). -.TP 1i -rank -Rank of process in group of comm (integer). -.TP 1i -maxneighbors -Size of array neighbors (integer). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -neighbors -Ranks of processes that are neighbors to specified process (array of integers). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -\fBExample:\fP Suppose that comm is a communicator with a shuffle-exchange -topology. The group has 2n members. Each process is labeled by a(1),\ ..., a(n) with a(i) E{0,1}, and has three neighbors: exchange (a(1),\ ..., a(n) = a(1),\ ..., a(n-1), a(n) (a = 1 - a), shuffle (a(1),\ ..., a(n)) = a(2),\ ..., a(n), a(1), and unshuffle (a(1),\ ..., a(n)) = a(n), a(1),\ ..., a(n-1). The graph adjacency list is illustrated below for n=3. -.sp -.nf - exchange shuffle unshuffle - node neighbors(1) neighbors(2) neighbors(3) - 0(000) 1 0 0 - 1(001) 0 2 4 - 2(010) 3 4 1 - 3(011) 2 6 5 - 4(100) 5 1 2 - 5(101) 4 3 6 - 6(110) 7 5 3 - 7(111) 6 7 7 -.fi -.sp -Suppose that the communicator comm has this topology associated with it. The following code fragment cycles through the three types of neighbors and performs an appropriate permutation for each. -.sp -.nf -C assume: each process has stored a real number A. 
-C extract neighborhood information - CALL MPI_COMM_RANK(comm, myrank, ierr) - CALL MPI_GRAPH_NEIGHBORS(comm, myrank, 3, neighbors, ierr) -C perform exchange permutation - CALL MPI_SENDRECV_REPLACE(A, 1, MPI_REAL, neighbors(1), 0, - + neighbors(1), 0, comm, status, ierr) -C perform shuffle permutation - CALL MPI_SENDRECV_REPLACE(A, 1, MPI_REAL, neighbors(2), 0, - + neighbors(3), 0, comm, status, ierr) -C perform unshuffle permutation - CALL MPI_SENDRECV_REPLACE(A, 1, MPI_REAL, neighbors(3), 0, - + neighbors(2), 0, comm, status, ierr) - -.fi -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.sp -MPI_Graph_neighbors_count - diff --git a/ompi/mpi/man/man3/MPI_Graph_neighbors_count.3.md b/ompi/mpi/man/man3/MPI_Graph_neighbors_count.3.md deleted file mode 100644 index bf3027c75bc..00000000000 --- a/ompi/mpi/man/man3/MPI_Graph_neighbors_count.3.md +++ /dev/null @@ -1,70 +0,0 @@ -# Name - -`MPI_Graph_neighbors_count` - Returns the number of neighbors of a -node associated with a graph topology. - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Graph_neighbors_count(MPI_Comm comm, int rank, - int *nneighbors) -``` - -## Fortran Syntax - -```fortran -USE MPI -! 
or the older form: INCLUDE 'mpif.h' - -MPI_GRAPH_NEIGHBORS_COUNT(COMM, RANK, NNEIGHBORS, IERROR) - INTEGER COMM, RANK, NNEIGHBORS, IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Graph_neighbors_count(comm, rank, nneighbors, ierror) - TYPE(MPI_Comm), INTENT(IN) :: comm - INTEGER, INTENT(IN) :: rank - INTEGER, INTENT(OUT) :: nneighbors - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - - -# Input Parameters - -* `comm` : Communicator with graph topology (handle). -* `rank` : Rank of process in group of comm (integer). - -# Output Parameters - -* `nneighbors` : Number of neighbors of specified process (integer). -* `IERROR` : Fortran only: Error status (integer). - -# Description - -`MPI_Graph_neighbors_count` and `MPI_Graph_neighbors` provide adjacency -information for a general, graph topology. `MPI_Graph_neighbors_count` -returns the number of neighbors for the process signified by `rank`. - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. - -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. - -# See Also - -[`MPI_Graph_neighbors`(3)](MPI_Graph_neighbors.html) diff --git a/ompi/mpi/man/man3/MPI_Graphdims_get.3.md b/ompi/mpi/man/man3/MPI_Graphdims_get.3.md deleted file mode 100644 index a830ffeff29..00000000000 --- a/ompi/mpi/man/man3/MPI_Graphdims_get.3.md +++ /dev/null @@ -1,72 +0,0 @@ -# Name - -`MPI_Graphdims_get` - Retrieves graph topology information associated -with a communicator. 
- -# Syntax - -## C Syntax - -```c -#include - -int MPI_Graphdims_get(MPI_Comm comm, int *nnodes, int *nedges) -``` - -## Fortran Syntax - -```fortran -USE MPI -! or the older form: INCLUDE 'mpif.h' - -MPI_GRAPHDIMS_GET(COMM, NNODES, NEDGES, IERROR) - INTEGER COMM, NNODES, NEDGES, IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Graphdims_get(comm, nnodes, nedges, ierror) - TYPE(MPI_Comm), INTENT(IN) :: comm - INTEGER, INTENT(OUT) :: nnodes, nedges - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - - -# Input Parameter - -* `comm` : Communicator for group with graph structure (handle). - -# Output Parameters - -* `nnodes` : Number of nodes in graph (integer). -* `nedges` : Number of edges in graph (integer). -* `IERROR` : Fortran only: Error status (integer). - -# Description - -Functions `MPI_Graphdims_get` and `MPI_Graph_get` retrieve the -graph-topology information that was associated with a communicator by -`MPI_Graph_create`. - -The information provided by `MPI_Graphdims_get` can be used to dimension -the vectors index and edges correctly for a call to `MPI_Graph_get`. - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. - -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. 
- -# See Also - -[`MPI_Graph_create`(3)](MPI_Graph_create.html) -[`MPI_Graph_get`(3)](MPI_Graph_get.html) diff --git a/ompi/mpi/man/man3/MPI_Grequest_complete.3.md b/ompi/mpi/man/man3/MPI_Grequest_complete.3.md deleted file mode 100644 index 96160e49e34..00000000000 --- a/ompi/mpi/man/man3/MPI_Grequest_complete.3.md +++ /dev/null @@ -1,76 +0,0 @@ -# Name - -`MPI_Grequest_complete` - Reports that a generalized request is -complete. - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Grequest_complete(MPI_Request request) -``` - -## Fortran Syntax - -```fortran -USE MPI -! or the older form: INCLUDE 'mpif.h' - -MPI_GREQUEST_COMPLETE(REQUEST, IERROR) - INTEGER REQUEST, IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Grequest_complete(request, ierror) - TYPE(MPI_Request), INTENT(IN) :: request - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Input/Output Parameter - -* `request` : Generalized request (handle). - -# Output Parameter - -* `IERROR` : Fortran only: Error status (integer). - -# Description - -`MPI_Grequest_complete` informs MPI that the operations represented by the -generalized request `request` are complete. A call to `MPI_Wait(request, status)` -will return, and a call to `MPI_Test(request, flag, status)` will return -flag=true only after a call to `MPI_Grequest_complete` has -declared that these operations are complete. - -MPI imposes no restrictions on the code executed by the callback -functions. However, new nonblocking operations should be defined so that -the general semantic rules about MPI calls such as `MPI_Test`, -`MPI_Request_free`, or `MPI_Cancel` still hold. For example, all these calls -are supposed to be local and nonblocking. Therefore, the callback -functions `query_fn`, `free_fn`, or `cancel_fn` should invoke blocking -MPI communication calls only if the context is such that these calls are -guaranteed to return in finite time. 
Once `MPI_Cancel` has been invoked, -the canceled operation should complete in finite time, regardless of the -state of other processes (the operation has acquired "local" -semantics). It should either succeed or fail without side-effects. The -user should guarantee these same properties for newly defined -operations. - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. - -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. diff --git a/ompi/mpi/man/man3/MPI_Grequest_start.3.md b/ompi/mpi/man/man3/MPI_Grequest_start.3.md deleted file mode 100644 index 2b85cd74ca7..00000000000 --- a/ompi/mpi/man/man3/MPI_Grequest_start.3.md +++ /dev/null @@ -1,201 +0,0 @@ -# Name - -`MPI_Grequest_start` - Starts a generalized request and returns a -handle to it in `request`. - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Grequest_start(MPI_Grequest_query_function *query_fn, - MPI_Grequest_free_function *free_fn, - MPI_Grequest_cancel_function *cancel_fn, void *extra_state, - MPI_Request *request) -``` - -## Fortran Syntax (See Fortran 77 Notes) - -```fortran -USE MPI -! 
or the older form: INCLUDE 'mpif.h' - -MPI_GREQUEST_START(QUERY_FN, FREE_FN, CANCEL_FN, EXTRA_STATE, - REQUEST, IERROR) - INTEGER REQUEST, IERROR - EXTERNAL QUERY_FN, FREE_FN, CANCEL_FN - INTEGER(KIND=MPI_ADDRESS_KIND) EXTRA_STATE -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Grequest_start(query_fn, free_fn, cancel_fn, extra_state, request, - ierror) - PROCEDURE(MPI_Grequest_query_function) :: query_fn - PROCEDURE(MPI_Grequest_free_function) :: free_fn - PROCEDURE(MPI_Grequest_cancel_function) :: cancel_fn - INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: extra_state - TYPE(MPI_Request), INTENT(OUT) :: request - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Input Parameters - -* `query_fn` : Callback function invoked when request status is queried (function). -* `free_fn` : Callback function invoked when request is freed (function). -* `cancel_fn` : Callback function invoked when request is canceled (function). -* `extra_state` : Extra state. - -# Output Parameters - -* `request` : Generalized request (handle). -* `IERROR` : Fortran only: Error status (integer). - -# Description - -`MPI_Grequest_start` starts a generalized `request` and returns a handle to -it in `request`. - -The syntax and meaning of the callback functions are listed below. All -callback functions are passed the `extra_state` argument that was -associated with the `request` by the starting call `MPI_Grequest_start`. -This can be used to maintain user-defined state for the `request`. In C, -the query function is - -```c -typedef int MPI_Grequest_query_function(void *extra_state, - MPI_Status *status); -``` - -In Fortran, it is - -```fortran -SUBROUTINE GREQUEST_QUERY_FUNCTION(EXTRA_STATE, STATUS, IERROR) - INTEGER STATUS(MPI_STATUS_SIZE), IERROR - INTEGER(KIND=MPI_ADDRESS_KIND) EXTRA_STATE -``` - -The `query_fn` function computes the status that should be returned for -the generalized request. 
The status also includes information about -successful/unsuccessful cancellation of the request (result to be -returned by `MPI_Test_cancelled`). - -The `query_fn` function is invoked by the -`MPI_{Wait|Test}{any|some|all}` call that completed the generalized -request associated with this callback. The callback function is also -invoked by calls to `MPI_Request_get_status` if the request is complete -when the call occurs. In both cases, the callback is passed a reference -to the corresponding status variable passed by the user to the MPI call. -If the user provided `MPI_STATUS_IGNORE` or `MPI_STATUSES_IGNORE` to the MPI -function that causes `query_fn` to be called, then MPI will pass a valid -status object to `query_fn`, and this status will be ignored upon return -of the callback function. Note that `query_fn` is invoked only after -`MPI_Grequest_complete` is called on the request; it may be invoked -several times for the same generalized request. Note also that a call to -`MPI_{Wait|Test}{some|all}` may cause multiple invocations of -`query_fn` callback functions, one for each generalized request that is -completed by the MPI call. The order of these invocations is not -specified by MPI. - -In C, the free function is - -```c -typedef int MPI_Grequest_free_function(void *extra_state); -``` - -And in Fortran, it is - -```fortran -SUBROUTINE GREQUEST_FREE_FUNCTION(EXTRA_STATE, IERROR) - INTEGER IERROR - INTEGER(KIND=MPI_ADDRESS_KIND) EXTRA_STATE -``` - -The `free_fn` callback function is invoked to clean up user-allocated -resources when the generalized request is freed. - -The `free_fn` function is invoked by the -`MPI_{Wait|Test}{any|some|all}` call that completed the generalized -request associated with this callback. `free_fn` is invoked after the -call to `query_fn` for the same request. However, if the MPI call -completed multiple generalized requests, the order in which `free_fn` -callback functions are invoked is not specified by MPI. 
- -The `free_fn` callback is also invoked for generalized requests that are -freed by a call to `MPI_Request_free` (no call to -`MPI_{Wait|Test}{any|some|all}` will occur for such a request). In -this case, the callback function will be called either in the MPI call -`MPI_Request_free(request)` or in the MPI call -`MPI_Grequest_complete(request)`, whichever happens last. In other words, -in this case the actual freeing code is executed as soon as both calls -(`MPI_Request_free` and `MPI_Grequest_complete`) have occurred. The -`request` is not deallocated until after `free_fn` completes. Note that -`free_fn` will be invoked only once per request by a correct program. - -In C, the cancel function is - -```c -typedef int MPI_Grequest_cancel_function(void *extra_state, int complete); -``` - -In Fortran, the cancel function is - -```Fortran -SUBROUTINE GREQUEST_CANCEL_FUNCTION(EXTRA_STATE, COMPLETE, IERROR) - INTEGER IERROR - INTEGER(KIND=MPI_ADDRESS_KIND) EXTRA_STATE - LOGICAL COMPLETE -``` - -The `cancel_fn` function is invoked to start the cancellation of a -generalized request. It is called by `MPI_Request_cancel(request)`. MPI -passes to the callback function complete=true if `MPI_Grequest_complete` -has already been called on the request, and complete=false otherwise. - -# Fortran 77 Notes - -The MPI standard prescribes portable Fortran syntax for the -`EXTRA_STATE` argument only for Fortran 90. FORTRAN 77 users may use the -non-portable syntax - -```fortran -INTEGER*MPI_ADDRESS_KIND EXTRA_STATE -``` - -where `MPI_ADDRESS_KIND` is a constant defined in mpif.h and gives the -length of the declared integer in bytes. - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. - -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. 
The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. - -All callback functions return an error code. The code is passed back and -dealt with as appropriate for the error code by the MPI function that -invoked the callback function. For example, if error codes are returned, -then the error code returned by the callback function will be returned -by the MPI function that invoked the callback function. In the case of a -`MPI_{Wait|Test}any` call that invokes both `query_fn` and `free_fn`, -the MPI call will return the error code returned by the last callback, -namely `free_fn`. If one or more of the `request`s in a call to -`MPI_{Wait|Test}{some|all`} has failed, then the MPI call will return -`MPI_ERR_IN_STATUS`. In such a case, if the MPI call was passed an array -of statuses, then MPI will return in each of the statuses that -correspond to a completed generalized `request` the error code returned by -the corresponding invocation of its `free_fn` callback function. -However, if the MPI function was passed `MPI_STATUSES_IGNORE`, then the -individual error codes returned by each callback function will be lost. - -See the MPI man page for a full list of MPI error codes. diff --git a/ompi/mpi/man/man3/MPI_Group_c2f.3in b/ompi/mpi/man/man3/MPI_Group_c2f.3in deleted file mode 100644 index a13fce697dd..00000000000 --- a/ompi/mpi/man/man3/MPI_Group_c2f.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Comm_f2c.3 diff --git a/ompi/mpi/man/man3/MPI_Group_compare.3.md b/ompi/mpi/man/man3/MPI_Group_compare.3.md deleted file mode 100644 index e8fa84e34f3..00000000000 --- a/ompi/mpi/man/man3/MPI_Group_compare.3.md +++ /dev/null @@ -1,66 +0,0 @@ -# Name - -`MPI_Group_compare` - Compares two groups. 
- -# Syntax - -## C Syntax - -```c -#include - -int MPI_Group_compare(MPI_Group group1, MPI_Group group2, - int *result) -``` - -## Fortran Syntax - -```fortran -USE MPI -! or the older form: INCLUDE 'mpif.h' - -MPI_GROUP_COMPARE(GROUP1, GROUP2, RESULT, IERROR) - INTEGER GROUP1, GROUP2, RESULT, IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Group_compare(group1, group2, result, ierror) - TYPE(MPI_Group), INTENT(IN) :: group1, group2 - INTEGER, INTENT(OUT) :: result - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Input Parameters - -* `group1` : First group (handle). -* `group2` : Second group (handle). - -# Output Parameters - -* `result` : Integer which is MPI_IDENT if the order and members of the two -groups are the same, MPI_SIMILAR if only the members are the same, -and MPI_UNEQUAL otherwise. -* `IERROR` : Fortran only: Error status (integer). - -# Description - -`MPI_IDENT` results if the group members and group order is exactly the -same in both groups. This happens for instance if `group1` and `group2` are -the same handle. `MPI_SIMILAR` results if the group members are the same -but the order is different. `MPI_UNEQUAL` results otherwise. - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. - -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. 
diff --git a/ompi/mpi/man/man3/MPI_Group_difference.3.md b/ompi/mpi/man/man3/MPI_Group_difference.3.md deleted file mode 100644 index 592e0dea4d3..00000000000 --- a/ompi/mpi/man/man3/MPI_Group_difference.3.md +++ /dev/null @@ -1,79 +0,0 @@ -# Name - -`MPI_Group_difference` - Makes a group from the difference of two -groups. - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Group_difference(MPI_Group group1, MPI_Group group2, - MPI_Group *newgroup) -``` - -## Fortran Syntax - -```fortran -USE MPI -! or the older form: INCLUDE 'mpif.h' - -MPI_GROUP_DIFFERENCE(GROUP1, GROUP2, NEWGROUP, IERROR) - INTEGER GROUP1, GROUP2, NEWGROUP, IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Group_difference(group1, group2, newgroup, ierror) - TYPE(MPI_Group), INTENT(IN) :: group1, group2 - TYPE(MPI_Group), INTENT(OUT) :: newgroup - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Input Parameters - -* `group1` : First group (handle). -* `group2` : Second group (handle). - -# Output Parameters - -* `newgroup` : Difference group (handle). -* `IERROR` : Fortran only: Error status (integer). - -# Description - -The set-like operations are defined as follows: -* `union` -- All elements of the first group (`group1`), followed by all -elements of second group (`group2`) that are not in the first group -* `intersect` -- all elements of the first group that are also in the -second group, ordered as in first group -* `difference` -- all elements of the first group that are not in the -second group, ordered as in the first group - -Note that for these operations the order of processes in the output -group is determined primarily by order in the first group (if possible) -and then, if necessary, by order in the second group. Neither union nor -intersection are commutative, but both are associative. - -The new group can be empty, that is, equal to `MPI_GROUP_EMPTY`. 
- -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. - -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. - -# See Also - -[`MPI_Group_free`(3)](MPI_Group_free.html) diff --git a/ompi/mpi/man/man3/MPI_Group_excl.3.md b/ompi/mpi/man/man3/MPI_Group_excl.3.md deleted file mode 100644 index 7b489c9abdc..00000000000 --- a/ompi/mpi/man/man3/MPI_Group_excl.3.md +++ /dev/null @@ -1,81 +0,0 @@ -# Name - -`MPI_Group_excl` - Produces a group by reordering an existing group -and taking only unlisted members. - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Group_excl(MPI_Group group, int n, const int ranks[], - MPI_Group *newgroup) -``` - -## Fortran Syntax - -```fortran -USE MPI -! or the older form: INCLUDE 'mpif.h' - -MPI_GROUP_EXCL(GROUP, N, RANKS, NEWGROUP, IERROR) - INTEGER GROUP, N, RANKS(*), NEWGROUP, IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Group_excl(group, n, ranks, newgroup, ierror) - TYPE(MPI_Group), INTENT(IN) :: group - INTEGER, INTENT(IN) :: n, ranks(n) - TYPE(MPI_Group), INTENT(OUT) :: newgroup - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Input Parameters - -* `group` : Group (handle). -* `n` : Number of elements in array ranks (integer). -* `ranks` : Array of integer ranks in group not to appear in newgroup. - -# Output Parameters - -* `newgroup` : New group derived from above, preserving the order defined by group -(handle). -* `IERROR` : Fortran only: Error status (integer). 
- -# Description - -The function `MPI_Group_excl` creates a `group` of processes `newgroup` that -is obtained by deleting from `group` those processes with ranks -`ranks[0]`, ... `ranks[n-1]`. The ordering of processes in `newgroup` is -identical to the ordering in `group`. Each of the n elements of ranks must -be a valid rank in `group` and all elements must be distinct; otherwise, -the call is erroneous. If `n` = 0, then `newgroup` is identical to `group`. - -# Note - -Currently, each of the ranks to exclude must be a valid rank in the -`group` and all elements must be distinct or the function is erroneous. -This restriction is per the draft. - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. - -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. - -# See Also - -[`MPI_Group_range_excl`(3)](MPI_Group_range_excl.html) -[`MPI_Group_free`(3)](MPI_Group_free.html) diff --git a/ompi/mpi/man/man3/MPI_Group_f2c.3in b/ompi/mpi/man/man3/MPI_Group_f2c.3in deleted file mode 100644 index a13fce697dd..00000000000 --- a/ompi/mpi/man/man3/MPI_Group_f2c.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Comm_f2c.3 diff --git a/ompi/mpi/man/man3/MPI_Group_free.3.md b/ompi/mpi/man/man3/MPI_Group_free.3.md deleted file mode 100644 index 387f8e24a0a..00000000000 --- a/ompi/mpi/man/man3/MPI_Group_free.3.md +++ /dev/null @@ -1,63 +0,0 @@ -# Name - -`MPI_Group_free` - Frees a group. - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Group_free(MPI_Group *group) -``` - -## Fortran Syntax - -```fortran -USE MPI -! 
or the older form: INCLUDE 'mpif.h' - -MPI_GROUP_FREE(GROUP, IERROR) - INTEGER GROUP, IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Group_free(group, ierror) - TYPE(MPI_Group), INTENT(INOUT) :: group - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Input/Output Parameter - -* `group` : Group (handle). - -# Output Parameter - -* `IERROR` : Fortran only: Error status (integer). - -# Description - -This operation marks a `group` object for deallocation. The handle `group` -is set to `MPI_GROUP_NULL` by the call. Any ongoing operation using this -`group` will complete normally. - -# Note - -On return, `group` is set to `MPI_GROUP_NULL`. - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. - -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. diff --git a/ompi/mpi/man/man3/MPI_Group_from_session_pset.3.md b/ompi/mpi/man/man3/MPI_Group_from_session_pset.3.md deleted file mode 100644 index 486d7cfcbb4..00000000000 --- a/ompi/mpi/man/man3/MPI_Group_from_session_pset.3.md +++ /dev/null @@ -1,75 +0,0 @@ -# Name - -`MPI_Group_from_session_pset` - Creates a group using a provided session handle and process set. - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Group_from_session_pset(MPI_Session session, const char *pset_name, MPI_Group *newgroup) -``` - -## Fortran Syntax - -```fortran -USE MPI -! 
or the older form: INCLUDE 'mpif.h' - -MPI_GROUP_FROM_SESSION_PSET(SESSION, PSET_NAME, NEWGROUP, IERROR) - INTEGER SESSION, NEWGROUP, IERROR - CHARACTER*(*) PSET_NAME -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Group_from_session_pset(session, pset_name, newgroup, ierror) - TYPE(MPI_Session), INTENT(IN) :: session - CHARACTER(LEN=*), INTENT(IN) :: pset_name - TYPE(MPI_Group), INTENT(OUT) :: newgroup - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Input Parameters - -* `session` : Session (handle). -* `pset_name` : name of process set to use to create the new group (string) - -# Output Parameters - -* `newgroup` : New group derived from supplied session and process set (handle). -* `IERROR` : Fortran only: Error status (integer). - -# Description - -The function `MPI_Group_from_session_pset` creates a group `newgroup` using the -provided `session` handle and `process set`. The process set name must be one returned from -an invocation of `MPI_Session_get_nth_pset` using the supplied `session` handle. If the -`pset_name` does not exist, MPI_GROUP_NULL will be returned in the `newgroup` argument. - -# Note - -As with other group constructors, `MPI_Group_from_session_pset` is a local function. - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. - -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Session_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. 
- -# See Also - -[`MPI_Session_init`(3)](MPI_Session_init.html) -[`MPI_Session_get_nth_pset`(3)](MPI_Session_get_nth_pset.html) -[`MPI_Group_free`(3)](MPI_Group_free.html) diff --git a/ompi/mpi/man/man3/MPI_Group_incl.3.md b/ompi/mpi/man/man3/MPI_Group_incl.3.md deleted file mode 100644 index c755683c289..00000000000 --- a/ompi/mpi/man/man3/MPI_Group_incl.3.md +++ /dev/null @@ -1,83 +0,0 @@ -# Name - -`MPI_Group_incl` - Produces a group by reordering an existing group -and taking only listed members. - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Group_incl(MPI_Group group, int n, const int ranks[], - MPI_Group *newgroup) -``` - -## Fortran Syntax - -```fortran -USE MPI -! or the older form: INCLUDE 'mpif.h' - -MPI_GROUP_INCL(GROUP, N, RANKS, NEWGROUP, IERROR) - INTEGER GROUP, N, RANKS(*), NEWGROUP, IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Group_incl(group, n, ranks, newgroup, ierror) - TYPE(MPI_Group), INTENT(IN) :: group - INTEGER, INTENT(IN) :: n, ranks(n) - TYPE(MPI_Group), INTENT(OUT) :: newgroup - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Input Parameters - -* `group` : Group (handle). -* `n` : Number of elements in array ranks (and size of `newgroup`)(integer). -* `ranks` : Ranks of processes in group to appear in newgroup (array of -integers). - -# Output Parameters - -* `newgroup` : New group derived from above, in the order defined by ranks -(handle). -* `IERROR` : Fortran only: Error status (integer). - -# Description - -The function `MPI_Group_incl` creates a group `group_out` that consists of -the n processes in `group` with ranks `rank[0]`, ..., `rank[n-1]`; the -process with rank i in `group_out` is the process with rank `ranks[i]` in -`group`. Each of the n elements of ranks must be a valid rank in `group` and -all elements must be distinct, or else the program is erroneous. If `n` = -0, then `group_out` is `MPI_GROUP_EMPTY`. 
This function can, for instance, -be used to reorder the elements of a `group`. - -# Note - -This implementation does not currently check to ensure that there are no -duplicates in the list of ranks. - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. - -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. - -# See Also - -[`MPI_Group_compare`(3)](MPI_Group_compare.html) -[`MPI_Group_range_incl`(3)](MPI_Group_range_incl.html) -[`MPI_Group_free`(3)](MPI_Group_free.html) diff --git a/ompi/mpi/man/man3/MPI_Group_intersection.3.md b/ompi/mpi/man/man3/MPI_Group_intersection.3.md deleted file mode 100644 index b01767f6922..00000000000 --- a/ompi/mpi/man/man3/MPI_Group_intersection.3.md +++ /dev/null @@ -1,79 +0,0 @@ -# Name - -`MPI_Group_intersection` - Produces a group at the intersection of -two existing groups. - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Group_intersection(MPI_Group group1, MPI_Group group2, - MPI_Group *newgroup) -``` - -## Fortran Syntax - -```fortran -USE MPI -! or the older form: INCLUDE 'mpif.h' - -MPI_GROUP_INTERSECTION(GROUP1, GROUP2, NEWGROUP, IERROR) - INTEGER GROUP1, GROUP2, NEWGROUP, IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Group_intersection(group1, group2, newgroup, ierror) - TYPE(MPI_Group), INTENT(IN) :: group1, group2 - TYPE(MPI_Group), INTENT(OUT) :: newgroup - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Input Parameters - -* `group1` : First group (handle). -* `group2` : Second group (handle). 
- -# Output Parameters - -* `newgroup` : Intersection group (handle). -* `IERROR` : Fortran only: Error status (integer). - -# Description - -The set-like operations are defined as follows: -* `union` -- All elements of the first group (`group1`), followed by all -elements of second group (`group2`) not in first. -* `intersect` -- all elements of the first group that are also in the -second group, ordered as in first group. -* `difference` -- all elements of the first group that are not in the -second group, ordered as in the first group. - -Note that for these operations the order of processes in the output -group is determined primarily by order in the first group (if possible) -and then, if necessary, by order in the second group. Neither union nor -intersection are commutative, but both are associative. - -The new group can be empty, that is, equal to `MPI_GROUP_EMPTY`. - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. - -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. - -# See Also - -[`MPI_Group_free`(3)](MPI_Group_free.html) diff --git a/ompi/mpi/man/man3/MPI_Group_range_excl.3in b/ompi/mpi/man/man3/MPI_Group_range_excl.3in deleted file mode 100644 index 65934e234a4..00000000000 --- a/ompi/mpi/man/man3/MPI_Group_range_excl.3in +++ /dev/null @@ -1,80 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. 
-.\" $COPYRIGHT$ -.TH MPI_Group_range_excl 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Group_range_excl\fP \- Produces a group by excluding ranges of processes from an existing group. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Group_range_excl(MPI_Group \fIgroup\fP, int\fI n\fP, int\fI ranges\fP[][3], - MPI_Group\fI *newgroup\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_GROUP_RANGE_EXCL(\fIGROUP, N, RANGES, NEWGROUP, IERROR\fP) - INTEGER \fIGROUP, N, RANGES(3,*), NEWGROUP, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Group_range_excl(\fIgroup\fP, \fIn\fP, \fIranges\fP, \fInewgroup\fP, \fIierror\fP) - TYPE(MPI_Group), INTENT(IN) :: \fIgroup\fP - INTEGER, INTENT(IN) :: \fIn\fP, \fIranges(3,n)\fP - TYPE(MPI_Group), INTENT(OUT) :: \fInewgroup\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -group -Group (handle). -.TP 1i -n -Number of triplets in array ranges (integer). -.TP 1i -ranges -A one-dimensional array of integer triplets of the form (first rank, last rank, stride), indicating the ranks in group of processes to be excluded from the output group newgroup. - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -newgroup -New group derived from above, preserving the order in group (handle). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -Each computed rank must be a valid rank in group and all computed ranks must be distinct, or else the program is erroneous. -.sp -The functionality of this routine is specified to be equivalent to -expanding the array of ranges to an array of the excluded ranks and passing the resulting array of ranks and other arguments to MPI_Group_excl. A call to MPI_Group_excl is equivalent to a call to MPI_Group_range_excl with each rank i in ranks replaced by the triplet (i,i,1) in the argument ranges. 
- -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.sp -MPI_Group_excl -.br -MPI_Group_free -.br - diff --git a/ompi/mpi/man/man3/MPI_Group_range_incl.3in b/ompi/mpi/man/man3/MPI_Group_range_incl.3in deleted file mode 100644 index c53ed850227..00000000000 --- a/ompi/mpi/man/man3/MPI_Group_range_incl.3in +++ /dev/null @@ -1,101 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Group_range_incl 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Group_range_incl\fP \- Creates a new group from ranges of ranks in an existing group. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Group_range_incl(MPI_Group \fIgroup\fP, int\fI n\fP, int\fI ranges\fP[][3], - MPI_Group\fI *newgroup\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_GROUP_RANGE_INCL(\fIGROUP, N, RANGES, NEWGROUP, IERROR\fP) - INTEGER \fIGROUP, N, RANGES(3,*), NEWGROUP, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Group_range_incl(\fIgroup\fP, \fIn\fP, \fIranges\fP, \fInewgroup\fP, \fIierror\fP) - TYPE(MPI_Group), INTENT(IN) :: \fIgroup\fP - INTEGER, INTENT(IN) :: \fIn\fP, \fIranges(3,n)\fP - TYPE(MPI_Group), INTENT(OUT) :: \fInewgroup\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -group -Group (handle). -.TP 1i -n -Number of triplets in array ranges (integer). -.TP 1i -ranges -A one-dimensional array of integer triplets, of the form (first rank, last rank, stride) indicating ranks in group or processes to be included in newgroup. - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -newgroup -New group derived from above, in the order defined by ranges (handle). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -If ranges consist of the triplets -.sp -.nf - (first1, last1, stride1),\ ..., (firstn, lastn, striden) -.fi -.sp -then newgroup consists of the sequence of processes in group with ranks -.sp -.nf - last(1)-first(1) - first(1), first(1) + stride(1),..., first(1) + ---------------- stride(1),... - stride(1) - - last(n)-first(n) - first(n), first(n) + stride(n),..., first(n) + ---------------- stride(n). - stride(n) -.fi -.sp -Each computed rank must be a valid rank in group and all computed ranks must be distinct, or else the program is erroneous. Note that we may have first(i) > last(i), and stride(i) may be negative, but cannot be zero. -.sp -The functionality of this routine is specified to be equivalent to expanding the array of ranges to an array of the included ranks and passing the resulting array of ranks and other arguments to MPI_Group_incl. 
A call to MPI_Group_incl is equivalent to a call to MPI_Group_range_incl with each rank i in ranks replaced by the triplet (i,i,1) in the argument ranges. - -.SH NOTE -.ft R -This implementation does not currently check to see that the list of ranges to include are valid ranks in the group. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.sp -MPI_Group_incl -.br -MPI_Group_free - - diff --git a/ompi/mpi/man/man3/MPI_Group_rank.3in b/ompi/mpi/man/man3/MPI_Group_rank.3in deleted file mode 100644 index 17952e25c9b..00000000000 --- a/ompi/mpi/man/man3/MPI_Group_rank.3in +++ /dev/null @@ -1,61 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Group_rank 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Group_rank\fP \- Returns the rank of the calling process in the given group. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Group_rank(MPI_Group \fIgroup\fP, int *\fIrank\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_GROUP_RANK(\fIGROUP, RANK, IERROR\fP) - INTEGER \fIGROUP, RANK, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Group_rank(\fIgroup\fP, \fIrank\fP, \fIierror\fP) - TYPE(MPI_Group), INTENT(IN) :: \fIgroup\fP - INTEGER, INTENT(OUT) :: \fIrank\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -group -Group (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -rank -Rank of the calling process in group, or MPI_UNDEFINED if the process is not a member (integer). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Group_rank returns as the output parameter \fIrank\fP the rank of the calling process in group. If the process is not a member of group then MPI_UNDEFINED is returned. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_Group_size.3in b/ompi/mpi/man/man3/MPI_Group_size.3in deleted file mode 100644 index 04ba8c002d8..00000000000 --- a/ompi/mpi/man/man3/MPI_Group_size.3in +++ /dev/null @@ -1,61 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Group_size 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Group_size\fP \- Returns the size of a group. 
- -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Group_size(MPI_Group \fIgroup\fP, int \fI*size\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_GROUP_SIZE(\fIGROUP, SIZE, IERROR\fP) - INTEGER \fIGROUP, SIZE, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Group_size(\fIgroup\fP, \fIsize\fP, \fIierror\fP) - TYPE(MPI_Group), INTENT(IN) :: \fIgroup\fP - INTEGER, INTENT(OUT) :: \fIsize\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -group -Group (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -size -Number of processes in the group (integer). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Group_size returns in \fIsize\fP the number of processes in the group. Thus, if group = MPI_GROUP_EMPTY, then the call will return size = 0. On the other hand, a call with group = MPI_GROUP_NULL is erroneous. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_Group_translate_ranks.3in b/ompi/mpi/man/man3/MPI_Group_translate_ranks.3in deleted file mode 100644 index 5eac6382810..00000000000 --- a/ompi/mpi/man/man3/MPI_Group_translate_ranks.3in +++ /dev/null @@ -1,74 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. 
-.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Group_translate_ranks 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Group_translate_ranks\fP \- Translates the ranks of processes in one group to those in another group. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Group_translate_ranks(MPI_Group \fIgroup1\fP, int\fI n\fP, - const int\fI ranks1\fP[], MPI_Group\fI group2\fP, int\fI ranks2\fP[]) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_GROUP_TRANSLATE_RANKS(\fIGROUP1, N, RANKS1, GROUP2, RANKS2, - IERROR\fP) - INTEGER \fIGROUP1, N, RANKS1(*), GROUP2, RANKS2(*), IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Group_translate_ranks(\fIgroup1\fP, \fIn\fP, \fIranks1\fP, \fIgroup2\fP, \fIranks2\fP, \fIierror\fP) - TYPE(MPI_Group), INTENT(IN) :: \fIgroup1\fP, \fIgroup2\fP - INTEGER, INTENT(IN) :: \fIn\fP, \fIranks1(n)\fP - INTEGER, INTENT(OUT) :: \fIranks2(n)\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -group1 -First group (handle). -.TP 1i -n -Number of ranks in ranks1 and ranks2 arrays (integer). -.TP 1i -ranks1 -Array of zero or more valid ranks in group1. -.TP 1i -group2 -Second group (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -ranks2 -Array of corresponding ranks in group2, MPI_UNDEFINED when no correspondence exists. -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -This function is important for determining the relative numbering of the same processes in two different groups. For instance, if one knows the ranks of certain processes in the group of MPI_COMM_WORLD, one might want to know their ranks in a subset of that group. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. 
-.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_Group_union.3in b/ompi/mpi/man/man3/MPI_Group_union.3in deleted file mode 100644 index 84a40079631..00000000000 --- a/ompi/mpi/man/man3/MPI_Group_union.3in +++ /dev/null @@ -1,85 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Group_union 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Group_union \fP \- Produces a group by combining two groups. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Group_union(MPI_Group \fIgroup1\fP, MPI_Group \fIgroup2\fP, - MPI_Group *\fInewgroup\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_GROUP_UNION(\fIGROUP1, GROUP2, NEWGROUP, IERROR\fP) - INTEGER \fIGROUP1, GROUP2, NEWGROUP, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Group_union(\fIgroup1\fP, \fIgroup2\fP, \fInewgroup\fP, \fIierror\fP) - TYPE(MPI_Group), INTENT(IN) :: \fIgroup1\fP, \fIgroup2\fP - TYPE(MPI_Group), INTENT(OUT) :: \fInewgroup\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -group1 -First group (handle). -.TP 1i -group2 -Second group (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -newgroup -Union group (handle). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). 
- -.SH DESCRIPTION -.ft R -The set-like operations are defined as follows: -.TP - o -union -- All elements of the first group (group1), followed by all elements -of second group (group2) not in first. -.TP - o -intersect -- all elements of the first group that are also in the second -group, ordered as in first group. -.TP - o -difference -- all elements of the first group that are not in the second group, ordered as in the first group. -.sp -.LP -Note that for these operations the order of processes in the output group is determined primarily by order in the first group (if possible) and then, if necessary, by order in the second group. Neither union nor intersection are commutative, but both are associative. -.sp -The new group can be empty, that is, equal to MPI_GROUP_EMPTY. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. 
- -.SH SEE ALSO -MPI_Group_free -.br - diff --git a/ompi/mpi/man/man3/MPI_Iallgather.3in b/ompi/mpi/man/man3/MPI_Iallgather.3in deleted file mode 100644 index f7b03f37700..00000000000 --- a/ompi/mpi/man/man3/MPI_Iallgather.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Allgather.3 diff --git a/ompi/mpi/man/man3/MPI_Iallgatherv.3in b/ompi/mpi/man/man3/MPI_Iallgatherv.3in deleted file mode 100644 index 8fc7b812b1a..00000000000 --- a/ompi/mpi/man/man3/MPI_Iallgatherv.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Allgatherv.3 diff --git a/ompi/mpi/man/man3/MPI_Iallreduce.3in b/ompi/mpi/man/man3/MPI_Iallreduce.3in deleted file mode 100644 index 9c97358ebe6..00000000000 --- a/ompi/mpi/man/man3/MPI_Iallreduce.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Allreduce.3 diff --git a/ompi/mpi/man/man3/MPI_Ialltoall.3in b/ompi/mpi/man/man3/MPI_Ialltoall.3in deleted file mode 100644 index 591c20bb28d..00000000000 --- a/ompi/mpi/man/man3/MPI_Ialltoall.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Alltoall.3 diff --git a/ompi/mpi/man/man3/MPI_Ialltoallv.3in b/ompi/mpi/man/man3/MPI_Ialltoallv.3in deleted file mode 100644 index 6cc7026e897..00000000000 --- a/ompi/mpi/man/man3/MPI_Ialltoallv.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Alltoallv.3 diff --git a/ompi/mpi/man/man3/MPI_Ialltoallw.3in b/ompi/mpi/man/man3/MPI_Ialltoallw.3in deleted file mode 100644 index 0cca872ba48..00000000000 --- a/ompi/mpi/man/man3/MPI_Ialltoallw.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Alltoallw.3 diff --git a/ompi/mpi/man/man3/MPI_Ibarrier.3in b/ompi/mpi/man/man3/MPI_Ibarrier.3in deleted file mode 100644 index 17e1bd261a8..00000000000 --- a/ompi/mpi/man/man3/MPI_Ibarrier.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Barrier.3 diff --git a/ompi/mpi/man/man3/MPI_Ibcast.3in b/ompi/mpi/man/man3/MPI_Ibcast.3in deleted file mode 100644 index c0a86bebc97..00000000000 --- a/ompi/mpi/man/man3/MPI_Ibcast.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Bcast.3 diff --git a/ompi/mpi/man/man3/MPI_Ibsend.3in 
b/ompi/mpi/man/man3/MPI_Ibsend.3in deleted file mode 100644 index e97d434c813..00000000000 --- a/ompi/mpi/man/man3/MPI_Ibsend.3in +++ /dev/null @@ -1,90 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright (c) 2010-2015 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Ibsend 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Ibsend\fP \- Starts a nonblocking buffered send. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Ibsend(const void *\fIbuf\fP, int\fI count\fP, MPI_Datatype\fI datatype\fP, - int\fI dest\fP, int\fI tag\fP, MPI_Comm\fI comm\fP, MPI_Request\fI *request\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_IBSEND(\fIBUF, COUNT, DATATYPE, DEST, TAG, COMM, REQUEST, IERROR\fP) - \fIBUF\fP(*) - INTEGER \fICOUNT, DATATYPE, DEST, TAG, COMM, REQUEST, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Ibsend(\fIbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIdest\fP, \fItag\fP, \fIcomm\fP, \fIrequest\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP, \fIdest\fP, \fItag\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -buf -Initial address of send buffer (choice). -.TP 1i -count -Number of elements in send buffer (integer). -.TP 1i -datatype -Data type of each send buffer element (handle). -.TP 1i -dest -Rank of destination (integer). -.TP 1i -tag -Message tag (integer). -.TP 1i -comm -Communicator (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -request -Communication request (handle). 
-.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Ibsend posts a buffered-mode, nonblocking send. Nonblocking calls allocate a communication request object and associate it with the request handle (the argument request). The request can be used later to query the status of the communication or wait for its completion. -.sp -A nonblocking send call indicates that the system may start copying data out of the send buffer. The sender should not modify any part of the send buffer after a nonblocking send operation is called, until the send completes. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. 
- -.SH SEE ALSO -MPI_Test -MPI_Wait -.br - - diff --git a/ompi/mpi/man/man3/MPI_Iexscan.3in b/ompi/mpi/man/man3/MPI_Iexscan.3in deleted file mode 100644 index c2ff4cf3254..00000000000 --- a/ompi/mpi/man/man3/MPI_Iexscan.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Exscan.3 diff --git a/ompi/mpi/man/man3/MPI_Igather.3in b/ompi/mpi/man/man3/MPI_Igather.3in deleted file mode 100644 index d15bc2d25cf..00000000000 --- a/ompi/mpi/man/man3/MPI_Igather.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Gather.3 diff --git a/ompi/mpi/man/man3/MPI_Igatherv.3in b/ompi/mpi/man/man3/MPI_Igatherv.3in deleted file mode 100644 index 3202cdbbd85..00000000000 --- a/ompi/mpi/man/man3/MPI_Igatherv.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Gatherv.3 diff --git a/ompi/mpi/man/man3/MPI_Improbe.3in b/ompi/mpi/man/man3/MPI_Improbe.3in deleted file mode 100644 index ffa67e3e8cc..00000000000 --- a/ompi/mpi/man/man3/MPI_Improbe.3in +++ /dev/null @@ -1,114 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2012 Oracle and/or its affiliates. All rights reserved. -.\" Copyright 2012 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Improbe 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Improbe\fP \- Non-blocking matched probe for a message. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Improbe(int \fIsource\fP, int\fI tag\fP, MPI_Comm\fI comm\fP, - int\fI *flag\fP, MPI_Message\fI *message\fP, MPI_Status\fI *status\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_IMPROBE(\fISOURCE, TAG, COMM, FLAG, MESSAGE, STATUS, IERROR\fP) - LOGICAL \fIFLAG\fP - INTEGER \fISOURCE, TAG, COMM, MESSAGE\fP - INTEGER \fISTATUS(MPI_STATUS_SIZE), IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Improbe(\fIsource\fP, \fItag\fP, \fIcomm\fP, \fIflag\fP, \fImessage\fP, \fIstatus\fP, \fIierror\fP) - INTEGER, INTENT(IN) :: \fIsource\fP, \fItag\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - INTEGER, INTENT(OUT) :: \fIflag\fP - TYPE(MPI_Message), INTENT(OUT) :: \fImessage\fP - TYPE(MPI_Status) :: \fIstatus\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -source -Source rank or MPI_ANY_SOURCE (integer). -.TP 1i -tag -Tag value or MPI_ANY_TAG (integer). -.TP 1i -comm -Communicator (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -flag -Flag (logical). -.ft R -.TP 1i -message -Message (handle). -.ft R -.TP 1i -status -Status object (status). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -Like MPI_Probe and MPI_Iprobe, the MPI_Mprobe and MPI_Improbe operations -allow incoming messages to be queried without actually receiving -them, except that MPI_Mprobe and MPI_Improbe provide a mechanism to -receive the specific message that was matched regardless of other -intervening probe or receive operations. This gives the application -an opportunity to decide how to receive the message, based on the -information returned by the probe. In particular, the application may -allocate memory for the receive buffer according to the length of the -probed message. -.sp -A matching probe with MPI_PROC_NULL as \fIsource\fP returns \fIflag\fP -= true, \fImessage\fP = MPI_MESSAGE_NO_PROC, and the \fIstatus\fP object -returns source = MPI_PROC_NULL, tag = MPI_ANY_TAG, and count = 0. 
-.sp -MPI_Iprobe returns a true value in \fIflag\fP if a message has been -matched and can be received by passing the \fImessage\fP handle to the -MPI_Mrecv or MPI_Imrecv functions, provided the \fIsource\fP was not -MPI_PROC_NULL. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -MPI_Comm_set_errhandler; the predefined error handler -MPI_ERRORS_RETURN may be used to cause error values to be -returned. Note that MPI does not guarantee that an MPI program can -continue past an error. - -.SH SEE ALSO -.ft R -.nf -MPI_Mprobe -MPI_Probe -MPI_Iprobe -MPI_Mrecv -MPI_Imrecv -MPI_Cancel diff --git a/ompi/mpi/man/man3/MPI_Imrecv.3in b/ompi/mpi/man/man3/MPI_Imrecv.3in deleted file mode 100644 index 72d0cb3a4b5..00000000000 --- a/ompi/mpi/man/man3/MPI_Imrecv.3in +++ /dev/null @@ -1,113 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2012 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Imrecv 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Imrecv\fP \- Non-blocking receive for a matched message - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Imrecv(void \fI*buf\fP, int\fI count\fP, MPI_Datatype\fI type\fP, - MPI_Message\fI *message\fP, MPI_Request\fI *request\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_IMRECV(\fIBUF, COUNT, DATATYPE, MESSAGE, REQUEST, IERROR\fP) - \fIBUF(*)\fP - INTEGER \fICOUNT, DATATYPE, MESSAGE, REQUEST, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Imrecv(\fIbuf\fP, \fIcount\fP, \fIdatatype\fP, \fImessage\fP, \fIrequest\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Message), INTENT(INOUT) :: \fImessage\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -count -Number of elements to receive (nonnegative integer). -.TP 1i -datatype -Datatype of each send buffer element (handle). -.TP 1i -message -Message (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -buf -Initial address of receive buffer (choice). -.TP 1i -request -Request (handle). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -The functions MPI_Mrecv and MPI_Imrecv receive messages that have been -previously matched by a matching probe. -.sp -The \fIrequest\fP returned from MPI_Imrecv can be used with any of the -MPI_Test and MPI_Wait variants, like any non-blocking receive request. -.sp -If MPI_Imrecv is called with MPI_MESSAGE_NULL as the message argument, -a call to one of the MPI_Test or MPI_Wait variants will return -immediately with the \fIstatus\fP object set to \fIsource\fP = -MPI_PROC_NULL, \fItag\fP = MPI_ANY_TAG, and \fIcount\fP = 0, as if a -receive from MPI_PROC_NULL was issued. -.sp -If reception of a matched message is started with MPI_Imrecv, then it -is possible to cancel the returned request with MPI_Cancel. If -MPI_Cancel succeeds, the matched message must be found by a subsequent -message probe (MPI_Probe, MPI_Iprobe, MPI_Mprobe, or MPI_Improbe), -received by a subsequent receive operation or canceled by the -sender. 
-.sp -Note, however, that is it possible for the cancellation of operations -initiated with MPI_Imrecv to fail. An example of a failing case is -when canceling the matched message receive would violate MPI message -ordering rules (e.g., if another message matching the same message -signature has matched -- and possible received -- before this -MPI_Imrecv is canceled). - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -MPI_Comm_set_errhandler; the predefined error handler -MPI_ERRORS_RETURN may be used to cause error values to be -returned. Note that MPI does not guarantee that an MPI program can -continue past an error. - -.SH SEE ALSO -.ft R -.nf -MPI_Mprobe -MPI_Improbe -MPI_Probe -MPI_Iprobe -MPI_Imrecv -MPI_Cancel diff --git a/ompi/mpi/man/man3/MPI_Ineighbor_allgather.3in b/ompi/mpi/man/man3/MPI_Ineighbor_allgather.3in deleted file mode 100644 index f0569265c29..00000000000 --- a/ompi/mpi/man/man3/MPI_Ineighbor_allgather.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Neighbor_allgather.3 diff --git a/ompi/mpi/man/man3/MPI_Ineighbor_allgatherv.3in b/ompi/mpi/man/man3/MPI_Ineighbor_allgatherv.3in deleted file mode 100644 index b8ce05e6462..00000000000 --- a/ompi/mpi/man/man3/MPI_Ineighbor_allgatherv.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Neighbor_allgatherv.3 diff --git a/ompi/mpi/man/man3/MPI_Ineighbor_alltoall.3in b/ompi/mpi/man/man3/MPI_Ineighbor_alltoall.3in deleted file mode 100644 index 56f630ecd06..00000000000 --- a/ompi/mpi/man/man3/MPI_Ineighbor_alltoall.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Neighbor_alltoall.3 diff --git a/ompi/mpi/man/man3/MPI_Ineighbor_alltoallv.3in b/ompi/mpi/man/man3/MPI_Ineighbor_alltoallv.3in deleted file mode 100644 
index 13b4e89a23c..00000000000 --- a/ompi/mpi/man/man3/MPI_Ineighbor_alltoallv.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Neighbor_alltoallv.3 diff --git a/ompi/mpi/man/man3/MPI_Ineighbor_alltoallw.3in b/ompi/mpi/man/man3/MPI_Ineighbor_alltoallw.3in deleted file mode 100644 index 299138b3a37..00000000000 --- a/ompi/mpi/man/man3/MPI_Ineighbor_alltoallw.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Neighbor_alltoallw.3 diff --git a/ompi/mpi/man/man3/MPI_Info_c2f.3in b/ompi/mpi/man/man3/MPI_Info_c2f.3in deleted file mode 100644 index a13fce697dd..00000000000 --- a/ompi/mpi/man/man3/MPI_Info_c2f.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Comm_f2c.3 diff --git a/ompi/mpi/man/man3/MPI_Info_create.3in b/ompi/mpi/man/man3/MPI_Info_create.3in deleted file mode 100644 index ddf5f60e4db..00000000000 --- a/ompi/mpi/man/man3/MPI_Info_create.3in +++ /dev/null @@ -1,67 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Info_create 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Info_create\fP \- Creates a new info object. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Info_create(MPI_Info \fI*info\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_INFO_CREATE(\fIINFO, IERROR\fP) - INTEGER \fIINFO, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Info_create(\fIinfo\fP, \fIierror\fP) - TYPE(MPI_Info), INTENT(OUT) :: \fIinfo\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -info -Info object created (handle). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Info_create creates a new info object. The newly created object contains no key/value pairs. 
- -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft r -MPI_Info_delete -.br -MPI_Info_dup -.br -MPI_Info_free -.br -MPI_Info_get -.br -MPI_Info_set -.br - diff --git a/ompi/mpi/man/man3/MPI_Info_delete.3in b/ompi/mpi/man/man3/MPI_Info_delete.3in deleted file mode 100644 index dd8ecab5d03..00000000000 --- a/ompi/mpi/man/man3/MPI_Info_delete.3in +++ /dev/null @@ -1,78 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Info_delete 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Info_delete\fP \- Deletes a key/value pair from \fIinfo\fP. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Info_delete(MPI_Info \fIinfo\fP, const char \fI*key\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_INFO_DELETE(\fIINFO, KEY, IERROR\fP) - INTEGER \fIINFO, IERROR\fP - CHARACTER*(*) \fIKEY\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Info_delete(\fIinfo\fP, \fIkey\fP, \fIierror\fP) - TYPE(MPI_Info), INTENT(IN) :: \fIinfo\fP - CHARACTER(LEN=*), INTENT(IN) :: \fIkey\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -info -Info object (handle). 
- -.SH INPUT PARAMETER -.ft R -.TP 1i -key -Key (string). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Info_delete deletes a (key,value) pair from \fIinfo\fP. If \fIkey\fP is not defined in \fIinfo\fP, the call raises an error of class MPI_ERR_INFO_NOKEY. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft r -MPI_Info_create -.br -MPI_Info_dup -.br -MPI_Info_free -.br -MPI_Info_get -.br -MPI_Info_set -.br - diff --git a/ompi/mpi/man/man3/MPI_Info_dup.3in b/ompi/mpi/man/man3/MPI_Info_dup.3in deleted file mode 100644 index 145b5831dc7..00000000000 --- a/ompi/mpi/man/man3/MPI_Info_dup.3in +++ /dev/null @@ -1,74 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Info_dup 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Info_dup\fP \- Duplicates an info object. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Info_dup(MPI_Info \fIinfo\fP, MPI_Info \fI*newinfo\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_INFO_DUP(\fIINFO, NEWINFO, IERROR\fP) - INTEGER \fIINFO, NEWINFO, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Info_dup(\fIinfo\fP, \fInewinfo\fP, \fIierror\fP) - TYPE(MPI_Info), INTENT(IN) :: \fIinfo\fP - TYPE(MPI_Info), INTENT(OUT) :: \fInewinfo\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -info -Info object (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -newinfo -Info object (handle). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Info_dup duplicates an existing info object, creating a new object, with the same (key,value) pairs and the same ordering of keys. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft r -MPI_Info_create -.br -MPI_Info_delete -.br -MPI_Info_free -.br -MPI_Info_get -.br -MPI_Info_set -.br - diff --git a/ompi/mpi/man/man3/MPI_Info_env.3in b/ompi/mpi/man/man3/MPI_Info_env.3in deleted file mode 100644 index c7ef970fa7d..00000000000 --- a/ompi/mpi/man/man3/MPI_Info_env.3in +++ /dev/null @@ -1,66 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2012 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. 
-.\" Copyright (c) 1996 Thinking Machines Corporation -.\" $COPYRIGHT$ -.TH MPI_INFO_ENV 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_INFO_ENV\fP \- Static MPI_Info object containing info about the application - -.SH DESCRIPTION -.ft R -The MPI-3 standard established a static MPI_Info object named \fIMPI_INFO_ENV\fP that can be used to access information about how the application was executed from the run-time. - -.SH SUPPORTED FIELDS -.ft R -.TP 1i -command -If available, the value will be set to argv[0]. Note that the value may not always be available - e.g., it is valid for a program to call MPI_Init with NULL parameters, in which case argv[0] will not be set if run as a singleton. This value will never be set in a Fortran program as the argv are not available. -.TP 1i -argv -The argv given for the application. If no arguments are passed to the application, then this value will not be set. It will also not be set in the case of a singleton that calls MPI_Init with NULL parameters, or a Fortran program. -.TP 1i -maxprocs -The number of processes in the job. -.TP 1i -soft -Open MPI does not support the \fIsoft\fP option for specifying the number of processes to be executed, so this value is set to the same as \fImaxprocs\fP. -.TP 1i -host -The name of the host this process is executing upon - the value returned from \fIgethostname()\fP. -.TP 1i -arch -The architecture of the host this process is executing upon. This value indicates the underlying chip architecture (e.g., x86_64), if it can be determined. -.TP 1i -wdir -The working directory at the time of process launch by mpiexec. Note that this value will not be set for processes launched as singletons as there is no reliable way for the MPI library to determine the location. -.TP 1i -file -Although specified by the MPI-3 standard, no value is currently set for this field. 
-.TP 1i -thread_level -The requested MPI thread level - note that this may differ from the \fIactual\fP MPI thread level of the application. -.TP 1i -ompi_num_apps -The number of application contexts in an MPMD job. -This is an Open MPI-specific field and value. -.TP 1i -ompi_np -The number of processes in each application context, provided as a space-delimited list of integers. -This is an Open MPI-specific field and value. -.TP 1i -ompi_first_rank -The MPI rank of the first process in each application context, provided as a space-delimited list of integers -This is an Open MPI-specific field and value. -.TP 1i -ompi_positioned_file_dir -If Open MPI was asked to pre-position files, this field provides the top-level directory where those files were place. -This is an Open MPI-specific field and value. - -.SH ERRORS -When calling MPI_INFO_GET(3), the \fIflag\fP parameter will be set to zero (false) if a value for the field has not been set. -.br - -.SH SEE ALSO -.ft r -MPI_Info_get diff --git a/ompi/mpi/man/man3/MPI_Info_f2c.3in b/ompi/mpi/man/man3/MPI_Info_f2c.3in deleted file mode 100644 index a13fce697dd..00000000000 --- a/ompi/mpi/man/man3/MPI_Info_f2c.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Comm_f2c.3 diff --git a/ompi/mpi/man/man3/MPI_Info_free.3in b/ompi/mpi/man/man3/MPI_Info_free.3in deleted file mode 100644 index 138e9bb429c..00000000000 --- a/ompi/mpi/man/man3/MPI_Info_free.3in +++ /dev/null @@ -1,69 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Info_free 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Info_free\fP \- Frees an info object. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Info_free(MPI_Info \fI*info\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_INFO_FREE(\fIINFO, IERROR\fP) - INTEGER \fIINFO, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Info_free(\fIinfo\fP, \fIierror\fP) - TYPE(MPI_Info), INTENT(INOUT) :: \fIinfo\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -info -Info object (handle). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Info_free frees \fIinfo\fP and sets it to MPI_INFO_NULL. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft r -MPI_Info_create -.br -MPI_Info_delete -.br -MPI_Info_dup -.br -MPI_Info_get -.br -MPI_Info_set -.br - diff --git a/ompi/mpi/man/man3/MPI_Info_get.3in b/ompi/mpi/man/man3/MPI_Info_get.3in deleted file mode 100644 index 22df455ec9b..00000000000 --- a/ompi/mpi/man/man3/MPI_Info_get.3in +++ /dev/null @@ -1,100 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Info_get 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Info_get\fP \- Retrieves the value associated with a key in an info object. 
- -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Info_get(MPI_Info \fIinfo\fP, const char \fI*key\fP, int \fIvaluelen\fP, char \fI*value\fP, int *\fIflag\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_INFO_GET(\fIINFO, KEY, VALUELEN, VALUE, FLAG, IERROR\fP) - INTEGER \fIINFO, VALUELEN, IERROR\fP - CHARACTER*(*) \fIKEY, VALUE\fP - LOGICAL \fIFLAG\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Info_get(\fIinfo\fP, \fIkey\fP, \fIvaluelen\fP, \fIvalue\fP, \fIflag\fP, \fIierror\fP) - TYPE(MPI_Info), INTENT(IN) :: \fIinfo\fP - CHARACTER(LEN=*), INTENT(IN) :: \fIkey\fP - INTEGER, INTENT(IN) :: \fIvaluelen\fP - CHARACTER(LEN=valuelen), INTENT(OUT) :: \fIvalue\fP - LOGICAL, INTENT(OUT) :: \fIflag\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -info -Info object (handle). -.ft R -.TP 1i -key -Key (string). -.ft R -.TP 1i -valuelen -Length of value arg (integer). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -value -Value (string). -.ft R -.TP 1i -flag -Returns true if key defined, false if not (boolean). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Info_get retrieves the value associated with \fIkey\fP in a previous call to MPI_Info_set. If such a key exists, it sets \fIflag\fP to true and returns the value in \fIvalue\fP; otherwise it sets \fIflag\fP to false and leaves \fIvalue\fP unchanged. \fIvaluelen\fP is the number of characters available in value. If it is less than the actual size of the value, the returned value is truncated. In C, \fIvaluelen\fP should be one less than the amount of allocated space to allow for the null terminator. -.sp -If \fIkey\fP is larger than MPI_MAX_INFO_KEY, the call is erroneous. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. 
-.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft r -MPI_Info_create -.br -MPI_Info_delete -.br -MPI_Info_dup -.br -MPI_Info_free -.br -MPI_Info_get_valuelen -.br -MPI_Info_get_nkeys -.br -MPI_Info_get_nthkey -.br -MPI_Info_set -.br - diff --git a/ompi/mpi/man/man3/MPI_Info_get_nkeys.3in b/ompi/mpi/man/man3/MPI_Info_get_nkeys.3in deleted file mode 100644 index dc953933631..00000000000 --- a/ompi/mpi/man/man3/MPI_Info_get_nkeys.3in +++ /dev/null @@ -1,70 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Info_get_nkeys 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Info_get_nkeys\fP \- Gets the number of keys currently defined in an info object. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Info_get_nkeys(MPI_Info \fIinfo\fP, int \fI*nkeys\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_INFO_GET_NKEYS(\fIINFO, NKEYS, IERROR\fP) - INTEGER \fIINFO, NKEYS, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Info_get_nkeys(\fIinfo\fP, \fInkeys\fP, \fIierror\fP) - TYPE(MPI_Info), INTENT(IN) :: \fIinfo\fP - INTEGER, INTENT(OUT) :: \fInkeys\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -info -Info object (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -nkeys -Number of defined keys (integer). 
-.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Info_get_nkeys returns the number of currently defined keys in \fIinfo\fP. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft r -MPI_Info_get -.br -MPI_Info_get_nthkey -.br -MPI_Info_get_valuelen -.br - diff --git a/ompi/mpi/man/man3/MPI_Info_get_nthkey.3in b/ompi/mpi/man/man3/MPI_Info_get_nthkey.3in deleted file mode 100644 index 6995a3ea610..00000000000 --- a/ompi/mpi/man/man3/MPI_Info_get_nthkey.3in +++ /dev/null @@ -1,76 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Info_get_nthkey 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Info_get_nthkey\fP \- Returns the \fIn\fPth defined key in \fIinfo\fP. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Info_get_nthkey(MPI_Info \fIinfo\fP, int \fIn\fP, char \fI*key\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_INFO_GET_NTHKEY(\fIINFO, N, KEY, IERROR\fP) - INTEGER \fIINFO, N, IERROR\fP - CHARACTER*(*) \fIKEY\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Info_get_nthkey(\fIinfo\fP, \fIn\fP, \fIkey\fP, \fIierror\fP) - TYPE(MPI_Info), INTENT(IN) :: \fIinfo\fP - INTEGER, INTENT(IN) :: \fIn\fP - CHARACTER(LEN=*), INTENT(OUT) :: \fIkey\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -info -Info object (handle). -.ft R -.TP 1i -n -Key number (integer). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -key -Key (string). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Info_get_nthkey returns the \fIn\fPth defined key in \fIinfo\fP. Keys are numbered 0\...\fIN\fP - 1 where \fIN\fP is the value returned by MPI_Info_get_nkeys. All keys between 0 and \fIN\fP - 1 are guaranteed to be defined. The number of a given key does not change as long as \fIinfo\fP is not modified with MPI_Info_set or MPI_Info_delete. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft r -MPI_Info_get -.br -MPI_Info_get_nkeys -.br -MPI_Info_get_valuelen -.br - diff --git a/ompi/mpi/man/man3/MPI_Info_get_string.3in b/ompi/mpi/man/man3/MPI_Info_get_string.3in deleted file mode 100644 index 4d848efb418..00000000000 --- a/ompi/mpi/man/man3/MPI_Info_get_string.3in +++ /dev/null @@ -1,102 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. 
All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Info_get 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Info_get_string\fP \- Retrieves the value associated with a key in an info object. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Info_get_string(MPI_Info \fIinfo\fP, const char \fI*key\fP, int *\fIbuflen\fP, char \fI*value\fP, int *\fIflag\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_INFO_GET_STRING(\fIINFO, KEY, BUFLEN, VALUE, FLAG, IERROR\fP) - INTEGER \fIINFO, BUFLEN, IERROR\fP - CHARACTER*(*) \fIKEY, VALUE\fP - LOGICAL \fIFLAG\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Info_get_string(\fIinfo\fP, \fIkey\fP, \fIbuflen\fP, \fIvalue\fP, \fIflag\fP, \fIierror\fP) - TYPE(MPI_Info), INTENT(IN) :: \fIinfo\fP - CHARACTER(LEN=*), INTENT(IN) :: \fIkey\fP - INTEGER, INTENT(INOUT) :: \fIbuflen\fP - CHARACTER(LEN=valuelen), INTENT(OUT) :: \fIvalue\fP - LOGICAL, INTENT(OUT) :: \fIflag\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -info -Info object (handle). -.ft R -.TP 1i -key -Key (string). - -.SH OUTPUT PARAMETER -.ft R -.ft 1i -buflen -On entry, length of value arg. On return, set to required size to hold value string (integer). -.ft R -.TP 1i -value -Value (string). -.ft R -.TP 1i -flag -Returns true if key defined, false if not (boolean). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Info_get_string retrieves the value associated with \fIkey\fP from \fIinfo\fP, if any. If such a key exists in info, it sets \fIflag\fP to true and returns the value in \fIvalue\fP, otherwise it sets -flag to false and leaves value unchanged. 
\fIbuflen\fP on input is the size of the provided buffer, for the output of buflen it is the size of the buffer needed to store the value string. -If the buflen passed into the function is less than the actual size needed to store the value string (including null terminator in C), the value is truncated. On return, -the value of \fIbuflen\fP will be set to the required buffer size to hold the value string. If buflen is set to 0, value is not changed. In C, \fIbuflen\fP includes the required space for the -null terminator. In C, this function returns a null terminated string in all cases where the \fIbuflen\fP input value is greater than 0. - -If \fIkey\fP is larger than MPI_MAX_INFO_KEY, the call is erroneous. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft r -MPI_Info_create -.br -MPI_Info_delete -.br -MPI_Info_dup -.br -MPI_Info_free -.br -MPI_Info_get_nkeys -.br -MPI_Info_get_nthkey -.br -MPI_Info_set -.br - diff --git a/ompi/mpi/man/man3/MPI_Info_get_valuelen.3in b/ompi/mpi/man/man3/MPI_Info_get_valuelen.3in deleted file mode 100644 index d1e91840e60..00000000000 --- a/ompi/mpi/man/man3/MPI_Info_get_valuelen.3in +++ /dev/null @@ -1,86 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. 
All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Info_get_valuelen 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Info_get_valuelen\fP \- Retrieves the length of the key value associated with an info object. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Info_get_valuelen(MPI_Info \fIinfo\fP, const char \fI*key\fP, - int \fI*valuelen\fP, int \fI*flag\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_INFO_GET_VALUELEN(\fIINFO, KEY, VALUELEN, FLAG, IERROR\fP) - INTEGER \fIINFO, VALUELEN, IERROR\fP - LOGICAL \fIFLAG\fP - CHARACTER*(*) \fIKEY\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Info_get_valuelen(\fIinfo\fP, \fIkey\fP, \fIvaluelen\fP, \fIflag\fP, \fIierror\fP) - TYPE(MPI_Info), INTENT(IN) :: \fIinfo\fP - CHARACTER(LEN=*), INTENT(IN) :: \fIkey\fP - INTEGER, INTENT(OUT) :: \fIvaluelen\fP - LOGICAL, INTENT(OUT) :: \fIflag\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -info -Info object (handle). -.ft R -.TP 1i -key -Key (string). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -valuelen -Length of value arg (integer). -.ft R -.TP 1i -flag -Returns true if key defined, false if not (boolean). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Info_get_valuelen retrieves the length of the \fIvalue\fP associated with \fIkey\fP. If \fIkey\fP is defined, \fIvaluelen\fP is set to the length of its associated value and \fIflag\fP is set to true. If \fIkey\fP is not defined, \fIvaluelen\fP is not touched and \fIflag\fP is set to false. The length returned in C does not include the end-of-string character. -.sp -If \fIkey\fP is larger than MPI_MAX_INFO_KEY, the call is erroneous. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. 
By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft r -MPI_Info_get -.br -MPI_Info_get_nkeys -.br -MPI_Info_get_nthkey -.br - diff --git a/ompi/mpi/man/man3/MPI_Info_set.3in b/ompi/mpi/man/man3/MPI_Info_set.3in deleted file mode 100644 index c13bdb8e0bb..00000000000 --- a/ompi/mpi/man/man3/MPI_Info_set.3in +++ /dev/null @@ -1,81 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Info_set 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Info_set\fP \- Adds a key/value pair to \fIinfo\fP. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Info_set(MPI_Info \fIinfo\fP, char \fI*key\fP, char \fI*value\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_INFO_SET(\fIINFO, KEY, VALUE, IERROR\fP) - INTEGER \fIINFO, IERROR\fP - CHARACTER*(*) \fIKEY, VALUE\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Info_set(\fIinfo\fP, \fIkey\fP, \fIvalue\fP, \fIierror\fP) - TYPE(MPI_Info), INTENT(IN) :: \fIinfo\fP - CHARACTER(LEN=*), INTENT(IN) :: \fIkey\fP, \fIvalue\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -info -Info object (handle). - -.SH INPUT PARAMETERS -.ft R -.TP 1i -key -Key (string). -.ft R -.TP 1i -value -Value (string). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). 
- -.SH DESCRIPTION -.ft R -MPI_Info_set adds the (key,value) pair to \fIinfo\fP and overrides the value if a value for the same key was previously set. The \fIkey\fP and \fIvalue\fP parameters are null-terminated strings in C. In Fortran, leading and trailing spaces in \fIkey\fP and \fIvalue\fP are stripped. If either \fIkey\fP or \fIvalue\fP is larger than the allowed maximums, the error MPI_ERR_INFO_KEY or MPI_ERR_INFO_VALUE is raised, respectively. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft r -MPI_Info_create -.br -MPI_Info_delete -.br -MPI_Info_dup -.br -MPI_Info_free -.br -MPI_Info_set -.br - diff --git a/ompi/mpi/man/man3/MPI_Init.3in b/ompi/mpi/man/man3/MPI_Init.3in deleted file mode 100644 index c1790c9e38a..00000000000 --- a/ompi/mpi/man/man3/MPI_Init.3in +++ /dev/null @@ -1,104 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2010-2015 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Init 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" - -.SH NAME -\fBMPI_Init\fP \- Initializes the MPI execution environment - -.SH SYNTAX -.ft R - -.SH C Syntax -.nf -#include -int MPI_Init(int *\fIargc\fP, char ***\fIargv\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_INIT(\fIIERROR\fP) - INTEGER \fIIERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Init(\fIierror\fP) - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -argc -C only: Pointer to the number of arguments. -.TP 1i -argv -C only: Argument vector. - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -This routine, or MPI_Init_thread, must be called before most other MPI -routines are called. There are a small number of errors, such as -MPI_Initialized and MPI_Finalized. MPI can be initialized at most -once; subsequent calls to MPI_Init or MPI_Init_thread are erroneous. -.sp -All MPI programs must contain a call to MPI_Init or -MPI_Init_thread. Open MPI accepts the C \fIargc\fP and \fIargv\fP -arguments to main, but neither modifies, interprets, nor distributes -them: -.sp -.nf - { - /* declare variables */ - MPI_Init(&argc, &argv); - /* parse arguments */ - /* main program */ - MPI_Finalize(); - } -.fi - -.SH NOTES -.ft R -The Fortran version does not have provisions for \fIargc\fP and -\fIargv\fP and takes only IERROR. -.sp -The MPI Standard does not say what a program can do before an MPI_Init -or after an MPI_Finalize. In the Open MPI implementation, it should do -as little as possible. In particular, avoid anything that changes the -external state of the program, such as opening files, reading standard -input, or writing to standard output. - -.SH ERRORS -.ft R -Almost all MPI routines return an error value; C routines as -the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. 
The error handler may be changed with -MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. -.sp -See the MPI man page for a full list of MPI error codes. - -.SH SEE ALSO -.ft R -.nf -MPI_Init_thread -MPI_Initialized -MPI_Finalize -MPI_Finalized diff --git a/ompi/mpi/man/man3/MPI_Init_thread.3in b/ompi/mpi/man/man3/MPI_Init_thread.3in deleted file mode 100644 index 800e1cb381c..00000000000 --- a/ompi/mpi/man/man3/MPI_Init_thread.3in +++ /dev/null @@ -1,172 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2010-2020 Cisco Systems, Inc. All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Init_thread 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -. -.SH NAME -\fBMPI_Init_thread\fP \- Initializes the MPI execution environment -. -.SH SYNTAX -.ft R -. -.SH C Syntax -.nf -#include -int MPI_Init_thread(int *\fIargc\fP, char ***\fIargv\fP, - int \fIrequired\fP, int *\fIprovided\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_INIT_THREAD(\fIREQUIRED, PROVIDED, IERROR\fP) - INTEGER \fIREQUIRED, PROVIDED, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Init_thread(\fIrequired\fP, \fIprovided\fP, \fIierror\fP) - INTEGER, INTENT(IN) :: \fIrequired\fP - INTEGER, INTENT(OUT) :: \fIprovided\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -argc -C only: Pointer to the number of arguments. -.TP 1i -argv -C only: Argument vector. -.TP 1i -required -Desired level of thread support (integer). -. -. -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -provided -Available level of thread support (integer). -.TP 1i -IERROR -Fortran only: Error status (integer). -. -. 
-.SH DESCRIPTION -.ft R -This routine, or MPI_Init, must be called before most other MPI -routines are called. There are a small number of exceptions, such as -MPI_Initialized and MPI_Finalized. MPI can be initialized at most -once; subsequent calls to MPI_Init or MPI_Init_thread are erroneous. -.sp -MPI_Init_thread, as compared to MPI_Init, has a provision to request a -certain level of thread support in \fIrequired\fP: -.TP 2.4i -MPI_THREAD_SINGLE -Only one thread will execute. -.TP 2.4i -MPI_THREAD_FUNNELED -If the process is multithreaded, only the thread that called -MPI_Init_thread will make MPI calls. -.TP 2.4i -MPI_THREAD_SERIALIZED -If the process is multithreaded, only one thread will make MPI library -calls at one time. -.TP 2.4i -MPI_THREAD_MULTIPLE -If the process is multithreaded, multiple threads may call MPI at once -with no restrictions. -. -.PP -The level of thread support available to the program is set in -\fIprovided\fP. In Open MPI, the value is dependent on how the library was -configured and built. Note that there is no guarantee that -\fIprovided\fP will be greater than or equal to \fIrequired\fP. -.sp -Also note that calling MPI_Init_thread with a -.I required -value of -.I MPI_THREAD_SINGLE -is equivalent to calling MPI_Init. -.sp -All MPI programs must contain a call to MPI_Init or -MPI_Init_thread. Open MPI accepts the C \fIargc\fP and \fIargv\fP -arguments to main, but neither modifies, interprets, nor distributes -them: -.sp -.nf - { - /* declare variables */ - MPI_Init_thread(&argc, &argv, req, &prov); - /* parse arguments */ - /* main program */ - MPI_Finalize(); - } -.fi -. -.SH NOTES -.ft R -The Fortran version does not have provisions for \fIargc\fP and -\fIargv\fP and takes only IERROR. -.sp -It is the caller's responsibility to check the value of \fIprovided\fP, -as it may be less than what was requested in \fIrequired\fP. 
-.sp -The MPI Standard does not say what a program can do before an -MPI_Init_thread or after an MPI_Finalize. In the Open MPI -implementation, it should do as little as possible. In particular, -avoid anything that changes the external state of the program, such as -opening files, reading standard input, or writing to standard output. -. -. -.SH MPI_THREAD_MULTIPLE Support -. -MPI_THREAD_MULTIPLE support is included if the environment in which -Open MPI was built supports threading. You can check the -output of -.BR ompi_info (1) -to see if Open MPI has MPI_THREAD_MULTIPLE support: -. -.PP -.nf -shell$ ompi_info | grep "Thread support" - Thread support: posix (MPI_THREAD_MULTIPLE: yes, OPAL support: yes, OMPI progress: no, Event lib: yes) -shell$ -.fi -. -.PP -The "MPI_THREAD_MULTIPLE: yes" portion of the above output indicates -that Open MPI was compiled with MPI_THREAD_MULTIPLE support. -. -.PP -Note that there is a small performance penalty for using -MPI_THREAD_MULTIPLE support; latencies for short messages will be -higher as compared to when using MPI_THREAD_SINGLE, for example. -. -. -.SH ERRORS -.ft R -Almost all MPI routines return an error value; C routines as -the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. -. 
-.SH SEE ALSO -.ft R -.nf -MPI_Init -MPI_Initialized -MPI_Finalize -MPI_Finalized diff --git a/ompi/mpi/man/man3/MPI_Initialized.3in b/ompi/mpi/man/man3/MPI_Initialized.3in deleted file mode 100644 index ce3764ea89f..00000000000 --- a/ompi/mpi/man/man3/MPI_Initialized.3in +++ /dev/null @@ -1,65 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2010-2015 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Initialized 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Initialized\fP \- Checks whether MPI has been initialized - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Initialized(int *\fIflag\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_INITIALIZED(\fIFLAG, IERROR\fP) - LOGICAL \fIFLAG\fP - INTEGER \fIIERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Initialized(\fIflag\fP, \fIierror\fP) - LOGICAL, INTENT(OUT) :: \fIflag\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -flag -True if MPI has been initialized, and false otherwise (logical). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -This routine may be used to determine whether MPI has been -initialized. It is one of a small number of routines that may be -called before MPI is initialized and after MPI has been finalized -(MPI_Finalized is another). - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. 
The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.nf -MPI_Init -MPI_Init_thread -MPI_Finalize -MPI_Finalized diff --git a/ompi/mpi/man/man3/MPI_Intercomm_create.3in b/ompi/mpi/man/man3/MPI_Intercomm_create.3in deleted file mode 100644 index e2b56b53e85..00000000000 --- a/ompi/mpi/man/man3/MPI_Intercomm_create.3in +++ /dev/null @@ -1,112 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Intercomm_create 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Intercomm_create\fP \- Creates an intercommunicator from two intracommunicators. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Intercomm_create(MPI_Comm \fIlocal_comm\fP, int\fI local_leader\fP, - MPI_Comm\fI peer_comm\fP, int\fI remote_leader\fP, int\fI tag\fP, MPI_Comm\fI *newintercomm\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_INTERCOMM_CREATE(\fILOCAL_COMM, LOCAL_LEADER, PEER_COMM, - REMOTE_LEADER, TAG, NEWINTERCOMM, IERROR\fP) - INTEGER \fILOCAL_COMM, LOCAL_LEADER, PEER_COMM, REMOTE_LEADER\fP - INTEGER \fITAG, NEWINTERCOMM, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Intercomm_create(\fIlocal_comm\fP, \fIlocal_leader\fP, \fIpeer_comm\fP, \fIremote_leader\fP, - \fItag\fP, \fInewintercomm\fP, \fIierror\fP) - TYPE(MPI_Comm), INTENT(IN) :: \fIlocal_comm\fP, \fIpeer_comm\fP - INTEGER, INTENT(IN) :: \fIlocal_leader\fP, \fIremote_leader\fP, \fItag\fP - TYPE(MPI_Comm), INTENT(OUT) :: \fInewintercomm\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -local_comm -The communicator containing the process that initiates the inter-communication (handle). -.TP 1i -local_leader -Rank of local group leader in local_comm (integer). -.TP 1i -peer_comm -"Peer" communicator; significant only at the local_leader (handle). -.TP 1i -remote_leader -Rank of remote group leader in peer_comm; significant only at the local_leader (integer). -.TP 1i -tag -Message tag used to identify new intercommunicator (integer). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -newintercomm -Created intercommunicator (handle). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -This call creates an intercommunicator. It is collective over the union of the local and remote groups. Processes should provide identical local_comm and local_leader arguments within each group. Wildcards are not permitted for remote_leader, local_leader, and tag. -.sp -This call uses point-to-point communication with communicator peer_comm, -and with tag tag between the leaders. Thus, care must be taken that there be no pending communication on peer_comm that could interfere with this communication. 
- -If multiple MPI_Intercomm_creates are being made, they should use different tags (more precisely, they should ensure that the local and remote leaders are using different tags for each MPI_intercomm_create). - -.SH NOTES -We recommend using a dedicated peer communicator, such as a duplicate of MPI_COMM_WORLD, to avoid trouble with peer communicators. -.sp -The MPI 1.1 Standard contains two mutually exclusive comments on the -input intracommunicators. One says that their respective groups must be -disjoint; the other that the leaders can be the same process. After -some discussion by the MPI Forum, it has been decided that the groups must -be disjoint. Note that the -.B reason -given for this in the standard is -.B not -the reason for this choice; rather, the -.B other -operations on -intercommunicators (like -.I MPI_Intercomm_merge -) do not make sense if the -groups are not disjoint. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. 
- -.SH SEE ALSO -MPI_Intercomm_merge -.br -MPI_Comm_free -.br -MPI_Comm_remote_group -.br -MPI_Comm_remote_size - - diff --git a/ompi/mpi/man/man3/MPI_Intercomm_create_from_groups.3.md b/ompi/mpi/man/man3/MPI_Intercomm_create_from_groups.3.md deleted file mode 100644 index 6cd96541653..00000000000 --- a/ompi/mpi/man/man3/MPI_Intercomm_create_from_groups.3.md +++ /dev/null @@ -1,92 +0,0 @@ -# Name - -`MPI_Intercomm_create_from_groups` - Creates a new inter-communicator from a local and remote group and stringtag - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Intercomm_create_from_groups(MPI_Group local_group, int local_leader, MPI_Group remote_group, int remote_leader, const char *stringtag, MPI_Info info, MPI_Errhandler errhandler, MPI_Comm *newintercomm) -``` - -## Fortran Syntax - -```fortran -USE MPI -! or the older form: INCLUDE 'mpif.h' - -MPI_INTERCOMM_CREATE_FROM_GROUPS(LOCAL_GROUP, LOCAL_LEADER, REMOTE_GROUP, REMOTE_LEADER, STRINGTAG, INFO, ERRHANDLER, NEWINTERCOMM, IERROR) - INTEGER LOCAL_GROUP, LOCAL_LEADER, REMOTE_GROUP, REMOTE_LEADER, INFO, ERRHANDLER, NEWINTERCOMM, IERROR - CHARACTER*(*) STRINGTAG -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Intercomm_create_from_groups(local_group, local_leader, remote_group, remote_leader, stringtag, info, errhandler, newintercomm, ierror) - TYPE(MPI_Group), INTENT(IN) :: local_group, remote_group - INTEGER, INTENT(IN) :: local_leader, remote_leader - CHARACTER(LEN=*), INTENT(IN) :: stringtag - TYPE(MPI_Info), INTENT(IN) :: info - TYPE(MPI_Errhandler), INTENT(IN) :: errhandler - TYPE(MPI_Comm), INTENT(OUT) :: newintercomm - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Input Parameters - -* `local_group` : Local group (handler) -* `local_leader` : rank of local group leader in local_group (integer) -* `remote_group` : Remote group (handler) -* `remote_leader` : rank of remote leader in remote_group, significant only at local_leader (integer) -* `stringtag` : Unique identifier for 
this operation (string) -* `info` : info object (handler) -* `errhandler` : error handler to be attached to the new inter-communicator (handle) - -# Output Parameters - -* `newintercomm` : New inter-communicator (handle). -* `IERROR` : Fortran only: Error status (integer). - -# Description - -`MPI_Intercomm_create_from_groups` creates an inter-communicator. Unlike `MPI_Intercomm_create`, this function -uses as input previously defined, disjoint local and remote groups. The calling MPI -process must be a member of the local group. The call is collective over the union of -the local and remote groups. All involved MPI processes shall provide an identical value -for the `stringtag` argument. Within each group, all MPI processes shall provide identical -`local_group`, `local_leader` arguments. Wildcards are not permitted for the -`remote_leader` or `local_leader` arguments. The `stringtag` argument serves the same purpose -as the `stringtag` used in the `MPI_Comm_create_from_group` function; it differentiates -concurrent calls in a multithreaded environment. The `stringtag` shall not exceed -`MPI_MAX_STRINGTAG_LEN` characters in length. For C, this includes space for a null terminating -character. In the event that MPI_GROUP_EMPTY is supplied as the `local_group` or `remote_group1 or both, then the -call is a local operation and MPI_COMM_NULL is returned as the newintercomm`. - -# Notes - -The `errhandler` argument specifies an error handler to be attached to the new inter-communicator. -The `info` argument provides hints and assertions, possibly MPI implementation dependent, which -indicate desired characteristics and guide communicator creation. MPI_MAX_STRINGTAG_LEN shall have a value -of at least 63. - - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. -Before the error value is returned, the current MPI error handler is -called. 
By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Comm_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. - -# See Also - -[`MPI_Comm_create_from_group`(3)](MPI_Comm_create_from_group.html) diff --git a/ompi/mpi/man/man3/MPI_Intercomm_merge.3in b/ompi/mpi/man/man3/MPI_Intercomm_merge.3in deleted file mode 100644 index bb2b7ed9b34..00000000000 --- a/ompi/mpi/man/man3/MPI_Intercomm_merge.3in +++ /dev/null @@ -1,75 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2010-2015 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Intercomm_merge 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Intercomm_merge\fP \- Creates an intracommunicator from an intercommunicator. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Intercomm_merge(MPI_Comm \fIintercomm\fP, int\fI high\fP, - MPI_Comm\fI *newintracomm\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_INTERCOMM_MERGE(\fIINTERCOMM, HIGH, NEWINTRACOMM, IERROR\fP) - INTEGER \fIINTERCOMM, NEWINTRACOMM, IERROR\fP - LOGICAL \fIHIGH\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Intercomm_merge(\fIintercomm\fP, \fIhigh\fP, \fInewintracomm\fP, \fIierror\fP) - TYPE(MPI_Comm), INTENT(IN) :: \fIintercomm\fP - LOGICAL, INTENT(IN) :: \fIhigh\fP - TYPE(MPI_Comm), INTENT(OUT) :: \fInewintracomm\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -intercomm -Intercommunicator (type indicator). -.TP 1i -high -Used to order the groups of the two intracommunicators within comm when creating the new communicator (type indicator). 
- -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -newintracomm -Created intracommunicator (type indicator). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -This function creates an intracommunicator from the union of the two groups that are associated with intercomm. All processes should provide the same high value within each of the two groups. If processes in one group provide the value high = false and processes in the other group provide the value high = true, then the union orders the "low" group before the "high" group. If all processes provide the same high argument, then the order of the union is arbitrary. This call is blocking and collective within the union of the two groups. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -MPI_Intercomm_create -.br -MPI_Comm_free - - - - diff --git a/ompi/mpi/man/man3/MPI_Iprobe.3in b/ompi/mpi/man/man3/MPI_Iprobe.3in deleted file mode 100644 index 856fe5a684d..00000000000 --- a/ompi/mpi/man/man3/MPI_Iprobe.3in +++ /dev/null @@ -1,96 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Iprobe 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Iprobe\fP \- Nonblocking test for a message. 
- -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Iprobe(int \fIsource\fP, int\fI tag\fP, MPI_Comm\fI comm\fP, int\fI *flag\fP, - MPI_Status\fI *status\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_IPROBE(\fISOURCE, TAG, COMM, FLAG, STATUS, IERROR\fP) - LOGICAL \fIFLAG\fP - INTEGER \fISOURCE, TAG, COMM, STATUS(MPI_STATUS_SIZE), IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Iprobe(\fIsource\fP, \fItag\fP, \fIcomm\fP, \fIflag\fP, \fIstatus\fP, \fIierror\fP) - INTEGER, INTENT(IN) :: \fIsource\fP, \fItag\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - LOGICAL, INTENT(OUT) :: \fIflag\fP - TYPE(MPI_Status) :: \fIstatus\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -source -Source rank or MPI_ANY_SOURCE (integer). -.TP 1i -tag -Tag value or MPI_ANY_TAG (integer). -.TP 1i -comm -Communicator (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -flag -Message-waiting flag (logical). -.TP 1i -status -Status object (status). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -The MPI_Probe and MPI_Iprobe operations allow checking of incoming messages without actual receipt of them. The user can then decide how to receive them, based on the information returned by the probe (basically, the information returned by status). In particular, the user may allocate memory for the receive buffer, according to the length of the probed message. -.sp -MPI_Iprobe(source, tag, comm, flag, status) returns flag = true if there is a message that can be received and that matches the pattern specified by the arguments source, tag, and comm. The call matches the same message that would have been received by a call to MPI_Recv(\&..., source, tag, comm, status) executed at the same point in the program, and returns in status the same value that would have been returned by MPI_Recv(). 
Otherwise, the call returns flag = false, and leaves status undefined. -.sp -If MPI_Iprobe returns flag = true, then the content of the status object can be subsequently accessed as described in Section 3.2.5 of the MPI-1 Standard, "Return Status," to find the source, tag, and length of the probed message. -.sp -A subsequent receive executed with the same context, and the source and tag returned in status by MPI_Iprobe will receive the message that was matched by the probe if no other intervening receive occurs after the probe. If the receiving process is multithreaded, it is the user's responsibility to ensure that the last condition holds. -.sp -The source argument of MPI_Probe can be MPI_ANY_SOURCE, and the tag argument can be MPI_ANY_TAG, so that one can probe for messages from an arbitrary source and/or with an arbitrary tag. However, a specific communication context must be provided with the comm argument. -.sp -If your application does not need to examine the \fIstatus\fP field, you can save resources by using the predefined constant MPI_STATUS_IGNORE as a special value for the \fIstatus\fP argument. -.sp -It is not necessary to receive a message immediately after it has been probed for, and the same message may be probed for several times before it is received. -.sp -.SH NOTE -Users of libmpi-mt should remember that two threads may do an MPI_Iprobe that actually returns true for the same message for both threads. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. 
- -.SH SEE ALSO -.ft R -.sp -MPI_Probe -.br -MPI_Cancel - diff --git a/ompi/mpi/man/man3/MPI_Irecv.3in b/ompi/mpi/man/man3/MPI_Irecv.3in deleted file mode 100644 index a4da7c51a2e..00000000000 --- a/ompi/mpi/man/man3/MPI_Irecv.3in +++ /dev/null @@ -1,95 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2010-2015 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Irecv 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Irecv\fP \- Starts a standard-mode, nonblocking receive. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Irecv(void *\fIbuf\fP, int\fI count\fP, MPI_Datatype\fI datatype\fP, - int\fI source\fP, int\fI tag\fP, MPI_Comm\fI comm\fP, MPI_Request\fI *request\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_IRECV(\fIBUF, COUNT, DATATYPE, SOURCE, TAG, COMM, REQUEST, - IERROR\fP) - \fIBUF\fP(*) - INTEGER \fICOUNT, DATATYPE, SOURCE, TAG, COMM, REQUEST, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Irecv(\fIbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIsource\fP, \fItag\fP, \fIcomm\fP, \fIrequest\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP, \fIsource\fP, \fItag\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -buf -Initial address of receive buffer (choice). -.TP 1i -count -Number of elements in receive buffer (integer). -.TP 1i -datatype -Datatype of each receive buffer element (handle). -.TP 1i -source -Rank of source (integer). -.TP 1i -tag -Message tag (integer). -.TP 1i -comm -Communicator (handle). 
- -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -request -Communication request (handle). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -Nonblocking calls allocate a communication request object and associate it with the request handle (the argument request). The request can be used later to query the status of the communication or wait for its completion. -.sp -A nonblocking receive call indicates that the system may start writing data into the receive buffer. The receiver should not access any part of the receive buffer after a nonblocking receive operation is called, until the receive completes. -.sp -A receive request can be determined being completed by calling the MPI_Wait, MPI_Waitany, MPI_Test, or MPI_Testany with request returned by this function. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. 
- -.SH SEE ALSO -MPI_Recv -MPI_Probe -MPI_Test -MPI_Testany -MPI_Wait -MPI_Waitany -.br - diff --git a/ompi/mpi/man/man3/MPI_Ireduce.3in b/ompi/mpi/man/man3/MPI_Ireduce.3in deleted file mode 100644 index f8c65fb7223..00000000000 --- a/ompi/mpi/man/man3/MPI_Ireduce.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Reduce.3 diff --git a/ompi/mpi/man/man3/MPI_Ireduce_scatter.3in b/ompi/mpi/man/man3/MPI_Ireduce_scatter.3in deleted file mode 100644 index 4f03aec6068..00000000000 --- a/ompi/mpi/man/man3/MPI_Ireduce_scatter.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Reduce_scatter.3 diff --git a/ompi/mpi/man/man3/MPI_Ireduce_scatter_block.3in b/ompi/mpi/man/man3/MPI_Ireduce_scatter_block.3in deleted file mode 100644 index f649a6c443d..00000000000 --- a/ompi/mpi/man/man3/MPI_Ireduce_scatter_block.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Reduce_scatter_block.3 diff --git a/ompi/mpi/man/man3/MPI_Irsend.3in b/ompi/mpi/man/man3/MPI_Irsend.3in deleted file mode 100644 index a7d9e6cc21c..00000000000 --- a/ompi/mpi/man/man3/MPI_Irsend.3in +++ /dev/null @@ -1,86 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright (c) 2010-2015 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Irsend 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Irsend\fP \- Starts a ready-mode nonblocking send. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Irsend(const void *\fIbuf\fP, int\fI count\fP, MPI_Datatype\fI datatype\fP, int\fI dest\fP, - int\fI tag\fP, MPI_Comm\fI comm\fP, MPI_Request\fI *request\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_IRSEND(\fIBUF, COUNT, DATATYPE, DEST, TAG, COMM, REQUEST, IERROR\fP) - \fIBUF\fP(*) - INTEGER \fICOUNT, DATATYPE, DEST, TAG, COMM, REQUEST, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Irsend(\fIbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIdest\fP, \fItag\fP, \fIcomm\fP, \fIrequest\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP, \fIdest\fP, \fItag\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -buf -Initial address of send buffer (choice). -.TP 1i -count -Number of elements in send buffer (integer). -.TP 1i -datatype -Datatype of each send buffer element (handle). -.TP 1i -dest -Rank of destination (integer). -.TP 1i -tag -Message tag (integer). -.TP 1i -comm -Communicator (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -request -Communication request (handle). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Irsend starts a ready-mode nonblocking send. Nonblocking calls allocate a communication request object and associate it with the request handle (the argument request). The request can be used later to query the status of the communication or to wait for its completion. -.sp -A nonblocking send call indicates that the system may start copying data out of the send buffer. The sender should not modify any part of the send buffer after a nonblocking send operation is called, until the send completes. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. 
By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -MPI_Rsend diff --git a/ompi/mpi/man/man3/MPI_Is_thread_main.3in b/ompi/mpi/man/man3/MPI_Is_thread_main.3in deleted file mode 100644 index 5a7c6218d70..00000000000 --- a/ompi/mpi/man/man3/MPI_Is_thread_main.3in +++ /dev/null @@ -1,71 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Is_thread_main 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" - -.SH NAME -\fBMPI_Is_thread_main\fP \- Determines if thread called MPI_Init - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Is_thread_main(int *\fIflag\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_IS_THREAD_MAIN(\fIFLAG, IERROR\fP) - LOGICAL \fIFLAG\fP - INTEGER \fIIERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Is_thread_main(\fIflag\fP, \fIierror\fP) - LOGICAL, INTENT(OUT) :: \fIflag\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH OUTPUT PARAMETERS -.TP 1i -flag -True if calling thread is main thread (boolean). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Is_thread_main is called by a thread to find out whether the -caller is the main thread (that is, the thread that called MPI_Init or -MPI_Init_thread). - -.SH ERRORS -.ft R -Almost all MPI routines return an error value; C routines as -the value of the function and Fortran routines in the last argument. 
-.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. -.sp -See the MPI man page for a full list of MPI error codes. - -.SH SEE ALSO -.ft R -.nf -MPI_Init -MPI_Init_thread - diff --git a/ompi/mpi/man/man3/MPI_Iscan.3in b/ompi/mpi/man/man3/MPI_Iscan.3in deleted file mode 100644 index 42cdcd65e92..00000000000 --- a/ompi/mpi/man/man3/MPI_Iscan.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Scan.3 diff --git a/ompi/mpi/man/man3/MPI_Iscatter.3in b/ompi/mpi/man/man3/MPI_Iscatter.3in deleted file mode 100644 index 05572bc5ca1..00000000000 --- a/ompi/mpi/man/man3/MPI_Iscatter.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Scatter.3 diff --git a/ompi/mpi/man/man3/MPI_Iscatterv.3in b/ompi/mpi/man/man3/MPI_Iscatterv.3in deleted file mode 100644 index 86a7f302ccf..00000000000 --- a/ompi/mpi/man/man3/MPI_Iscatterv.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Scatterv.3 diff --git a/ompi/mpi/man/man3/MPI_Isend.3in b/ompi/mpi/man/man3/MPI_Isend.3in deleted file mode 100644 index c10f4292a7a..00000000000 --- a/ompi/mpi/man/man3/MPI_Isend.3in +++ /dev/null @@ -1,94 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright (c) 2010-2015 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2007-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Isend 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Isend\fP \- Starts a standard-mode, nonblocking send. 
- -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Isend(const void *\fIbuf\fP, int\fI count\fP, MPI_Datatype\fI datatype\fP, int\fI dest\fP, - int\fI tag\fP, MPI_Comm\fI comm\fP, MPI_Request\fI *request\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_ISEND(\fIBUF, COUNT, DATATYPE, DEST, TAG, COMM, REQUEST, IERROR\fP) - \fIBUF\fP(*) - INTEGER \fICOUNT, DATATYPE, DEST, TAG, COMM, REQUEST, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Isend(\fIbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIdest\fP, \fItag\fP, \fIcomm\fP, \fIrequest\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP, \fIdest\fP, \fItag\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -buf -Initial address of send buffer (choice). -.TP 1i -count -Number of elements in send buffer (integer). -.TP 1i -datatype -Datatype of each send buffer element (handle). -.TP 1i -dest -Rank of destination (integer). -.TP 1i -tag -Message tag (integer). -.TP 1i -comm -Communicator (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -request -Communication request (handle). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Isend starts a standard-mode, nonblocking send. Nonblocking calls allocate a communication request object and associate it with the request handle (the argument request). The request can be used later to query the status of the communication or wait for its completion. -.sp -A nonblocking send call indicates that the system may start copying data out of the send buffer. The sender should not modify any part of the send buffer after a nonblocking send operation is called, until the send completes. 
-.sp -A send request can be determined being completed by calling the MPI_Wait, MPI_Waitany, MPI_Test, or MPI_Testany with request returned by this function. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -MPI_Send -MPI_Wait -MPI_Waitany -MPI_Test -MPI_Testany -.br - diff --git a/ompi/mpi/man/man3/MPI_Isendrecv.3in b/ompi/mpi/man/man3/MPI_Isendrecv.3in deleted file mode 100644 index 27188af6922..00000000000 --- a/ompi/mpi/man/man3/MPI_Isendrecv.3in +++ /dev/null @@ -1,118 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Isendrecv 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Isendrecv\fP \- Sends and receives a message. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Isendrecv(const void *\fIsendbuf\fP, int\fI sendcount\fP, MPI_Datatype\fI sendtype\fP, - int\fI dest\fP, int\fI sendtag\fP, void\fI *recvbuf\fP, int\fI recvcount\fP, - MPI_Datatype\fI recvtype\fP, int\fI source\fP, int\fI recvtag\fP, - MPI_Comm\fI comm\fP, MPI_Request\fI *request\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_ISENDRECV(\fISENDBUF, SENDCOUNT, SENDTYPE, DEST, SENDTAG, - RECVBUF, RECVCOUNT, RECVTYPE, SOURCE, RECVTAG, COMM, - REQUEST, IERROR\fP) - \fISENDBUF(*), RECVBUF(*)\fP - INTEGER \fISENDCOUNT, SENDTYPE, DEST, SENDTAG\fP - INTEGER \fIRECVCOUNT, RECVTYPE, SOURCE, RECVTAG, COMM\fP - INTEGER \fIREQUEST, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Isendrecv(\fIsendbuf\fP, \fIsendcount\fP, \fIsendtype\fP, \fIdest\fP, \fIsendtag\fP, \fIrecvbuf\fP, - \fIrecvcount\fP, \fIrecvtype\fP, \fIsource\fP, \fIrecvtag\fP, \fIcomm\fP, \fIrequest\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN) :: \fIsendbuf\fP - TYPE(*), DIMENSION(..) :: \fIrecvbuf\fP - INTEGER, INTENT(IN) :: \fIsendcount\fP, \fIdest\fP, \fIsendtag\fP, \fIrecvcount\fP, \fIsource,\fP - \fIrecvtag\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIsendtype\fP, \fIrecvtype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Request) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -sendbuf -Initial address of send buffer (choice). -.TP 1i -sendcount -Number of elements to send (integer). -.TP 1i -sendtype -Type of elements in send buffer (handle). -.TP 1i -dest -Rank of destination (integer). -.TP 1i -sendtag -Send tag (integer). -.TP 1i -recvcount -Maximum number of elements to receive (integer). -.TP 1i -recvtype -Type of elements in receive buffer (handle). -.TP 1i -source -Rank of source (integer). -.TP 1i -recvtag -Receive tag (integer). -.TP 1i -comm -Communicator (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -recvbuf -Initial address of receive buffer (choice). -.TP 1i -request -Communication request (handle). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -The non-blocking send-receive operations combine in one call the sending of a message to one destination and the receiving of another message, from another process. 
The two (source and destination) are possibly the same. This operation is useful for executing a shift operation across a chain of processes. The send-receive operation can be used in conjunction with the functions described in the "Process Topologies" chapter of the MPI Standard in order to perform shifts on various logical topologies. -.sp -A message sent by a send-receive operation can be received by a regular receive operation or probed by a probe operation; a send-receive operation can receive a message sent by a regular send operation. -.sp -MPI_Isendrecv executes a non-blocking send and receive operation. Both send and receive use the same communicator, but possibly different tags. The send buffer and receive buffers must be disjoint, and may have different lengths and datatypes. -.sp -A non-blocking send-receive request can be determined to be completed by calling the MPI_Wait, MPI_Waitany, MPI_Test, or MPI_Testany with the request returned by this function. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.sp -MPI_Isendrecv_replace , MPI_Sendrecv, MPI_Sendrecv_replace - - diff --git a/ompi/mpi/man/man3/MPI_Isendrecv_replace.3in b/ompi/mpi/man/man3/MPI_Isendrecv_replace.3in deleted file mode 100644 index 727d8acbfd5..00000000000 --- a/ompi/mpi/man/man3/MPI_Isendrecv_replace.3in +++ /dev/null @@ -1,107 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. 
-.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Isendrecv_replace 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Isendrecv_replace\fP \- Sends and receives a message using a single buffer. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Isendrecv_replace(void *\fIbuf\fP, int\fI count\fP, MPI_Datatype\fI datatype\fP, - int\fI dest\fP, int\fI sendtag\fP, int\fI source\fP, int\fI recvtag\fP, MPI_Comm\fI comm\fP, - MPI_Request\fI *request\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_ISENDRECV_REPLACE(\fIBUF, COUNT, DATATYPE, DEST, SENDTAG, SOURCE, - RECVTAG, COMM, REQUEST, IERROR\fP) - \fIBUF\fP(*) - INTEGER \fICOUNT, DATATYPE, DEST, SENDTAG\fP - INTEGER \fISOURCE, RECVTAG, COMM\fP - INTEGER \fIREQUEST, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Isendrecv_replace(\fIbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIdest\fP, \fIsendtag\fP, \fIsource\fP, \fIrecvtag\fP, - \fIcomm\fP, \fIrequest\fP, \fIierror\fP) - TYPE(*), DIMENSION(..) :: \fIbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP, \fIdest\fP, \fIsendtag\fP, \fIsource\fP, \fIrecvtag\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Request) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -buf -Initial address of send and receive buffer (choice). - -.SH INPUT PARAMETERS -.ft R -.TP 1i -count -Number of elements in send and receive buffer (integer). -.TP 1i -datatype -Type of elements to send and receive (handle). -.TP 1i -dest -Rank of destination (integer). -.TP 1i -sendtag -Send message tag (integer). -.TP 1i -source -Rank of source (integer). -.TP 1i -recvtag -Receive message tag (integer). -.TP 1i -comm -Communicator (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -request -Communication request (handle). 
-.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -The non-blocking send-receive operations combine in one call the sending of a message to one destination and the receiving of another message, from another process. The two (source and destination) are possibly the same. A send-receive operation is useful for executing a shift operation across a chain of processes. The send-receive operation can be used in conjunction with the functions described in the "Process Topologies" chapter of the MPI Standard in order to perform shifts on various logical topologies. Also, a send-receive operation is useful for implementing remote procedure calls. -.sp -A message sent by a send-receive operation can be received by a regular receive operation or probed by a probe operation; a send-receive operation can receive a message sent by a regular send operation. -.sp -MPI_Isendrecv_replace executes a non-blocking send and receive. The same buffer is used both for the send and for the receive, so that the message sent is replaced by the message received. -.sp -A non-blocking send-receive request can be determined to be completed by calling the MPI_Wait, MPI_Waitany, MPI_Test, or MPI_Testany with the request returned by this function. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. 
- -.SH SEE ALSO -.ft R -.sp -MPI_Isendrecv, MPI_Sendrecv, MPI_Sendrecv_replace - - - diff --git a/ompi/mpi/man/man3/MPI_Issend.3in b/ompi/mpi/man/man3/MPI_Issend.3in deleted file mode 100644 index 03d907aab0a..00000000000 --- a/ompi/mpi/man/man3/MPI_Issend.3in +++ /dev/null @@ -1,89 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright (c) 2010-2015 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Issend 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Issend\fP \- Starts a nonblocking synchronous send. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Issend(const void *\fIbuf\fP, int\fI count\fP, MPI_Datatype\fI datatype\fP, int\fI dest\fP, - int\fI tag\fP, MPI_Comm\fI comm\fP, MPI_Request\fI *request\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_ISSEND(\fIBUF, COUNT, DATATYPE, DEST, TAG, COMM, REQUEST, IERROR\fP) - \fIBUF\fP(*) - INTEGER \fICOUNT, DATATYPE, DEST, TAG, COMM, REQUEST, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Issend(\fIbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIdest\fP, \fItag\fP, \fIcomm\fP, \fIrequest\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP, \fIdest\fP, \fItag\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -buf -Initial address of send buffer (choice). -.TP 1i -count -Number of elements in send buffer (integer). -.TP 1i -datatype -Datatype of each send buffer element (handle). -.TP 1i -dest -Rank of destination (integer). 
-.TP 1i -tag -Message tag (integer). -.TP 1i -comm -Communicator (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -request -Communication request (handle). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -Starts a synchronous mode, nonblocking send. -.sp -Nonblocking calls allocate a communication request object and associate it with the request handle (the argument request). The request can be used later to query the status of the communication or wait for its completion. -.sp -A nonblocking send call indicates that the system may start copying data out of the send buffer. The sender should not modify any part of the send buffer after a nonblocking send operation is called, until the send completes. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -MPI_Ssend -.br diff --git a/ompi/mpi/man/man3/MPI_Keyval_create.3in b/ompi/mpi/man/man3/MPI_Keyval_create.3in deleted file mode 100644 index 4f49ef18b9b..00000000000 --- a/ompi/mpi/man/man3/MPI_Keyval_create.3in +++ /dev/null @@ -1,120 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. 
-.\" $COPYRIGHT$ -.TH MPI_Keyval_create 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Keyval_create\fP \- Generates a new attribute key -- use of this routine is deprecated. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Keyval_create(MPI_Copy_function *\fIcopy_fn\fP, - MPI_Delete_function *\fIdelete_fn\fP, int *\fIkeyval\fP, void *\fIextra_state\fP) - -.fi -.SH Fortran Syntax -.nf -INCLUDE 'mpif.h' -MPI_KEYVAL_CREATE(\fICOPY_FN, DELETE_FN, KEYVAL, EXTRA_STATE, IERROR\fP) - EXTERNAL \fICOPY_FN, DELETE_FN\fP - INTEGER \fIKEYVAL, EXTRA_STATE, IERROR\fP - - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -copy_fn -Copy callback function for keyval. -.TP 1i -delete_fn -Delete callback function for keyval. -.TP 1i -extra_state -Extra state for callback functions. - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -keyval -Key value for future access (integer). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -Note that use of this routine is \fIdeprecated\fP as of MPI-2. Please use MPI_Comm_create_keyval instead. -.sp -Generates a new attribute key. Keys are locally unique in a process and opaque to the user, though they are explicitly stored in integers. Once allocated, the key value can be used to associate attributes and access them on any locally defined communicator. -.sp -The copy_fn function is invoked when a communicator is duplicated by MPI_COMM_DUP. 
copy_fn should be of type MPI_Copy_function, which is defined as follows: -.sp -.nf - typedef int MPI_Copy_function(MPI_Comm oldcomm, int keyval, - void *extra_state, void *attribute_val_in, - void *attribute_val_out, int *flag) - -.fi -A Fortran declaration for such a function is as follows: -.sp -.nf - SUBROUTINE COPY_FUNCTION(OLDCOMM, KEYVAL, EXTRA_STATE, ATTRIBUTE_VAL_IN, - ATTRIBUTE_VAL_OUT, FLAG, IERR) - INTEGER OLDCOMM, KEYVAL, EXTRA_STATE, - ATTRIBUTE_VAL_IN, ATTRIBUTE_VAL_OUT, IERR - LOGICAL FLAG -.fi -.sp -The copy callback function is invoked for each key value in oldcomm in arbitrary order. Each call to the copy callback is made with a key value and its corresponding attribute. If it returns flag = 0, then the attribute is deleted in the duplicated communicator. Otherwise ( flag = 1), the new attribute value is set to the value returned in attribute_val_out. The function returns MPI_SUCCESS on success and an error code on failure (in which case MPI_Comm_dup will fail). -.sp -copy_fn may be specified as MPI_NULL_COPY_FN or MPI_DUP_FN from either C or -Fortran; MPI_NULL_COPY_FN is a function that does nothing other than return flag = 0, and MPI_SUCCESS. MPI_DUP_FN is a simple-minded copy function that sets flag = 1, returns the value of attribute_val_in in attribute_val_out, and returns MPI_SUCCESS. - -.SH NOTES -Key values are global (available for any and all communicators). -.sp -There are subtle differences between C and Fortran that require that the copy_fn be written in the same language that MPI_Keyval_create is called from. This should not be a problem for most users; only programmers using both Fortran and C in the same program need to be sure that they follow this rule. -.sp -Even though both formal arguments attribute_val_in -and attribute_val_out are of type void*, their usage differs. 
The C copy function is passed by MPI in attribute_val_in the value of the attribute, and in attribute_val_out the address of the attribute, so as to allow the function to return the (new) attribute value. The use of type void* for both is to avoid messy type casts. -.sp -A valid copy function is one that completely duplicates the information by making a full duplicate copy of the data structures implied by an attribute; another might just make another reference to that data structure, while using a reference-count mechanism. Other types of attributes might not copy at all (they might be specific to oldcomm only). -.sp -Analogous to copy_fn is a callback deletion function, defined as follows. The delete_fn function is invoked when a communicator is deleted by MPI_Comm_free or when a call is made explicitly to MPI_Attr_delete. delete_fn should be of type MPI_Delete_function, which is defined as follows: -.sp -.nf - typedef int MPI_Delete_function(MPI_Comm comm, int keyval, - void *attribute_val, void *extra_state); -.fi -.sp -A Fortran declaration for such a function is as follows: -.sp -.nf - SUBROUTINE DELETE_FUNCTION(COMM, KEYVAL,ATTRIBUTE_VAL, EXTRA_STATE, IERR) - INTEGER COMM, KEYVAL, ATTRIBUTE_VAL, EXTRA_STATE, IERR -.fi -.sp -This function is called by MPI_Comm_free, MPI_Attr_delete, and MPI_Attr_put to do whatever is needed to remove an attribute. The function returns MPI_SUCCESS on success and an error code on failure (in which case MPI_COMM_FREE will fail). -.sp -delete_fn may be specified as MPI_NULL_DELETE_FN from either C or FORTRAN; MPI_NULL_DELETE_FN is a function that does nothing, other than returning MPI_SUCCESS. -.sp -The special key value MPI_KEYVAL_INVALID is never returned by MPI_Keyval_create. Therefore, it can be used for static initialization of key values. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. 
-.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -MPI_Keyval_free -.br -MPI_Comm_create_keyval -.br diff --git a/ompi/mpi/man/man3/MPI_Keyval_free.3in b/ompi/mpi/man/man3/MPI_Keyval_free.3in deleted file mode 100644 index bd26f8b802d..00000000000 --- a/ompi/mpi/man/man3/MPI_Keyval_free.3in +++ /dev/null @@ -1,63 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Keyval_free 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Keyval_free\fP \- Frees attribute key for communicator cache attribute -- use of this routine is deprecated. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Keyval_free(int *\fIkeyval\fP) - -.fi -.SH Fortran Syntax -.nf -INCLUDE 'mpif.h' -MPI_KEYVAL_FREE(\fIKEYVAL, IERROR\fP) - INTEGER \fIKEYVAL, IERROR\fP - - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -keyval -Frees the integer key value (integer). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -Note that use of this routine is \fIdeprecated\fP as of MPI-2. Please use MPI_Comm_free_keyval instead. -.sp -Frees an extant attribute key. This function sets the value of keyval to MPI_KEYVAL_INVALID. Note that it is not erroneous to free an attribute key that is in use, because the actual free does not transpire until after all references (in other communicators on the process) to the key have been freed. 
These references need to be explicitly freed by the program, either via calls to MPI_Attr_delete that free one attribute instance, or by calls to MPI_Comm_free that free all attribute instances associated with the freed communicator. - -.SH NOTE -.ft R -Key values are global (they can be used with any and all communicators). - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -MPI_Keyval_create -.br -MPI_Comm_free_keyval -.br - - - - diff --git a/ompi/mpi/man/man3/MPI_Lookup_name.3in b/ompi/mpi/man/man3/MPI_Lookup_name.3in deleted file mode 100644 index 8a0367236f6..00000000000 --- a/ompi/mpi/man/man3/MPI_Lookup_name.3in +++ /dev/null @@ -1,133 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2007-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Lookup_name 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" - -.SH NAME -.nf -\fBMPI_Lookup_name\fP \- Finds port associated with a service name - -.fi -.SH SYNTAX -.ft R - -.SH C Syntax -.nf -#include -int MPI_Lookup_name(const char *\fIservice_name\fP, MPI_Info \fIinfo\fP, - char *\fIport_name\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_LOOKUP_NAME(\fISERVICE_NAME, INFO, PORT_NAME, IERROR\fP) - CHARACTER*(*) \fISERVICE_NAME, PORT_NAME\fP - INTEGER \fIINFO, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Lookup_name(\fIservice_name\fP, \fIinfo\fP, \fIport_name\fP, \fIierror\fP) - CHARACTER(LEN=*), INTENT(IN) :: \fIservice_name\fP - TYPE(MPI_Info), INTENT(IN) :: \fIinfo\fP - CHARACTER(LEN=MPI_MAX_PORT_NAME), INTENT(OUT) :: \fIport_name\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1.4i -service_name -A service name (string). -.TP 1.4i -info -Options to the name service functions (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1.4i -port_name -a port name (string). -.TP 1.4i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -This function retrieves a \fIport_name\fP published under -\fIservice_name\fP by a previous invocation of MPI_Publish_name. The -application must supply a \fIport_name\fP buffer large enough to hold -the largest possible port name (i.e., MPI_MAX_PORT_NAME bytes). - -.SH INFO ARGUMENTS -The following keys for \fIinfo\fP are recognized: -.sp -.sp -.nf -Key Type Description ---- ---- ----------- - -ompi_lookup_order char * Resolution order for name lookup. -.fi - -The \fIompi_lookup_order\fP info key can specify one of four valid -string values (see the NAME SCOPE section below for more information -on name scopes): - -.TP 4 -\fIlocal\fP: Only search the local scope for name resolution. -.TP 4 -\fIglobal\fP: Only search the global scope for name resolution. -.TP 4 -\fIlocal,global\fP: Search the local scope for name resolution. If -not found, try searching the global scope for name resolution. This -behavior is the default if the \fIompi_lookup_order\fP info key is not -specified. -.TP 4 -\fIglobal,local\fP: Search the global scope for name resolution. If -not found, try searching the local scope for name resolution. 
-.PP -If no info key is provided, the search will first check to see if a -global server has been specified and is available. If so, then the -search will default to global scope first, followed by local. Otherwise, -the search will default to local. - -.SH NAME SCOPE -Open MPI supports two name scopes: \fIglobal\fP and \fIlocal\fP. Local scope -values are placed in a data store located on the mpirun of the calling -process' job, while global scope values reside on a central server. Calls -to MPI_Unpublish_name must correctly specify the scope to be used in -finding the value to be removed. The function will return an error if -the specified service name is not found on the indicated location. -.sp -For a more detailed description of scoping rules, please see the MPI_Publish_name -man page. - -.SH ERRORS -.ft R -Almost all MPI routines return an error value; C routines as -the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. -.sp -See the MPI man page for a full list of MPI error codes. 
- -.SH SEE ALSO -.ft R -.nf -MPI_Publish_name -MPI_Open_port - - diff --git a/ompi/mpi/man/man3/MPI_Message_c2f.3in b/ompi/mpi/man/man3/MPI_Message_c2f.3in deleted file mode 100644 index a13fce697dd..00000000000 --- a/ompi/mpi/man/man3/MPI_Message_c2f.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Comm_f2c.3 diff --git a/ompi/mpi/man/man3/MPI_Message_f2c.3in b/ompi/mpi/man/man3/MPI_Message_f2c.3in deleted file mode 100644 index a13fce697dd..00000000000 --- a/ompi/mpi/man/man3/MPI_Message_f2c.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Comm_f2c.3 diff --git a/ompi/mpi/man/man3/MPI_Mprobe.3in b/ompi/mpi/man/man3/MPI_Mprobe.3in deleted file mode 100644 index 32f94fb54e6..00000000000 --- a/ompi/mpi/man/man3/MPI_Mprobe.3in +++ /dev/null @@ -1,107 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2012 Oracle and/or its affiliates. All rights reserved. -.\" Copyright 2012 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Mprobe 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Mprobe\fP \- Blocking matched probe for a message. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Mprobe(int \fIsource\fP, int\fI tag\fP, MPI_Comm\fI comm\fP, - MPI_Message\fI *message\fP, MPI_Status\fI *status\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_MPROBE(\fISOURCE, TAG, COMM, MESSAGE, STATUS, IERROR\fP) - INTEGER \fISOURCE, TAG, COMM, MESSAGE\fP - INTEGER \fISTATUS(MPI_STATUS_SIZE), IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Mprobe(\fIsource\fP, \fItag\fP, \fIcomm\fP, \fImessage\fP, \fIstatus\fP, \fIierror\fP) - INTEGER, INTENT(IN) :: \fIsource\fP, \fItag\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Message), INTENT(OUT) :: \fImessage\fP - TYPE(MPI_Status) :: \fIstatus\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -source -Source rank or MPI_ANY_SOURCE (integer). -.TP 1i -tag -Tag value or MPI_ANY_TAG (integer). -.TP 1i -comm -Communicator (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -message -Message (handle). -.ft R -.TP 1i -status -Status object (status). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -Like MPI_Probe and MPI_Iprobe, the MPI_Mprobe and MPI_Improbe operations -allow incoming messages to be queried without actually receiving -them, except that MPI_Mprobe and MPI_Improbe provide a mechanism to -receive the specific message that was matched regardless of other -intervening probe or receive operations. This gives the application -an opportunity to decide how to receive the message, based on the -information returned by the probe. In particular, the application may -allocate memory for the receive buffer according to the length of the -probed message. -.sp -A matching probe with MPI_PROC_NULL as \fIsource\fP returns -\fImessage\fP = MPI_MESSAGE_NO_PROC, and the \fIstatus\fP object returns -source = MPI_PROC_NULL, tag = MPI_ANY_TAG, and count = 0. -.sp -When MPI_Mprobe returns (from a non-MPI_PROC_NULL \fIsource\fP), the -matched message can then be received by passing the \fImessage\fP -handle to the MPI_Mrecv or MPI_Imrecv functions. 
- -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -MPI_Comm_set_errhandler; the predefined error handler -MPI_ERRORS_RETURN may be used to cause error values to be -returned. Note that MPI does not guarantee that an MPI program can -continue past an error. - -.SH SEE ALSO -.ft R -.nf -MPI_Improbe -MPI_Probe -MPI_Iprobe -MPI_Mrecv -MPI_Imrecv -MPI_Cancel diff --git a/ompi/mpi/man/man3/MPI_Mrecv.3in b/ompi/mpi/man/man3/MPI_Mrecv.3in deleted file mode 100644 index f8ef9660727..00000000000 --- a/ompi/mpi/man/man3/MPI_Mrecv.3in +++ /dev/null @@ -1,96 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2012 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Mrecv 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Mrecv\fP \- Blocking receive for a matched message - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Mrecv(void \fI*buf\fP, int\fI count\fP, MPI_Datatype\fI type\fP, - MPI_Message\fI *message\fP, MPI_Status\fI *status\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_MRECV(\fIBUF, COUNT, DATATYPE, MESSAGE, STATUS, IERROR\fP) - \fIBUF(*)\fP - INTEGER \fICOUNT, DATATYPE, MESSAGE\fP - INTEGER \fISTATUS(MPI_STATUS_SIZE), IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Mrecv(\fIbuf\fP, \fIcount\fP, \fIdatatype\fP, \fImessage\fP, \fIstatus\fP, \fIierror\fP) - TYPE(*), DIMENSION(..) 
:: \fIbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Message), INTENT(INOUT) :: \fImessage\fP - TYPE(MPI_Status) :: \fIstatus\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -count -Number of elements to receive (nonnegative integer). -.TP 1i -datatype -Datatype of each send buffer element (handle). -.TP 1i -message -Message (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -buf -Initial address of receive buffer (choice). -.TP 1i -status -Status object (status). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -The functions MPI_Mrecv and MPI_Imrecv receive messages that have been -previously matched by a matching probe. -.sp -If MPI_Mrecv is called with MPI_MESSAGE_NULL as the message argument, -the call returns immediately with the \fIstatus\fP object set to -\fIsource\fP = MPI_PROC_NULL, \fItag\fP = MPI_ANY_TAG, and \fIcount\fP -= 0, as if a receive from MPI_PROC_NULL was issued. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -MPI_Comm_set_errhandler; the predefined error handler -MPI_ERRORS_RETURN may be used to cause error values to be -returned. Note that MPI does not guarantee that an MPI program can -continue past an error. 
- -.SH SEE ALSO -.ft R -.nf -MPI_Mprobe -MPI_Improbe -MPI_Probe -MPI_Iprobe -MPI_Imrecv -MPI_Cancel diff --git a/ompi/mpi/man/man3/MPI_Neighbor_allgather.3in b/ompi/mpi/man/man3/MPI_Neighbor_allgather.3in deleted file mode 100644 index 56eab70bd30..00000000000 --- a/ompi/mpi/man/man3/MPI_Neighbor_allgather.3in +++ /dev/null @@ -1,170 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" $COPYRIGHT$ -.TH MPI_Neighbor_allgather 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Neighbor_allgather, MPI_Ineighbor_allgather, MPI_Neighbor_allgather\fP \- Gathers and distributes data from and to all neighbors - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Neighbor_allgather(const void\fI *sendbuf\fP, int \fI sendcount\fP, - MPI_Datatype\fI sendtype\fP, void\fI *recvbuf\fP, int\fI recvcount\fP, - MPI_Datatype\fI recvtype\fP, MPI_Comm\fI comm\fP) - -int MPI_Ineighbor_allgather(const void\fI *sendbuf\fP, int \fI sendcount\fP, - MPI_Datatype\fI sendtype\fP, void\fI *recvbuf\fP, int\fI recvcount\fP, - MPI_Datatype\fI recvtype\fP, MPI_Comm\fI comm\fP, MPI_Request \fIreq\fP) - -int MPI_Neighbor_allgather_init(const void\fI *sendbuf\fP, int \fI sendcount\fP, - MPI_Datatype\fI sendtype\fP, void\fI *recvbuf\fP, int\fI recvcount\fP, - MPI_Datatype\fI recvtype\fP, MPI_Comm\fI comm\fP, MPI_Info\fIinfo\fP, MPI_Request \fIreq\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_NEIGHBOR_ALLGATHER(\fISENDBUF\fP,\fI SENDCOUNT\fP,\fI SENDTYPE\fP,\fI RECVBUF\fP,\fI RECVCOUNT\fP,\fI - RECVTYPE\fP,\fI COMM\fP,\fI IERROR\fP) - \fISENDBUF\fP (*), \fIRECVBUF\fP (*) - INTEGER \fISENDCOUNT\fP,\fI SENDTYPE\fP,\fI RECVCOUNT\fP,\fI RECVTYPE\fP,\fI COMM\fP, - INTEGER \fIIERROR\fP - -MPI_INEIGHBOR_ALLGATHER(\fISENDBUF\fP,\fI SENDCOUNT\fP,\fI SENDTYPE\fP,\fI RECVBUF\fP,\fI RECVCOUNT\fP,\fI - RECVTYPE\fP,\fI COMM\fP, \fIREQUEST\fP,\fI IERROR\fP) - \fISENDBUF\fP (*), \fIRECVBUF\fP (*) - INTEGER \fISENDCOUNT\fP,\fI SENDTYPE\fP,\fI RECVCOUNT\fP,\fI RECVTYPE\fP,\fI COMM\fP, - INTEGER \fIREQUEST, IERROR\fP - -MPI_NEIGHBOR_ALLGATHER_INIT(\fISENDBUF\fP,\fI SENDCOUNT\fP,\fI SENDTYPE\fP,\fI RECVBUF\fP,\fI RECVCOUNT\fP,\fI - RECVTYPE\fP,\fI COMM\fP,\fI INFO\fP,\f IREQUEST\fP,\fI IERROR\fP) - \fISENDBUF\fP (*), \fIRECVBUF\fP (*) - INTEGER \fISENDCOUNT\fP,\fI SENDTYPE\fP,\fI RECVCOUNT\fP,\fI RECVTYPE\fP,\fI COMM\fP, - INTEGER \fIINFO, REQUEST, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Neighbor_allgather(\fIsendbuf\fP, \fIsendcount\fP, \fIsendtype\fP, \fIrecvbuf\fP, \fIrecvcount\fP, - \fIrecvtype\fP, \fIcomm\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN) :: \fIsendbuf\fP - TYPE(*), DIMENSION(..) 
:: \fIrecvbuf\fP - INTEGER, INTENT(IN) :: \fIsendcount\fP, \fIrecvcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIsendtype\fP, \fIrecvtype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -MPI_Ineighbor_allgather(\fIsendbuf\fP, \fIsendcount\fP, \fIsendtype\fP, \fIrecvbuf\fP, \fIrecvcount\fP, - \fIrecvtype\fP, \fIcomm\fP, \fIrequest\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIsendbuf\fP - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIrecvbuf\fP - INTEGER, INTENT(IN) :: \fIsendcount\fP, \fIrecvcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIsendtype\fP, \fIrecvtype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -MPI_Neighbor_allgather_init(\fIsendbuf\fP, \fIsendcount\fP, \fIsendtype\fP, \fIrecvbuf\fP, \fIrecvcount\fP, - \fIrecvtype\fP, \fIcomm\fP, \fIinfo\fP, \fIrequest\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIsendbuf\fP - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIrecvbuf\fP - INTEGER, INTENT(IN) :: \fIsendcount\fP, \fIrecvcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIsendtype\fP, \fIrecvtype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Info), INTENT(IN) :: \fIinfo\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -sendbuf -Starting address of send buffer (choice). -.TP 1i -sendcount -Number of elements in send buffer (integer). -.TP 1i -sendtype -Datatype of send buffer elements (handle). -.TP 1i -recvbuf -Starting address of recv buffer (choice). -.TP 1i -recvcount -Number of elements received from any process (integer). -.TP 1i -recvtype -Datatype of receive buffer elements (handle). -.TP 1i -comm -Communicator (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -recvbuf -Address of receive buffer (choice). 
-.TP 1i -request -Request (handle, non-blocking only). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Neighbor_allgather is similar to MPI_Allgather, except that only the neighboring processes receive the result, instead of all processes. The neighbors and buffer layout is determined by the topology of \fIcomm\fP. -.sp -The type signature associated with sendcount, sendtype at a process must be equal to the type signature associated with recvcount, recvtype at any other process. -.fi - -.sp -.SH NEIGHBOR ORDERING -For a distributed graph topology, created with MPI_Dist_graph_create, the sequence of neighbors -in the send and receive buffers at each process is defined as the sequence returned by MPI_Dist_graph_neighbors -for destinations and sources, respectively. For a general graph topology, created with MPI_Graph_create, the order of -neighbors in the send and receive buffers is defined as the sequence of neighbors as returned by MPI_Graph_neighbors. -Note that general graph topologies should generally be replaced by the distributed graph topologies. - -For a Cartesian topology, created with MPI_Cart_create, the sequence of neighbors in the send and receive -buffers at each process is defined by order of the dimensions, first the neighbor in the negative direction -and then in the positive direction with displacement 1. The numbers of sources and destinations in the -communication routines are 2*ndims with ndims defined in MPI_Cart_create. If a neighbor does not exist, i.e., at -the border of a Cartesian topology in the case of a non-periodic virtual grid dimension (i.e., -periods[...]==false), then this neighbor is defined to be MPI_PROC_NULL. - -If a neighbor in any of the functions is MPI_PROC_NULL, then the neighborhood collective communication behaves -like a point-to-point communication with MPI_PROC_NULL in this direction. 
That is, the buffer is still part of -the sequence of neighbors but it is neither communicated nor updated. - -.SH NOTES -.sp -The MPI_IN_PLACE option for \fIsendbuf\fP is not meaningful for this operation. - - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler -may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.sp -MPI_Neighbor_allgatherv -MPI_Cart_create -MPI_Garph_create -MPI_Dist_graph_create -.br -MPI_Gather - diff --git a/ompi/mpi/man/man3/MPI_Neighbor_allgather_init.3in b/ompi/mpi/man/man3/MPI_Neighbor_allgather_init.3in deleted file mode 100644 index f0569265c29..00000000000 --- a/ompi/mpi/man/man3/MPI_Neighbor_allgather_init.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Neighbor_allgather.3 diff --git a/ompi/mpi/man/man3/MPI_Neighbor_allgatherv.3in b/ompi/mpi/man/man3/MPI_Neighbor_allgatherv.3in deleted file mode 100644 index 29807404f40..00000000000 --- a/ompi/mpi/man/man3/MPI_Neighbor_allgatherv.3in +++ /dev/null @@ -1,171 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2007-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" $COPYRIGHT$ -.TH MPI_Neighbor_allgatherv 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Neighbor_allgatherv, MPI_Ineighbor_allgatherv, MPI_Neighbor_allgatherv_init\fP \- Gathers and distributes data from and to all neighbors. Each process may contribute a different amount of data. 
- -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Neighbor_allgatherv(const void\fI *sendbuf\fP, int\fI sendcount\fP, - MPI_Datatype\fI sendtype\fP, void\fI *recvbuf\fP, const int\fI recvcounts[]\fP, - const int\fI displs[]\fP, MPI_Datatype\fI recvtype\fP, MPI_Comm\fI comm\fP) - -int MPI_Ineighbor_allgatherv(const void\fI *sendbuf\fP, int\fI sendcount\fP, - MPI_Datatype\fI sendtype\fP, void\fI *recvbuf\fP, const int\fI recvcounts[]\fP, - const int\fI displs[]\fP, MPI_Datatype\fI recvtype\fP, MPI_Comm\fI comm\fP, - MPI_Request \fI*request\fP) - -int MPI_Neighbor_allgatherv(const void\fI *sendbuf\fP, int\fI sendcount\fP, - MPI_Datatype\fI sendtype\fP, void\fI *recvbuf\fP, const int\fI recvcounts[]\fP, - const int\fI displs[]\fP, MPI_Datatype\fI recvtype\fP, MPI_Comm\fI comm\fP, - MPI_Info \fIinfo\fP, MPI_Request \fI*request\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_NEIGHBOR_ALLGATHERV(\fISENDBUF\fP,\fI SENDCOUNT\fP, \fISENDTYPE\fP,\fI RECVBUF\fP, - \fIRECVCOUNT\fP,\fI DISPLS\fP, \fIRECVTYPE\fP,\fI COMM\fP,\fI IERROR\fP) - \fISENDBUF\fP(*), \fIRECVBUF\fP(*) - INTEGER \fISENDCOUNT\fP,\fI SENDTYPE\fP, \fIRECVCOUNT\fP(*), - INTEGER \fIDISPLS\fP(*),\fI RECVTYPE\fP,\fI COMM\fP,\fI IERROR\fP - -MPI_INEIGHBOR_ALLGATHERV(\fISENDBUF\fP,\fI SENDCOUNT\fP, \fISENDTYPE\fP,\fI RECVBUF\fP, - \fIRECVCOUNT\fP,\fI DISPLS\fP, \fIRECVTYPE\fP,\fI COMM\fP,\fI REQUEST\fP,\fI IERROR\fP) - \fISENDBUF\fP(*), \fIRECVBUF\fP(*) - INTEGER \fISENDCOUNT\fP,\fI SENDTYPE\fP, \fIRECVCOUNT\fP(*), - INTEGER \fIDISPLS\fP(*),\fI RECVTYPE\fP,\fI COMM\fP,\fIREQUEST\fP,\fI IERROR\fP - -MPI_NEIGHBOR_ALLGATHERV_INIT(\fISENDBUF\fP,\fI SENDCOUNT\fP, \fISENDTYPE\fP,\fI RECVBUF\fP, - \fIRECVCOUNT\fP,\fI DISPLS\fP, \fIRECVTYPE\fP,\fI COMM\fP,\fI INFO\fP,\fI REQUEST\fP,\fI IERROR\fP) - \fISENDBUF\fP(*), \fIRECVBUF\fP(*) - INTEGER \fISENDCOUNT\fP,\fI SENDTYPE\fP, \fIRECVCOUNT\fP(*), - INTEGER \fIDISPLS\fP(*),\fI RECVTYPE\fP,\fI 
COMM\fP,\fIINFO\fP,\fIREQUEST\fP,\fI IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Neighbor_allgatherv(\fIsendbuf\fP, \fIsendcount\fP, \fIsendtype\fP, \fIrecvbuf\fP, \fIrecvcounts\fP, - \fIdispls\fP, \fIrecvtype\fP, \fIcomm\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN) :: \fIsendbuf\fP - TYPE(*), DIMENSION(..) :: \fIrecvbuf\fP - INTEGER, INTENT(IN) :: \fIsendcount\fP, \fIrecvcounts(*)\fP, \fIdispls(*)\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIsendtype\fP, \fIrecvtype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -MPI_Ineighbor_allgatherv(\fIsendbuf\fP, \fIsendcount\fP, \fIsendtype\fP, \fIrecvbuf\fP, \fIrecvcounts\fP, - \fIdispls\fP, \fIrecvtype\fP, \fIcomm\fP, \fIrequest\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIsendbuf\fP - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIrecvbuf\fP - INTEGER, INTENT(IN) :: \fIsendcount\fP - INTEGER, INTENT(IN), ASYNCHRONOUS :: \fIrecvcounts(*)\fP, \fIdispls(*)\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIsendtype\fP, \fIrecvtype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -MPI_Neighbor_allgatherv_init(\fIsendbuf\fP, \fIsendcount\fP, \fIsendtype\fP, \fIrecvbuf\fP, \fIrecvcounts\fP, - \fIdispls\fP, \fIrecvtype\fP, \fIcomm\fP, \fIinfo\fP, \fIrequest\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIsendbuf\fP - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIrecvbuf\fP - INTEGER, INTENT(IN) :: \fIsendcount\fP - INTEGER, INTENT(IN), ASYNCHRONOUS :: \fIrecvcounts(*)\fP, \fIdispls(*)\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIsendtype\fP, \fIrecvtype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Info), INTENT(IN) :: \fIinfo\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -sendbuf -Starting address of send 
buffer (choice). -.TP 1i -sendcount -Number of elements in send buffer (integer). -.TP 1i -sendtype -Datatype of send buffer elements (handle). -.TP 1i -recvcount -Integer array (of length group size) containing the number of elements that are received from each neighbor. -.TP 1i -displs -Integer array (of length group size). Entry i specifies the displacement (relative to recvbuf) at which to place the incoming data from neighbor i. -.TP 1i -recvtype -Datatype of receive buffer elements (handle). -.TP 1i -comm -Communicator (handle). -.TP1i -info -Info (handle, persistent only). -.sp -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -recvbuf -Address of receive buffer (choice). -.TP 1i -request -Request (handle, non-blocking only). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Neighbor_allgatherv is similar to MPI_Neighbor_allgather in that all processes gather data from all neighbors, except that each process can send a different amount of data. The block of data sent from the jth neighbor is received by every neighbor and placed in the jth block of the buffer. The neighbors and buffer layout is determined by the topology of \fIcomm\fP. -.I recvbuf. -.sp -The type signature associated with sendcount, sendtype, at process j must be equal to the type signature associated with the corresponding entry in \fIrecvcounts\fP on neighboring processes. - -.sp -.SH NEIGHBOR ORDERING -For a distributed graph topology, created with MPI_Dist_graph_create, the sequence of neighbors -in the send and receive buffers at each process is defined as the sequence returned by MPI_Dist_graph_neighbors -for destinations and sources, respectively. For a general graph topology, created with MPI_Graph_create, the order of -neighbors in the send and receive buffers is defined as the sequence of neighbors as returned by MPI_Graph_neighbors. -Note that general graph topologies should generally be replaced by the distributed graph topologies. 
- -For a Cartesian topology, created with MPI_Cart_create, the sequence of neighbors in the send and receive -buffers at each process is defined by order of the dimensions, first the neighbor in the negative direction -and then in the positive direction with displacement 1. The numbers of sources and destinations in the -communication routines are 2*ndims with ndims defined in MPI_Cart_create. If a neighbor does not exist, i.e., at -the border of a Cartesian topology in the case of a non-periodic virtual grid dimension (i.e., -periods[...]==false), then this neighbor is defined to be MPI_PROC_NULL. - -If a neighbor in any of the functions is MPI_PROC_NULL, then the neighborhood collective communication behaves -like a point-to-point communication with MPI_PROC_NULL in this direction. That is, the buffer is still part of -the sequence of neighbors but it is neither communicated nor updated. - -.SH NOTES -The MPI_IN_PLACE option for \fIsendbuf\fP is not meaningful for this operation. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler -may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. 
- -.SH SEE ALSO -.ft R -MPI_Neighbor_allgather -MPI_Cart_create -MPI_Graph_create -MPI_Dist_graph_create diff --git a/ompi/mpi/man/man3/MPI_Neighbor_allgatherv_init.3in b/ompi/mpi/man/man3/MPI_Neighbor_allgatherv_init.3in deleted file mode 100644 index b8ce05e6462..00000000000 --- a/ompi/mpi/man/man3/MPI_Neighbor_allgatherv_init.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Neighbor_allgatherv.3 diff --git a/ompi/mpi/man/man3/MPI_Neighbor_alltoall.3in b/ompi/mpi/man/man3/MPI_Neighbor_alltoall.3in deleted file mode 100644 index 69959ec0ea4..00000000000 --- a/ompi/mpi/man/man3/MPI_Neighbor_alltoall.3in +++ /dev/null @@ -1,219 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" $COPYRIGHT$ -.TH MPI_Neighbor_alltoall 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" - -.SH NAME -\fBMPI_Neighbor_alltoall, MPI_Ineighbor_alltoall, MPI_Neighbor_alltoall\fP \- All processes send data to neighboring processes in a virtual topology communicator - -.SH SYNTAX -.ft R - -.SH C Syntax -.nf -#include -int MPI_Neighbor_alltoall(const void *\fIsendbuf\fP, int \fIsendcount\fP, - MPI_Datatype \fIsendtype\fP, void *\fIrecvbuf\fP, int \fIrecvcount\fP, - MPI_Datatype \fIrecvtype\fP, MPI_Comm \fIcomm\fP) - -int MPI_Ineighbor_alltoall(const void *\fIsendbuf\fP, int \fIsendcount\fP, - MPI_Datatype \fIsendtype\fP, void *\fIrecvbuf\fP, int \fIrecvcount\fP, - MPI_Datatype \fIrecvtype\fP, MPI_Comm \fIcomm\fP, MPI_Request \fI*request\fP) - -int MPI_Neighbor_alltoall_init(const void *\fIsendbuf\fP, int \fIsendcount\fP, - MPI_Datatype \fIsendtype\fP, void *\fIrecvbuf\fP, int \fIrecvcount\fP, - MPI_Datatype \fIrecvtype\fP, MPI_Comm \fIcomm\fP, MPI_Info \fIinfo\fP, MPI_Request \fI*request\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_NEIGHBOR_ALLTOALL(\fISENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNT, - RECVTYPE, COMM, IERROR\fP) - - \fISENDBUF(*), RECVBUF(*)\fP - INTEGER \fISENDCOUNT, SENDTYPE, RECVCOUNT, RECVTYPE\fP - INTEGER \fICOMM, IERROR\fP - -MPI_INEIGHBOR_ALLTOALL(\fISENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNT, - RECVTYPE, COMM, REQUEST, IERROR\fP) - - \fISENDBUF(*), RECVBUF(*)\fP - INTEGER \fISENDCOUNT, SENDTYPE, RECVCOUNT, RECVTYPE\fP - INTEGER \fICOMM, REQUEST, IERROR\fP - -MPI_NEIGHBOR_ALLTOALL_INIT(\fISENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNT, - RECVTYPE, COMM, INFO, REQUEST, IERROR\fP) - - \fISENDBUF(*), RECVBUF(*)\fP - INTEGER \fISENDCOUNT, SENDTYPE, RECVCOUNT, RECVTYPE\fP - INTEGER \fICOMM, INFO, REQUEST, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Neighbor_alltoall(\fIsendbuf\fP, \fIsendcount\fP, \fIsendtype\fP, \fIrecvbuf\fP, \fIrecvcount\fP, - \fIrecvtype\fP, \fIcomm\fP, \fIierror\fP) - - TYPE(*), DIMENSION(..), INTENT(IN) :: \fIsendbuf\fP - TYPE(*), DIMENSION(..) 
:: \fIrecvbuf\fP - INTEGER, INTENT(IN) :: \fIsendcount\fP, \fIrecvcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIsendtype\fP, \fIrecvtype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -MPI_Ineighbor_alltoall(\fIsendbuf\fP, \fIsendcount\fP, \fIsendtype\fP, \fIrecvbuf\fP, \fIrecvcount\fP, - \fIrecvtype\fP, \fIcomm\fP, \fIrequest\fP, \fIierror\fP) - - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIsendbuf\fP - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIrecvbuf\fP - INTEGER, INTENT(IN) :: \fIsendcount\fP, \fIrecvcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIsendtype\fP, \fIrecvtype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -MPI_Neighbor_alltoall_init(\fIsendbuf\fP, \fIsendcount\fP, \fIsendtype\fP, \fIrecvbuf\fP, \fIrecvcount\fP, - \fIrecvtype\fP, \fIcomm\fP, \fIinfo\fP, \fIrequest\fP, \fIierror\fP) - - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIsendbuf\fP - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIrecvbuf\fP - INTEGER, INTENT(IN) :: \fIsendcount\fP, \fIrecvcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIsendtype\fP, \fIrecvtype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Info), INTENT(IN) :: \fIinfo\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1.2i -sendbuf -Starting address of send buffer (choice). -.TP 1.2i -sendcount -Number of elements to send to each process (integer). -.TP 1.2i -sendtype -Datatype of send buffer elements (handle). -.TP 1.2i -recvcount -Number of elements to receive from each process (integer). -.TP 1.2i -recvtype -Datatype of receive buffer elements (handle). -.TP 1.2i -comm -Communicator over which data is to be exchanged (handle). -.TP 1.2i -info -Info (handle, persistent only). 
- -.SH OUTPUT PARAMETERS -.ft R -.TP 1.2i -recvbuf -Starting address of receive buffer (choice). -.TP 1i -request -Request (handle, non-blocking only). -.ft R -.TP 1.2i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Neighbor_alltoall is a collective operation in which all processes send and receive the same amount of data to each neighbor. The operation of this routine can be represented as follows, where each process performs 2n (n being the number of neighbors in communicator \fIcomm\fP) independent point-to-point communications. The neighbors and buffer layout are determined by the topology of \fIcomm\fP. -.sp -Example of MPI_Neighbor_alltoall semantics for cartesian topologies: -.sp -.nf - MPI_Cart_get(\fIcomm\fP, maxdims, dims, periods, coords); - for (dim = 0, i = 0 ; dim < dims ; ++dim) { - MPI_Cart_shift(\fIcomm\fP, dim, 1, &r0, &r1); - MPI_Isend(\fIsendbuf\fP + i * \fIsendcount\fP * extent(\fIsendtype\fP), - \fIsendcount\fP, \fIsendtype\fP, r0, ..., \fIcomm\fP, ...); - MPI_Irecv(\fIrecvbuf\fP + i * \fIrecvcount\fP * extent(\fIrecvtype\fP), - \fIrecvcount\fP, \fIrecvtype\fP, r0, ..., \fIcomm\fP, ...); - ++i; - MPI_Isend(\fIsendbuf\fP + i * \fIsendcount\fP * extent(\fIsendtype\fP), - \fIsendcount\fP, \fIsendtype\fP, r1, ..., \fIcomm\fP, &req[i]); - MPI_Irecv(\fIrecvbuf\fP + i * \fIrecvcount\fP * extent(\fIrecvtype\fP), - \fIrecvcount\fP, \fIrecvtype\fP, r1, ..., \fIcomm\fP, ...); - ++i; - } - - MPI_Waitall (...); -.fi -.sp -Each process breaks up its local \fIsendbuf\fP into n blocks \- each -containing \fIsendcount\fP elements of type \fIsendtype\fP \- and -divides its \fIrecvbuf\fP similarly according to \fIrecvcount\fP and -\fIrecvtype\fP. Process j sends the k-th block of its local -\fIsendbuf\fP to neighbor k, which places the data in the j-th block of -its local \fIrecvbuf\fP. The amount of data sent must be equal to the -amount of data received, pairwise, between every pair of processes. 
- -.sp -.SH NEIGHBOR ORDERING -For a distributed graph topology, created with MPI_Dist_graph_create, the sequence of neighbors -in the send and receive buffers at each process is defined as the sequence returned by MPI_Dist_graph_neighbors -for destinations and sources, respectively. For a general graph topology, created with MPI_Graph_create, the order of -neighbors in the send and receive buffers is defined as the sequence of neighbors as returned by MPI_Graph_neighbors. -Note that general graph topologies should generally be replaced by the distributed graph topologies. - -For a Cartesian topology, created with MPI_Cart_create, the sequence of neighbors in the send and receive -buffers at each process is defined by order of the dimensions, first the neighbor in the negative direction -and then in the positive direction with displacement 1. The numbers of sources and destinations in the -communication routines are 2*ndims with ndims defined in MPI_Cart_create. If a neighbor does not exist, i.e., at -the border of a Cartesian topology in the case of a non-periodic virtual grid dimension (i.e., -periods[...]==false), then this neighbor is defined to be MPI_PROC_NULL. - -If a neighbor in any of the functions is MPI_PROC_NULL, then the neighborhood collective communication behaves -like a point-to-point communication with MPI_PROC_NULL in this direction. That is, the buffer is still part of -the sequence of neighbors but it is neither communicated nor updated. - -.sp -.SH NOTES -.ft R -The MPI_IN_PLACE option for \fIsendbuf\fP is not meaningful for this function. -.sp -All arguments on all processes are significant. The \fIcomm\fP argument, -in particular, must describe the same communicator on all processes. \fIcomm\fP -must be either a cartesian, graph, or dist graph communicator. -.sp -There are two MPI library functions that are more general than -MPI_Neighbor_alltoall. 
MPI_Neighbor_alltoallv allows all-to-all communication to and -from buffers that need not be contiguous; different processes may -send and receive different amounts of data. MPI_Neighbor_alltoallw expands -MPI_Neighbor_alltoallv's functionality to allow the exchange of data with -different datatypes. - -.SH ERRORS -.ft R -Almost all MPI routines return an error value; C routines as -the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.nf -MPI_Neighbor_alltoallv -MPI_Neighbor_alltoallw -MPI_Cart_create -MPI_Graph_create -MPI_Dist_graph_create -MPI_Dist_graph_create_adjacent diff --git a/ompi/mpi/man/man3/MPI_Neighbor_alltoall_init.3in b/ompi/mpi/man/man3/MPI_Neighbor_alltoall_init.3in deleted file mode 100644 index 56f630ecd06..00000000000 --- a/ompi/mpi/man/man3/MPI_Neighbor_alltoall_init.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Neighbor_alltoall.3 diff --git a/ompi/mpi/man/man3/MPI_Neighbor_alltoallv.3in b/ompi/mpi/man/man3/MPI_Neighbor_alltoallv.3in deleted file mode 100644 index 3a488b547e0..00000000000 --- a/ompi/mpi/man/man3/MPI_Neighbor_alltoallv.3in +++ /dev/null @@ -1,249 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. 
-.\" Copyright (c) 1996 Thinking Machines Corporation -.\" $COPYRIGHT$ -.TH MPI_Neighbor_alltoallv 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" - -.SH NAME -\fBMPI_Neighbor_alltoallv, MPI_Ineighbor_alltoallv, MPI_Neighbor_alltoallv_init\fP \- All processes send different amounts of data to, and receive different amounts of data from, all neighbors -.SH SYNTAX -.ft R - -.SH C Syntax -.nf -#include -int MPI_Neighbor_alltoallv(const void *\fIsendbuf\fP, const int \fIsendcounts\fP[], - const int \fIsdispls\fP[], MPI_Datatype \fIsendtype\fP, - void *\fIrecvbuf\fP, const int\fI recvcounts\fP[], - const int \fIrdispls\fP[], MPI_Datatype \fIrecvtype\fP, MPI_Comm \fIcomm\fP) - -int MPI_Ineighbor_alltoallv(const void *\fIsendbuf\fP, const int \fIsendcounts\fP[], - const int \fIsdispls\fP[], MPI_Datatype \fIsendtype\fP, - void *\fIrecvbuf\fP, const int\fI recvcounts\fP[], - const int \fIrdispls\fP[], MPI_Datatype \fIrecvtype\fP, MPI_Comm \fIcomm\fP, - MPI_Request \fI*request\fP) - -int MPI_Neighbor_alltoallv_init(const void *\fIsendbuf\fP, const int \fIsendcounts\fP[], - const int \fIsdispls\fP[], MPI_Datatype \fIsendtype\fP, - void *\fIrecvbuf\fP, const int\fI recvcounts\fP[], - const int \fIrdispls\fP[], MPI_Datatype \fIrecvtype\fP, MPI_Comm \fIcomm\fP, - MPI_Info \fIinfo\fP, MPI_Request \fI*request\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_NEIGHBOR_ALLTOALLV(\fISENDBUF, SENDCOUNTS, SDISPLS, SENDTYPE, - RECVBUF, RECVCOUNTS, RDISPLS, RECVTYPE, COMM, IERROR\fP) - - \fISENDBUF(*), RECVBUF(*)\fP - INTEGER \fISENDCOUNTS(*), SDISPLS(*), SENDTYPE\fP - INTEGER \fIRECVCOUNTS(*), RDISPLS(*), RECVTYPE\fP - INTEGER \fICOMM, IERROR\fP - -MPI_INEIGHBOR_ALLTOALLV(\fISENDBUF, SENDCOUNTS, SDISPLS, SENDTYPE, - RECVBUF, RECVCOUNTS, RDISPLS, RECVTYPE, COMM, REQUEST, IERROR\fP) - - \fISENDBUF(*), RECVBUF(*)\fP - INTEGER \fISENDCOUNTS(*), SDISPLS(*), SENDTYPE\fP - INTEGER \fIRECVCOUNTS(*), RDISPLS(*), RECVTYPE\fP - INTEGER \fICOMM, REQUEST, IERROR\fP - -MPI_NEIGHBOR_ALLTOALLV_INIT(\fISENDBUF, SENDCOUNTS, SDISPLS, SENDTYPE, - RECVBUF, RECVCOUNTS, RDISPLS, RECVTYPE, COMM, INFO, REQUEST, IERROR\fP) - - \fISENDBUF(*), RECVBUF(*)\fP - INTEGER \fISENDCOUNTS(*), SDISPLS(*), SENDTYPE\fP - INTEGER \fIRECVCOUNTS(*), RDISPLS(*), RECVTYPE\fP - INTEGER \fICOMM, INFO, REQUEST, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Neighbor_alltoallv(\fIsendbuf\fP, \fIsendcounts\fP, \fIsdispls\fP, \fIsendtype\fP, \fIrecvbuf\fP, - \fIrecvcounts\fP, \fIrdispls\fP, \fIrecvtype\fP, \fIcomm\fP, \fIierror\fP) - - TYPE(*), DIMENSION(..), INTENT(IN) :: \fIsendbuf\fP - TYPE(*), DIMENSION(..) 
:: \fIrecvbuf\fP - INTEGER, INTENT(IN) :: \fIsendcounts(*)\fP, \fIsdispls(*)\fP, \fIrecvcounts(*),\fP - \fIrdispls(*)\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIsendtype\fP, \fIrecvtype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -MPI_Ineighbor_alltoallv(\fIsendbuf\fP, \fIsendcounts\fP, \fIsdispls\fP, \fIsendtype\fP, \fIrecvbuf\fP, - \fIrecvcounts\fP, \fIrdispls\fP, \fIrecvtype\fP, \fIcomm\fP, \fIrequest\fP, \fIierror\fP) - - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIsendbuf\fP - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIrecvbuf\fP - INTEGER, INTENT(IN), ASYNCHRONOUS :: \fIsendcounts(*)\fP, \fIsdispls(*),\fP - \fIrecvcounts(*)\fP, \fIrdispls(*)\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIsendtype\fP, \fIrecvtype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -MPI_Neighbor_alltoallv_init(\fIsendbuf\fP, \fIsendcounts\fP, \fIsdispls\fP, \fIsendtype\fP, \fIrecvbuf\fP, - \fIrecvcounts\fP, \fIrdispls\fP, \fIrecvtype\fP, \fIcomm\fP, \fIinfo\fP, \fIrequest\fP, \fIierror\fP) - - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIsendbuf\fP - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIrecvbuf\fP - INTEGER, INTENT(IN), ASYNCHRONOUS :: \fIsendcounts(*)\fP, \fIsdispls(*),\fP - \fIrecvcounts(*)\fP, \fIrdispls(*)\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIsendtype\fP, \fIrecvtype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Info), INTENT(IN) :: \fIinfo\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1.2i -sendbuf -Starting address of send buffer. -.TP 1.2i -sendcounts -Integer array, where entry i specifies the number of elements to send -to neighbor i. 
-.TP 1.2i -sdispls -Integer array, where entry i specifies the displacement (offset from -\fIsendbuf\fP, in units of \fIsendtype\fP) from which to send data to -neighbor i. -.TP 1.2i -sendtype -Datatype of send buffer elements. -.TP 1.2i -recvcounts -Integer array, where entry j specifies the number of elements to -receive from neighbor j. -.TP 1.2i -rdispls -Integer array, where entry j specifies the displacement (offset from -\fIrecvbuf\fP, in units of \fIrecvtype\fP) to which data from neighbor j -should be written. -.TP 1.2i -recvtype -Datatype of receive buffer elements. -.TP 1.2i -comm -Communicator over which data is to be exchanged. -.TP 1.2i -info -Info (handle, persistent only). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1.2i -recvbuf -Address of receive buffer. -.TP 1i -request -Request (handle, non-blocking only). -.ft R -.TP 1.2i -IERROR -Fortran only: Error status. - -.SH DESCRIPTION -.ft R -MPI_Neighbor_alltoallv is a generalized collective operation in which all -processes send data to and receive data from all neighbors. It -adds flexibility to MPI_Neighbor_alltoall by allowing the user to specify data -to send and receive vector-style (via a displacement and element -count). The operation of this routine can be thought of as follows, -where each process performs 2n (n being the number of neighbors in -to topology of communicator \fIcomm\fP) independent point-to-point communications. -The neighbors and buffer layout are determined by the topology of \fIcomm\fP. 
-.sp -.nf - MPI_Cart_get(\fIcomm\fP, maxdims, dims, periods, coords); - for (dim = 0, i = 0 ; dim < dims ; ++dim) { - MPI_Cart_shift(\fIcomm\fP, dim, 1, &r0, &r1); - MPI_Isend(\fIsendbuf\fP + \fIsdispls\fP[i] * extent(\fIsendtype\fP), - \fIsendcount\fP, \fIsendtype\fP, r0, ..., \fIcomm\fP, ...); - MPI_Irecv(\fIrecvbuf\fP + \fIrdispls\fP[i] * extent(\fIrecvtype\fP), - \fIrecvcount\fP, \fIrecvtype\fP, r0, ..., \fIcomm\fP, ...); - ++i; - MPI_Isend(\fIsendbuf\fP + \fIsdispls\fP[i] * extent(\fIsendtype\fP), - \fIsendcount\fP, \fIsendtype\fP, r1, ..., \fIcomm\fP, &req[i]); - MPI_Irecv(\fIrecvbuf\fP + \fIrdispls\fP[i] * extent(\fIrecvtype\fP), - \fIrecvcount\fP, \fIrecvtype\fP, r1, ..., \fIcomm\fP, ...); - ++i; - } -.fi -.sp -Process j sends the k-th block of its local \fIsendbuf\fP to neighbor -k, which places the data in the j-th block of its local -\fIrecvbuf\fP. -.sp -When a pair of processes exchanges data, each may pass different -element count and datatype arguments so long as the sender specifies -the same amount of data to send (in bytes) as the receiver expects -to receive. -.sp -Note that process i may send a different amount of data to process j -than it receives from process j. Also, a process may send entirely -different amounts of data to different processes in the communicator. - -.sp -.SH NEIGHBOR ORDERING -For a distributed graph topology, created with MPI_Dist_graph_create, the sequence of neighbors -in the send and receive buffers at each process is defined as the sequence returned by MPI_Dist_graph_neighbors -for destinations and sources, respectively. For a general graph topology, created with MPI_Graph_create, the order of -neighbors in the send and receive buffers is defined as the sequence of neighbors as returned by MPI_Graph_neighbors. -Note that general graph topologies should generally be replaced by the distributed graph topologies. 
- -For a Cartesian topology, created with MPI_Cart_create, the sequence of neighbors in the send and receive -buffers at each process is defined by order of the dimensions, first the neighbor in the negative direction -and then in the positive direction with displacement 1. The numbers of sources and destinations in the -communication routines are 2*ndims with ndims defined in MPI_Cart_create. If a neighbor does not exist, i.e., at -the border of a Cartesian topology in the case of a non-periodic virtual grid dimension (i.e., -periods[...]==false), then this neighbor is defined to be MPI_PROC_NULL. - -If a neighbor in any of the functions is MPI_PROC_NULL, then the neighborhood collective communication behaves -like a point-to-point communication with MPI_PROC_NULL in this direction. That is, the buffer is still part of -the sequence of neighbors but it is neither communicated nor updated. - -.sp -.SH NOTES -.ft R -The MPI_IN_PLACE option for \fIsendbuf\fP is not meaningful for this operation. -.sp -The specification of counts and displacements should not cause -any location to be written more than once. -.sp -All arguments on all processes are significant. The \fIcomm\fP argument, -in particular, must describe the same communicator on all processes. -.sp -The offsets of \fIsdispls\fP and \fIrdispls\fP are measured in units -of \fIsendtype\fP and \fIrecvtype\fP, respectively. Compare this to -MPI_Neighbor_alltoallw, where these offsets are measured in bytes. - -.SH ERRORS -.ft R -Almost all MPI routines return an error value; C routines as -the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN -may be used to cause error values to be returned. 
Note that MPI does not -guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.nf -MPI_Neighbor_alltoall -MPI_Neighbor_alltoallw -MPI_Cart_create -MPI_Graph_create -MPI_Dist_graph_create - - diff --git a/ompi/mpi/man/man3/MPI_Neighbor_alltoallv_init.3in b/ompi/mpi/man/man3/MPI_Neighbor_alltoallv_init.3in deleted file mode 100644 index 13b4e89a23c..00000000000 --- a/ompi/mpi/man/man3/MPI_Neighbor_alltoallv_init.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Neighbor_alltoallv.3 diff --git a/ompi/mpi/man/man3/MPI_Neighbor_alltoallw.3in b/ompi/mpi/man/man3/MPI_Neighbor_alltoallw.3in deleted file mode 100644 index ed21676afe3..00000000000 --- a/ompi/mpi/man/man3/MPI_Neighbor_alltoallw.3in +++ /dev/null @@ -1,247 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" $COPYRIGHT$ -.TH MPI_Neighbor_alltoallw 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" - -.SH NAME -\fBMPI_Neighbor_alltoallw, MPI_Ineighbor_alltoallw, MPI_Neighbor_alltoallw_init\fP \- All processes send data of different types to, and receive data of different types from, all processes - -.SH SYNTAX -.ft R - -.SH C Syntax -.nf -#include -int MPI_Neighbor_alltoallw(const void *\fIsendbuf\fP, const int \fIsendcounts\fP[], - const MPI_Aint \fIsdispls\fP[], const MPI_Datatype \fIsendtypes\fP[], - void *\fIrecvbuf\fP, const int \fIrecvcounts\fP[], const MPI_Aint \fIrdispls\fP[], - const MPI_Datatype \fIrecvtypes\fP[], MPI_Comm \fIcomm\fP) - -int MPI_Ineighbor_alltoallw(const void *\fIsendbuf\fP, const int \fIsendcounts\fP[], - const MPI_Aint \fIsdispls\fP[], const MPI_Datatype \fIsendtypes\fP[], - void *\fIrecvbuf\fP, const int \fIrecvcounts\fP[], const MPI_Aint \fIrdispls\fP[], - const MPI_Datatype \fIrecvtypes\fP[], MPI_Comm \fIcomm\fP, MPI_Request \fI*request\fP) 
- -int MPI_Neighbor_alltoallw_init(const void *\fIsendbuf\fP, const int \fIsendcounts\fP[], - const MPI_Aint \fIsdispls\fP[], const MPI_Datatype \fIsendtypes\fP[], - void *\fIrecvbuf\fP, const int \fIrecvcounts\fP[], const MPI_Aint \fIrdispls\fP[], - const MPI_Datatype \fIrecvtypes\fP[], MPI_Comm \fIcomm\fP, MPI_Info \fIinfo\fP, MPI_Request \fI*request\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_NEIGHBOR_ALLTOALLW(\fISENDBUF, SENDCOUNTS, SDISPLS, SENDTYPES, - RECVBUF, RECVCOUNTS, RDISPLS, RECVTYPES, COMM, IERROR\fP) - - \fISENDBUF(*), RECVBUF(*)\fP - INTEGER \fISENDCOUNTS(*), SENDTYPES(*)\fP - INTEGER \fIRECVCOUNTS(*), RECVTYPES(*)\fP - INTEGER(KIND=MPI_ADDRESS_KIND) \fISDISPLS(*), RDISPLS(*)\fP - INTEGER \fICOMM, IERROR\fP - -MPI_INEIGHBOR_ALLTOALLW(\fISENDBUF, SENDCOUNTS, SDISPLS, SENDTYPES, - RECVBUF, RECVCOUNTS, RDISPLS, RECVTYPES, COMM, REQUEST, IERROR\fP) - - \fISENDBUF(*), RECVBUF(*)\fP - INTEGER \fISENDCOUNTS(*), SENDTYPES(*)\fP - INTEGER \fIRECVCOUNTS(*), RECVTYPES(*)\fP - INTEGER(KIND=MPI_ADDRESS_KIND) \fISDISPLS(*), RDISPLS(*)\fP - INTEGER \fICOMM, REQUEST, IERROR\fP - -MPI_NEIGHBOR_ALLTOALLW_INIT(\fISENDBUF, SENDCOUNTS, SDISPLS, SENDTYPES, - RECVBUF, RECVCOUNTS, RDISPLS, RECVTYPES, COMM, INFO, REQUEST, IERROR\fP) - - \fISENDBUF(*), RECVBUF(*)\fP - INTEGER \fISENDCOUNTS(*), SENDTYPES(*)\fP - INTEGER \fIRECVCOUNTS(*), RECVTYPES(*)\fP - INTEGER(KIND=MPI_ADDRESS_KIND) \fISDISPLS(*), RDISPLS(*)\fP - INTEGER \fICOMM, INFO, REQUEST, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Neighbor_alltoallw(\fIsendbuf\fP, \fIsendcounts\fP, \fIsdispls\fP, \fIsendtypes\fP, \fIrecvbuf\fP, - \fIrecvcounts\fP, \fIrdispls\fP, \fIrecvtypes\fP, \fIcomm\fP, \fIierror\fP) - - TYPE(*), DIMENSION(..), INTENT(IN) :: \fIsendbuf\fP - TYPE(*), DIMENSION(..) 
:: \fIrecvbuf\fP - INTEGER, INTENT(IN) :: \fIsendcounts(*)\fP, \fIrecvcounts(*)\fP - INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: \fIsdispls(*)\fP, \fIrdispls(*)\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIsendtypes(*)\fP, \fIrecvtypes(*)\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -MPI_Ineighbor_alltoallw(\fIsendbuf\fP, \fIsendcounts\fP, \fIsdispls\fP, \fIsendtypes\fP, \fIrecvbuf\fP, - \fIrecvcounts\fP, \fIrdispls\fP, \fIrecvtypes\fP, \fIcomm\fP, \fIrequest\fP, \fIierror\fP) - - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIsendbuf\fP - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIrecvbuf\fP - INTEGER, INTENT(IN), ASYNCHRONOUS :: \fIsendcounts(*)\fP, \fIrecvcounts(*)\fP - INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN), ASYNCHRONOUS :: - \fIsdispls(*)\fP, \fIrdispls(*)\fP - TYPE(MPI_Datatype), INTENT(IN), ASYNCHRONOUS :: \fIsendtypes(*),\fP - \fIrecvtypes(*)\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -MPI_Neighbor_alltoallw_init(\fIsendbuf\fP, \fIsendcounts\fP, \fIsdispls\fP, \fIsendtypes\fP, \fIrecvbuf\fP, - \fIrecvcounts\fP, \fIrdispls\fP, \fIrecvtypes\fP, \fIcomm\fP, \fIinfo\fP, \fIrequest\fP, \fIierror\fP) - - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIsendbuf\fP - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIrecvbuf\fP - INTEGER, INTENT(IN), ASYNCHRONOUS :: \fIsendcounts(*)\fP, \fIrecvcounts(*)\fP - INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN), ASYNCHRONOUS :: - \fIsdispls(*)\fP, \fIrdispls(*)\fP - TYPE(MPI_Datatype), INTENT(IN), ASYNCHRONOUS :: \fIsendtypes(*),\fP - \fIrecvtypes(*)\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Info), INTENT(IN) :: \fIinfo\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1.2i -sendbuf -Starting address of send buffer. 
-.TP 1.2i -sendcounts -Integer array, where entry i specifies the number of elements to send -to neighbor i. -.TP 1.2i -sdispls -Integer array, where entry i specifies the displacement (in bytes, -offset from \fIsendbuf\fP) from which to send data to neighbor i. -.TP 1.2i -sendtypes -Datatype array, where entry i specifies the datatype to use when -sending data to neighbor i. -.TP 1.2i -recvcounts -Integer array, where entry j specifies the number of elements to -receive from neighbor j. -.TP 1.2i -rdispls -Integer array, where entry j specifies the displacement (in bytes, -offset from \fIrecvbuf\fP) to which data from neighbor j should -be written. -.TP 1.2i -recvtypes -Datatype array, where entry j specifies the datatype to use when -receiving data from neighbor j. -.TP 1.2i -comm -Communicator over which data is to be exchanged. -.TP 1.2i -info -Info (handle, persistent only). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1.2i -recvbuf -Address of receive buffer. -.TP 1i -request -Request (handle, non-blocking only). -.ft R -.TP 1.2i -IERROR -Fortran only: Error status. - -.SH DESCRIPTION -.ft R -MPI_Neighbor_alltoallw is a generalized collective operation in which all -processes send data to and receive data from all neighbors. It -adds flexibility to MPI_Neighbor_alltoallv by allowing the user to specify the -datatype of individual data blocks (in addition to displacement and -element count). Its operation can be thought of in the following way, -where each process performs 2n (n being the number of neighbors in -the topology of communicator \fIcomm\fP) independent point-to-point communications. -The neighbors and buffer layout are determined by the topology of \fIcomm\fP. 
-.sp -.nf - MPI_Cart_get(\fIcomm\fP, maxdims, dims, periods, coords); - for (dim = 0, i = 0 ; dim < dims ; ++dim) { - MPI_Cart_shift(\fIcomm\fP, dim, 1, &r0, &r1); - MPI_Isend(\fIsendbuf\fP + \fIsdispls\fP[i] * extent(\fIsendtype\fP), - \fIsendcount\fP, \fIsendtypes\fP[i], r0, ..., \fIcomm\fP, ...); - MPI_Irecv(\fIrecvbuf\fP + \fIrdispls\fP[i] * extent(\fIrecvtype\fP), - \fIrecvcount\fP, \fIrecvtypes\fP[i], r0, ..., \fIcomm\fP, ...); - ++i; - MPI_Isend(\fIsendbuf\fP + \fIsdispls\fP[i] * extent(\fIsendtype\fP), - \fIsendcount\fP, \fIsendtypes\fP[i], r1, ..., \fIcomm\fP, &req[i]); - MPI_Irecv(\fIrecvbuf\fP + \fIrdispls\fP[i] * extent(\fIrecvtype\fP), - \fIrecvcount\fP, \fIrecvtypes\fP[i], r1, ..., \fIcomm\fP, ...); - ++i; - } - - MPI_Wait_all (...); - - MPI_Comm_size(\fIcomm\fP, &n); - for (i = 0, i < n; i++) - MPI_Send(\fIsendbuf\fP + \fIsdispls\fP[i], \fIsendcounts\fP[i], - \fIsendtypes\fP[i], i, ..., \fIcomm\fP); - for (i = 0, i < n; i++) - MPI_Recv(\fIrecvbuf\fP + \fIrdispls\fP[i], \fIrecvcounts\fP[i], - \fIrecvtypes\fP[i], i, ..., \fIcomm\fP); -.fi -.sp -Process j sends the k-th block of its local \fIsendbuf\fP to neighbor -k, which places the data in the j-th block of its local -\fIrecvbuf\fP. -.sp -When a pair of processes exchanges data, each may pass different -element count and datatype arguments so long as the sender specifies -the same amount of data to send (in bytes) as the receiver expects -to receive. -.sp -Note that process i may send a different amount of data to process j -than it receives from process j. Also, a process may send entirely -different amounts and types of data to different processes in the -communicator. - -.sp -.SH NOTES -.ft R -The MPI_IN_PLACE option for \fIsendbuf\fP is not meaningful for this operation -.sp -The specification of counts, types, and displacements should not cause -any location to be written more than once. -.sp -All arguments on all processes are significant. 
The \fIcomm\fP argument, -in particular, must describe the same communicator on all processes. -.sp -The offsets of \fIsdispls\fP and \fIrdispls\fP are measured in bytes. -Compare this to MPI_Neighbor_alltoallv, where these offsets are measured in units -of \fIsendtype\fP and \fIrecvtype\fP, respectively. - -.SH ERRORS -.ft R -Almost all MPI routines return an error value; C routines as -the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.nf -MPI_Neighbor_alltoall -MPI_Neighbor_alltoallv -MPI_Cart_create -MPI_Graph_create -MPI_Dist_graph_create - diff --git a/ompi/mpi/man/man3/MPI_Neighbor_alltoallw_init.3in b/ompi/mpi/man/man3/MPI_Neighbor_alltoallw_init.3in deleted file mode 100644 index 299138b3a37..00000000000 --- a/ompi/mpi/man/man3/MPI_Neighbor_alltoallw_init.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Neighbor_alltoallw.3 diff --git a/ompi/mpi/man/man3/MPI_Op_c2f.3in b/ompi/mpi/man/man3/MPI_Op_c2f.3in deleted file mode 100644 index a13fce697dd..00000000000 --- a/ompi/mpi/man/man3/MPI_Op_c2f.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Comm_f2c.3 diff --git a/ompi/mpi/man/man3/MPI_Op_commutative.3in b/ompi/mpi/man/man3/MPI_Op_commutative.3in deleted file mode 100644 index 84599cc49a2..00000000000 --- a/ompi/mpi/man/man3/MPI_Op_commutative.3in +++ /dev/null @@ -1,63 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2015 FUJITSU LIMITED. All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. 
-.\" $COPYRIGHT$ -.TH MPI_Op_commutative 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Op_commutative\fP \- Query of commutativity of reduction operation. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Op_commutative(MPI_Op \fIop\fP, int *\fIcommute\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_OP_COMMUTATIVE(\fIOP, COMMUTE, IERROR\fP) - LOGICAL \fICOMMUTE\fP - INTEGER \fIOP, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Op_commutative(\fIop\fP, \fIcommute\fP, \fIierror\fP) - TYPE(MPI_Op), INTENT(IN) :: \fIop\fP - INTEGER, INTENT(OUT) :: \fIcommute\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.TP 1i -op -Operation (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -commute -True if op is commutative, false otherwise (logical). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -Reduction operations can be queried for their commutativity. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.sp -MPI_Op_create - diff --git a/ompi/mpi/man/man3/MPI_Op_create.3in b/ompi/mpi/man/man3/MPI_Op_create.3in deleted file mode 100644 index 05d3768e2e6..00000000000 --- a/ompi/mpi/man/man3/MPI_Op_create.3in +++ /dev/null @@ -1,190 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. 
-.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Op_create 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Op_create\fP \- Creates a user-defined combination function handle. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Op_create(MPI_User_function *\fIfunction\fP, int\fI commute\fP, - MPI_Op *\fIop\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_OP_CREATE(\fIFUNCTION, COMMUTE, OP, IERROR\fP) - EXTERNAL \fIFUNCTION\fP - LOGICAL \fICOMMUTE\fP - INTEGER \fIOP, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Op_create(\fIuser_fn\fP, \fIcommute\fP, \fIop\fP, \fIierror\fP) - PROCEDURE(MPI_User_function) :: \fIuser_fn\fP - LOGICAL, INTENT(IN) :: \fIcommute\fP - TYPE(MPI_Op), INTENT(OUT) :: \fIop\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -function -User-defined function (function). -.TP 1i -commute -True if commutative; false otherwise. - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -op -Operation (handle). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Op_create binds a user-defined global operation to an op handle that can subsequently be used in MPI_Reduce, MPI_Allreduce, MPI_Reduce_scatter, and MPI_Scan. The user-defined operation is assumed to be associative. If commute = true, then the operation should be both commutative and associative. If commute = false, then the order of operands is fixed and is defined to be in ascending, process rank order, beginning with process zero. The order of evaluation can be changed, taking advantage of the associativity of the operation. If commute = true then the order of evaluation can be changed, taking advantage of commutativity and associativity. 
-.sp -\fIfunction\fP is the user-defined function, which must have the following four arguments: invec, inoutvec, len, and datatype. - -.sp -The ANSI-C prototype for the function is the following: -.sp -.nf - typedef void MPI_User_function(void *invec, void *inoutvec, - int *len, - MPI_Datatype *datatype); -.fi -.sp -The Fortran declaration of the user-defined function appears below. -.sp -.nf - FUNCTION USER_FUNCTION( INVEC(*), INOUTVEC(*), LEN, TYPE) - INVEC(LEN), INOUTVEC(LEN) - INTEGER LEN, TYPE -.fi -.sp -The datatype argument is a handle to the data type that was passed into the -call to MPI_Reduce. The user reduce function should be written such that -the following holds: Let u[0],\ ...,\ u[len-1] be the len elements in the communication buffer described by the arguments invec, len, and datatype when the function is invoked; let v[0],\ ...,\ v[len-1] be len elements in the communication buffer described by the arguments inoutvec, len, and datatype when the function is invoked; let w[0],\ ...,\ w[len-1] be len elements in the communication buffer described by the arguments inoutvec, len, and datatype when the function returns; then w[i] = u[i] o v[i], for i=0\ ,...,\ len-1, where o is the reduce operation that the function computes. -.sp -Informally, we can think of invec and inoutvec as arrays of len elements -that function is combining. The result of the reduction over-writes values -in inoutvec, hence the name. Each invocation of the function results in the -pointwise evaluation of the reduce operator on len elements: i.e, the -function returns in inoutvec[i] the value invec[i] o inoutvec[i], for i = -0\,...,\ count-1, where o is the combining operation computed by the function. -.sp -By internally comparing the value of the datatype argument to known, global handles, it is possible to overload the use of a single user-defined function for several different data types. -.sp -General datatypes may be passed to the user function. 
However, use of datatypes that are not contiguous is likely to lead to inefficiencies. -.sp -No MPI communication function may be called inside the user function. -MPI_Abort may be called inside the function in case of an error. - -.SH NOTES -Suppose one defines a library of user-defined reduce -functions that are overloaded: The datatype argument is used to select the right execution path at each invocation, according to the types of the operands. The user-defined reduce function cannot "decode" the datatype argument that it is passed, and cannot identify, by itself, the correspondence between the datatype handles and the datatype they represent. This correspondence was established when the datatypes were created. Before the library is used, a library initialization preamble must be executed. This preamble code will define the datatypes that are used by the library and store handles to these datatypes in global, static variables that are shared by the user code and the library code. - -\fBExample:\fP Example of user-defined reduce: -.sp -Compute the product of an array of complex numbers, in C. -.sp -.nf - typedef struct { - double real,imag; - } Complex; - - /* the user-defined function - */ - void myProd( Complex *in, Complex *inout, int *len, - MPI_Datatype *dptr ) - { - int i; - Complex c; - - for (i=0; i< *len; ++i) { - c.real = inout->real*in->real - - inout->imag*in->imag; - c.imag = inout->real*in->imag + - inout->imag*in->real; - *inout = c; - in++; inout++; - } - } - - /* and, to call it\&... - */ - \&... 
- - /* each process has an array of 100 Complexes - */ - Complex a[100], answer[100]; - MPI_Op myOp; - MPI_Datatype ctype; - - /* explain to MPI how type Complex is defined - */ - MPI_Type_contiguous( 2, MPI_DOUBLE, &ctype ); - MPI_Type_commit( &ctype ); - /* create the complex-product user-op - */ - MPI_Op_create( myProd, True, &myOp ); - - MPI_Reduce( a, answer, 100, ctype, myOp, root, comm ); - - /* At this point, the answer, which consists of 100 Complexes, - * resides on process root - */ -.fi -.sp -The Fortran version of MPI_Reduce will invoke a user-defined reduce function using the Fortran calling conventions and will pass a Fortran-type datatype argument; the C version will use C calling convention and the C representation of a datatype handle. Users who plan to mix languages should define their reduction functions accordingly. - -.SH NOTES ON COLLECTIVE OPERATIONS - -The reduction functions ( -.I MPI_Op -) do not return an error value. As a result, -if the functions detect an error, all they can do is either call -.I MPI_Abort -or silently skip the problem. Thus, if you change the error handler from -.I MPI_ERRORS_ARE_FATAL -to something else, for example, -.I MPI_ERRORS_RETURN -, -then no error may be indicated. - -The reason for this is the performance problems in ensuring that -all collective routines return the same error value. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. 
- -.SH SEE ALSO -.ft R -.sp -.nf -MPI_Reduce -MPI_Reduce_scatter -MPI_Allreduce -MPI_Scan -MPI_Op_free - diff --git a/ompi/mpi/man/man3/MPI_Op_f2c.3in b/ompi/mpi/man/man3/MPI_Op_f2c.3in deleted file mode 100644 index a13fce697dd..00000000000 --- a/ompi/mpi/man/man3/MPI_Op_f2c.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Comm_f2c.3 diff --git a/ompi/mpi/man/man3/MPI_Op_free.3in b/ompi/mpi/man/man3/MPI_Op_free.3in deleted file mode 100644 index d4e436428e7..00000000000 --- a/ompi/mpi/man/man3/MPI_Op_free.3in +++ /dev/null @@ -1,67 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Op_free 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Op_free\fP \- Frees a user-defined combination function handle. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Op_free(MPI_Op *\fIop\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_OP_FREE(\fIOP, IERROR\fP) - INTEGER \fIOP, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Op_free(\fIop\fP, \fIierror\fP) - TYPE(MPI_Op), INTENT(INOUT) :: \fIop\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT/OUTPUT PARAMETER -.TP 1i -op -Operation (handle). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -Marks a user-defined reduction operation for deallocation and sets \fIop\fP to MPI_OP_NULL. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. 
The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.sp -MPI_Op_create -.br -MPI_Reduce -.br -MPI_Allreduce -.br -MPI_Reduce_scatter -.br -MPI_Scan - diff --git a/ompi/mpi/man/man3/MPI_Open_port.3in b/ompi/mpi/man/man3/MPI_Open_port.3in deleted file mode 100644 index 8a334384bf4..00000000000 --- a/ompi/mpi/man/man3/MPI_Open_port.3in +++ /dev/null @@ -1,71 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2007-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Open_port 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Open_port\fP \- Establishes a network address for a server to accept connections from clients. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Open_port(MPI_Info \fIinfo\fP, char *\fIport_name\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_OPEN_PORT(\fIINFO, PORT_NAME, IERROR\fP) - CHARACTER*(*) \fIPORT_NAME\fP - INTEGER \fIINFO, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Open_port(\fIinfo\fP, \fIport_name\fP, \fIierror\fP) - TYPE(MPI_Info), INTENT(IN) :: \fIinfo\fP - CHARACTER(LEN=MPI_MAX_PORT_NAME), INTENT(OUT) :: \fIport_name\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -info -Options on how to establish an address (handle). No options currently supported. - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -port_name -Newly established port (string). -.TP 1i -IERROR -Fortran only: Error status (integer). 
- -.SH DESCRIPTION -.ft R -MPI_Open_port establishes a network address, encoded in the \fIport_name\fP string, at which the server will be able to accept connections from clients. \fIport_name\fP is supplied by the system. -.sp -MPI copies a system-supplied port name into \fIport_name\fP. \fIport_name\fP identifies the newly opened port and can be used by a client to contact the server. The maximum size string that may be supplied by the system is MPI_MAX_PORT_NAME. - -.SH SUPPORTED INFO KEYS -None. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -MPI_Comm_accept -MPI_Comm_connect -.br - diff --git a/ompi/mpi/man/man3/MPI_Pack.3in b/ompi/mpi/man/man3/MPI_Pack.3in deleted file mode 100644 index ac1a0fb5004..00000000000 --- a/ompi/mpi/man/man3/MPI_Pack.3in +++ /dev/null @@ -1,119 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Pack 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Pack\fP \- Packs data of a given datatype into contiguous memory. 
- -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Pack(const void *\fIinbuf\fP, int\fI incount\fP, MPI_Datatype\fI datatype\fP, - void\fI *outbuf\fP, int\fI outsize\fP, int\fI *position\fP, MPI_Comm\fI comm\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_PACK(\fIINBUF, INCOUNT, DATATYPE, OUTBUF,OUTSIZE, POSITION, - COMM, IERROR\fP) - \fIINBUF(*), OUTBUF(*)\fP - INTEGER \fIINCOUNT, DATATYPE, OUTSIZE, POSITION, COMM, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Pack(\fIinbuf\fP, \fIincount\fP, \fIdatatype\fP, \fIoutbuf\fP, \fIoutsize\fP, \fIposition\fP, \fIcomm\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN) :: \fIinbuf\fP - TYPE(*), DIMENSION(..) :: \fIoutbuf\fP - INTEGER, INTENT(IN) :: \fIincount\fP, \fIoutsize\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - INTEGER, INTENT(INOUT) :: \fIposition\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -inbuf -Input buffer start (choice). -.TP 1i -incount -Number of input data items (integer). -.TP 1i -datatype -Datatype of each input data item (handle). -.TP 1i -outsize -Output buffer size, in bytes (integer). -.TP 1i -comm -Communicator for packed message (handle). - -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -position -Current position in buffer, in bytes (integer). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -outbuf -Output buffer start (choice). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -Packs the message in the send buffer specified by \fIinbuf\fP, \fIincount\fP, \fIdatatype\fP into the buffer space specified by \fIoutbuf\fP and \fIoutsize\fP. The input buffer can be any communication buffer allowed in MPI_Send. 
The output buffer is a contiguous storage area containing \fIoutsize\fP bytes, starting at the address \fIoutbuf\fP (length is counted in bytes, not elements, as if it were a communication buffer for a message of type MPI_Packed). -.sp -The input value of \fIposition\fP is the first location in the output buffer to be used for packing. \fIposition\fP is incremented by the size of the packed message, and the output value of \fIposition\fP is the first location in the output buffer following the locations occupied by the packed message. The \fIcomm\fP argument is the communicator that will be subsequently used for sending the packed message. -.sp -\fBExample:\fP An example using MPI_Pack: -.sp -.nf - int position, i, j, a[2]; - char buff[1000]; - - \&.... - - MPI_Comm_rank(MPI_COMM_WORLD, &myrank); - if (myrank == 0) - { - / * SENDER CODE */ - - position = 0; - MPI_Pack(&i, 1, MPI_INT, buff, 1000, &position, MPI_COMM_WORLD); - MPI_Pack(&j, 1, MPI_INT, buff, 1000, &position, MPI_COMM_WORLD); - MPI_Send( buff, position, MPI_PACKED, 1, 0, MPI_COMM_WORLD); - } - else /* RECEIVER CODE */ - MPI_Recv( a, 2, MPI_INT, 0, 0, MPI_COMM_WORLD) - - } - -.fi -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. 
- -.SH SEE ALSO -.ft R -MPI_Unpack -.br -MPI_Pack_size - diff --git a/ompi/mpi/man/man3/MPI_Pack_external.3in b/ompi/mpi/man/man3/MPI_Pack_external.3in deleted file mode 100644 index d785c87c3ae..00000000000 --- a/ompi/mpi/man/man3/MPI_Pack_external.3in +++ /dev/null @@ -1,205 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Pack_external 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" - -.SH NAME -\fBMPI_Pack_external\fP \- Writes data to a portable format - -.SH SYNTAX -.ft R - -.SH C Syntax -.nf -#include -int MPI_Pack_external(const char *\fIdatarep\fP, const void *\fIinbuf\fP, - int \fIincount\fP, MPI_Datatype\fI datatype\fP, - void *\fIoutbuf\fP, MPI_Aint \fIoutsize\fP, - MPI_Aint *\fIposition\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_PACK_EXTERNAL(\fIDATAREP, INBUF, INCOUNT, DATATYPE, - OUTBUF, OUTSIZE, POSITION, IERROR\fP) - - INTEGER \fIINCOUNT, DATATYPE, IERROR\fP - INTEGER(KIND=MPI_ADDRESS_KIND) \fIOUTSIZE, POSITION\fP - CHARACTER*(*) \fIDATAREP\fP - \fIINBUF(*), OUTBUF(*)\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Pack_external(\fIdatarep\fP, \fIinbuf\fP, \fIincount\fP, \fIdatatype\fP, \fIoutbuf\fP, \fIoutsize\fP, - \fIposition\fP, \fIierror\fP) - CHARACTER(LEN=*), INTENT(IN) :: \fIdatarep\fP - TYPE(*), DIMENSION(..), INTENT(IN) :: \fIinbuf\fP - TYPE(*), DIMENSION(..) 
:: \fIoutbuf\fP - INTEGER, INTENT(IN) :: \fIincount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: \fIoutsize\fP - INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(INOUT) :: \fIposition\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -datarep -Data representation (string). -.ft R -.TP 1i -inbuf -Input buffer start (choice). -.TP 1i -incount -Number of input data items (integer). -.TP 1i -datatype -Datatype of each input data item (handle). -.TP 1i -outsize -Output buffer size, in bytes (integer). - -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -position -Current position in buffer, in bytes (integer). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -outbuf -Output buffer start (choice). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Pack_external packs data into the external32 format, a universal -data representation defined by the MPI Forum. This format is useful -for exchanging data between MPI implementations, or when writing data -to a file. -.sp -The input buffer is specified by \fIinbuf\fP, \fIincount\fP and -\fIdatatype\fP, and may be any communication buffer allowed in -MPI_Send. The output buffer \fIoutbuf\fP must be a contiguous storage -area containing \fIoutsize\fP bytes. -.sp -The input value of \fIposition\fP is the first position in -\fIoutbuf\fP to be used for packing (measured in bytes, not elements, -relative to the start of the buffer). When the function returns, -\fIposition\fP is incremented by the size of the packed message, so -that it points to the first location in \fIoutbuf\fP following the -packed message. This way it may be used as input to a subsequent call -to MPI_Pack_external. -.sp - -\fBExample:\fP An example using MPI_Pack_external: -.sp -.nf - int position, i; - double msg[5]; - char buf[1000]; - - \&... 
- - MPI_Comm_rank(MPI_COMM_WORLD, &myrank); - if (myrank == 0) { /* SENDER CODE */ - position = 0; - i = 5; /* number of doubles in msg[] */ - MPI_Pack_external("external32", &i, 1, MPI_INT, - buf, 1000, &position); - MPI_Pack_external("external32", &msg, i, MPI_DOUBLE, - buf, 1000, &position); - MPI_Send(buf, position, MPI_BYTE, 1, 0, - MPI_COMM_WORLD); - } else { /* RECEIVER CODE */ - MPI_Recv(buf, 1, MPI_BYTE, 0, 0, MPI_COMM_WORLD, - MPI_STATUS_IGNORE); - MPI_Unpack_external("external32", buf, 1000, - MPI_INT, &i, 1, &position); - MPI_Unpack_external("external32", buf, 1000, - MPI_DOUBLE, &msg, i, &position); - } - -.fi -.SH NOTES -.ft R -The \fIdatarep\fP argument specifies the data format. The only valid -value in the current version of MPI is "external32". The argument is -provided for future extensibility. -.sp -To understand the behavior of pack and unpack, it is convenient to -think of the data part of a message as being the sequence obtained by -concatenating the successive values sent in that message. The pack -operation stores this sequence in the buffer space, as if sending the -message to that buffer. The unpack operation retrieves this sequence -from buffer space, as if receiving a message from that buffer. (It is -helpful to think of internal Fortran files or sscanf in C for a -similar function.) -.sp -Several messages can be successively packed into one packing -unit. This is effected by several successive related calls to -MPI_Pack_external, where the first call provides \fIposition\fP=0, -and each successive call inputs the value of \fIposition\fP that was -output by the previous call, along with the same values for -\fIoutbuf\fP and \fIoutcount\fP. This packing unit now contains the -equivalent information that would have been stored in a message by one -send call with a send buffer that is the "concatenation" of the -individual send buffers. -.sp -A packing unit can be sent using type MPI_BYTE. 
Any point-to-point -or collective communication function can be used to move the sequence -of bytes that forms the packing unit from one process to another. This -packing unit can now be received using any receive operation, with any -datatype. (The type-matching rules are relaxed for messages sent with -type MPI_BYTE.) -.sp -A packing unit can be unpacked into several successive messages. This -is effected by several successive related calls to -MPI_Unpack_external, where the first call provides \fIposition\fP=0, -and each successive call inputs the value of position that was output -by the previous call, and the same values for \fIinbuf\fP and -\fIinsize\fP. -.sp -The concatenation of two packing units is not necessarily a packing -unit; nor is a substring of a packing unit necessarily a packing -unit. Thus, one cannot concatenate two packing units and then unpack -the result as one packing unit; nor can one unpack a substring of a -packing unit as a separate packing unit. Each packing unit that was -created by a related sequence of pack calls must be unpacked as a unit -by a sequence of related unpack calls. - -.SH ERRORS -.ft R -Almost all MPI routines return an error value; C routines as -the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. -.sp -See the MPI man page for a full list of MPI error codes. 
- -.SH SEE ALSO -.ft R -.nf -MPI_Pack_external_size -MPI_Send -MPI_Unpack_external -sscanf(3C) - diff --git a/ompi/mpi/man/man3/MPI_Pack_external_size.3in b/ompi/mpi/man/man3/MPI_Pack_external_size.3in deleted file mode 100644 index 8d67ddeb66b..00000000000 --- a/ompi/mpi/man/man3/MPI_Pack_external_size.3in +++ /dev/null @@ -1,105 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Pack_external_size 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" - -.SH NAME -\fBMPI_Pack_external_size\fP \- Calculates upper bound on space needed -to write to a portable format - -.SH SYNTAX -.ft R - -.SH C Syntax -.nf -#include -int MPI_Pack_external_size(char *\fIdatarep\fP, int \fIincount\fP, - MPI_Datatype \fIdatatype\fP, MPI_Aint *\fIsize\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_PACK_EXTERNAL_SIZE(\fIDATAREP, INCOUNT, DATATYPE, SIZE, IERROR\fP) - - INTEGER \fIINCOUNT, DATATYPE, IERROR\fP - INTEGER(KIND=MPI_ADDRESS_KIND) \fISIZE\fP - CHARACTER*(*) \fIDATAREP\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Pack_external_size(\fIdatarep\fP, \fIincount\fP, \fIdatatype\fP, \fIsize\fP, \fIierror\fP) - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - INTEGER, INTENT(IN) :: \fIincount\fP - CHARACTER(LEN=*), INTENT(IN) :: \fIdatarep\fP - INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(OUT) :: \fIsize\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -datarep -Data representation (string). -.TP 1i -incount -Number of input data items (integer). -.TP 1i -datatype -Datatype of each input data item (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -size -Upper bound on size of packed message, in bytes (integer). -.TP 1i -IERROR -Fortran only: Error status (integer). 
- -.SH DESCRIPTION -.ft R -MPI_Pack_external_size allows the application to find out how much -space is needed to pack a message in the portable format defined by -the MPI Forum. It returns in \fIsize\fP an upper bound on the -increment in \fIposition\fP that would occur in a call to -MPI_Pack_external with the same values for \fIdatarep\fP, -\fIincount\fP, and \fIdatatype\fP. -.sp -The call returns an upper bound, rather than an exact bound, as the -exact amount of space needed to pack the message may depend on context -and alignment (e.g., the first message packed in a packing unit may -take more space). - -.SH NOTES -.ft R -The \fIdatarep\fP argument specifies the data format. The only valid -value in the current version of MPI is "external32". The argument is -provided for future extensibility. - -.SH ERRORS -.ft R -Almost all MPI routines return an error value; C routines as -the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. -.sp -See the MPI man page for a full list of MPI error codes. - -.SH SEE ALSO -.ft R -.nf -MPI_Pack_external -MPI_Unpack_external - diff --git a/ompi/mpi/man/man3/MPI_Pack_size.3in b/ompi/mpi/man/man3/MPI_Pack_size.3in deleted file mode 100644 index 3abc9e435bf..00000000000 --- a/ompi/mpi/man/man3/MPI_Pack_size.3in +++ /dev/null @@ -1,81 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. 
-.\" $COPYRIGHT$ -.TH MPI_Pack_size 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Pack_size\fP \- Returns the upper bound on the amount of space needed to pack a message. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Pack_size(int \fIincount\fP, MPI_Datatype\fI datatype\fP, MPI_Comm\fI comm\fP, - int\fI *size\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_PACK_SIZE(\fIINCOUNT, DATATYPE, COMM, SIZE, IERROR\fP) - INTEGER \fIINCOUNT, DATATYPE, COMM, SIZE, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Pack_size(\fIincount\fP, \fIdatatype\fP, \fIcomm\fP, \fIsize\fP, \fIierror\fP) - INTEGER, INTENT(IN) :: \fIincount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - INTEGER, INTENT(OUT) :: \fIsize\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -incount -Count argument to packing call (integer). -.TP 1i -datatype -Datatype argument to packing call (handle). -.TP 1i -comm -Communicator argument to packing call (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -size -Upper bound on size of packed message, in bytes (integer). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Pack_size allows the application to find out how much space is needed to pack a message. A call to MPI_Pack_size(incount, datatype, comm, size) returns in size an -upper bound on the increment in position that would occur in a call to MPI_Pack, with the same values for \fIincount\fP, \fIdatatype\fP, and \fIcomm\fP. -.sp -\fBRationale:\fP The call returns an upper bound, rather than an exact bound, since the exact amount of space needed to pack the message may depend on the context (e.g., first message packed in a packing unit may take more space). 
- -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.sp -MPI_Pack -.br -MPI_Unpack - - diff --git a/ompi/mpi/man/man3/MPI_Parrived.3in b/ompi/mpi/man/man3/MPI_Parrived.3in deleted file mode 100644 index a7ffa129b0b..00000000000 --- a/ompi/mpi/man/man3/MPI_Parrived.3in +++ /dev/null @@ -1,66 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright (c) 2010-2015 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" Copyright (c) 2020 Sandia National Laboratories. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Parrived 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Parrived\fP \- Tests for completion of a specified receive-side partition. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Parrived(MPI_Request\fI *request\fP, int\fI partition\fP, int\fI *flag\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h'
-MPI_PARRIVED(REQUEST, PARTITION, FLAG, IERROR\fP)
-    INTEGER \fIREQUEST, PARTITION, FLAG(*), IERROR\fP
-
-.fi
-.SH Fortran 2008 Syntax
-.nf
-USE mpi_f08
-MPI_Parrived(\fIrequest\fP, \fIpartition\fP, \fIflag\fP, \fIierror\fP)
-    TYPE(MPI_Request), INTENT(in) :: \fIrequest\fP
-    INTEGER, INTENT(IN) :: \fIpartition\fP
-    INTEGER, INTENT(out) :: \fIflag\fP
-    INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP
-
-.fi
-.SH INPUT PARAMETERS
-.ft R
-.TP 1i
-request
-Communication request (handle).
-.TP 1i
-partition
-The number of the partition to test for completion (integer).
-
-.SH OUTPUT PARAMETERS
-.ft R
-.TP 1i
-flag
-True if partition is completed.
-.ft R
-.TP 1i
-IERROR
-Fortran only: Error status (integer).
-
-.SH ERRORS
-Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument.
-.sp
-Before the error value is returned, the current MPI error handler is
-called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error.
-
-.SH SEE ALSO
-MPI_Pready_list, MPI_Pready_range, MPI_Parrived
-.br
diff --git a/ompi/mpi/man/man3/MPI_Pcontrol.3in b/ompi/mpi/man/man3/MPI_Pcontrol.3in
deleted file mode 100644
index 4a15f069c1d..00000000000
--- a/ompi/mpi/man/man3/MPI_Pcontrol.3in
+++ /dev/null
@@ -1,71 +0,0 @@
-.\" -*- nroff -*-
-.\" Copyright 2006-2008 Sun Microsystems, Inc.
-.\" Copyright (c) 1996 Thinking Machines Corporation
-.\" Copyright (c) 2020 Google, LLC. All rights reserved.
-.\" $COPYRIGHT$
-.TH MPI_Pcontrol 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#"
-.SH NAME
-\fBMPI_Pcontrol\fP \- Controls profiling.
-
-.SH SYNTAX
-.ft R
-.SH C Syntax
-.nf
-#include
-int MPI_Pcontrol(const int \fIlevel\fP, \&... 
) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_PCONTROL(\fILEVEL\fP) - INTEGER \fILEVEL\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Pcontrol(\fIlevel\fP) - INTEGER, INTENT(IN) :: \fIlevel\fP - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -level -Profiling level. - -.SH DESCRIPTION -.ft R -MPI libraries themselves make no use of this routine; they simply return immediately to the user code. However the presence of calls to this routine allows a profiling package to be explicitly called by the user. -.sp -Since MPI has no control of the implementation of the profiling code, we are unable to specify precisely the semantics that will be provided by calls to MPI_Pcontrol. This vagueness extends to the number of arguments to the function, and their datatypes. -.sp -However to provide some level of portability of user codes to different -profiling libraries, we request the following meanings for certain values of level: -.TP - o -level==0 Profiling is disabled. -.TP - o -level==1 Profiling is enabled at a normal default level of detail. -.TP - o -level==2 Profile buffers are flushed. (This may be a no-op in some -profilers). -.TP - o -All other values of level have profile library-defined effects and additional arguments. -.LP -.sp -We also request that the default state after MPI_Init has been called is for profiling to be enabled at the normal default level (i.e., as if MPI_Pcontrol had just been called with the argument 1). This allows users to link with a profiling library and obtain profile output without having to modify their source code at all. -.sp -The provision of MPI_Pcontrol as a no-op in the standard MPI library allows users to modify their source code to obtain more detailed profiling information, but still be able to link exactly the same code against the standard MPI library. - -.SH NOTES -.ft R -This routine provides a common interface for profiling control. 
The interpretation of level and any other arguments is left to the profiling library. -.sp -This function does not return an error value. Consequently, the result of calling it before MPI_Init or after MPI_Finalize is undefined. - diff --git a/ompi/mpi/man/man3/MPI_Pready.3in b/ompi/mpi/man/man3/MPI_Pready.3in deleted file mode 100644 index 609f3fba2c4..00000000000 --- a/ompi/mpi/man/man3/MPI_Pready.3in +++ /dev/null @@ -1,61 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright (c) 2010-2015 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" Copyright (c) 2020 Sandia National Laboratories. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Pready 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Pready\fP \- Indicates that a given send-side partition is ready to be transferred. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Pready(int\fI partition\fP, MPI_Request\fI *request\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_PREADY(PARTITION, REQUEST, IERROR\fP) - INTEGER \fIPARTITION, REQUEST, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Pready(\fIpartition\fP, \fIrequest\fP, \fIierror\fP) - INTEGER, INTENT(IN) :: \fIpartition\fP - TYPE(MPI_Request), INTENT(IN) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -partition -The number of the partition to mark ready for transfer (integer). -.TP 1i -request -Communication request (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. 
-.sp
-Before the error value is returned, the current MPI error handler is
-called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error.
-
-.SH SEE ALSO
-MPI_Pready_list, MPI_Pready_range, MPI_Parrived
-.br
diff --git a/ompi/mpi/man/man3/MPI_Pready_list.3in b/ompi/mpi/man/man3/MPI_Pready_list.3in
deleted file mode 100644
index 44302fa6232..00000000000
--- a/ompi/mpi/man/man3/MPI_Pready_list.3in
+++ /dev/null
@@ -1,65 +0,0 @@
-.\" -*- nroff -*-
-.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
-.\" Copyright (c) 2010-2015 Cisco Systems, Inc. All rights reserved.
-.\" Copyright 2006-2008 Sun Microsystems, Inc.
-.\" Copyright (c) 1996 Thinking Machines Corporation
-.\" Copyright (c) 2020 Google, LLC. All rights reserved.
-.\" Copyright (c) 2020 Sandia National Laboratories. All rights reserved.
-.\" $COPYRIGHT$
-.TH MPI_Pready_list 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#"
-.SH NAME
-\fBMPI_Pready_list\fP \- Indicates that a list of given send-side partitions are ready to be transferred.
-
-.SH SYNTAX
-.ft R
-.SH C Syntax
-.nf
-#include
-int MPI_Pready_list(int\fI length\fP, int\fI *partitions\fP, MPI_Request\fI *request\fP)
-
-.fi
-.SH Fortran Syntax
-.nf
-USE MPI
-! 
or the older form: INCLUDE 'mpif.h' -MPI_PREADY_LIST(LENGTH, PARTITIONS, REQUEST, IERROR\fP) - INTEGER \fILENGTH, PARTITIONS(*), REQUEST, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Pready_list(\fIlength\fP, \fIpartitions\fP, \fIrequest\fP, \fIierror\fP) - INTEGER, INTENT(IN) :: \fIlength\fP - INTEGER, INTENT(IN) :: \fIpartitions\fP - TYPE(MPI_Request), INTENT(IN) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -length -The length of the given partition array (integer). -.TP 1i -partitions -An array of numbers of partitions to mark ready for transfer (integer). -.TP 1i -request -Communication request (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -MPI_Pready, MPI_Pready_range, MPI_Parrived -.br diff --git a/ompi/mpi/man/man3/MPI_Pready_range.3in b/ompi/mpi/man/man3/MPI_Pready_range.3in deleted file mode 100644 index d32b30141b7..00000000000 --- a/ompi/mpi/man/man3/MPI_Pready_range.3in +++ /dev/null @@ -1,64 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright (c) 2010-2015 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. 
-.\" Copyright (c) 2020 Sandia National Laboratories. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Pready_range 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Pready_range\fP \- Indicates that a given range os send-side partitions are ready to be transferred. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Pready_range(int\fI partition_low\fP, int\fI partition_high\fP, MPI_Request\fI *request\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_PREADY(PARTITION_LOW, PARTITION_HIGH, REQUEST, IERROR\fP) - INTEGER \fIPARTITION_LOW, PARTITION_HIGH, REQUEST, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Pready(\fIpartition_low\fP, \fIpartition_high\fP, \fIrequest\fP, \fIierror\fP) - INTEGER, INTENT(IN) :: \fIpartition_low\fP, \fIpartition_high\fP - TYPE(MPI_Request), INTENT(IN) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -partition_low -The lowest of the range of partitions to mark ready for transfer (integer). -.TP 1i -partition_high -The highest of the range of partitions to mark ready for transfer (integer). -.TP 1i -request -Communication request (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. 
- -.SH SEE ALSO -MPI_Pready, MPI_Pready_list, MPI_Parrived -.br diff --git a/ompi/mpi/man/man3/MPI_Precv_init.3in b/ompi/mpi/man/man3/MPI_Precv_init.3in deleted file mode 100644 index 4db68db98af..00000000000 --- a/ompi/mpi/man/man3/MPI_Precv_init.3in +++ /dev/null @@ -1,89 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright (c) 2010-2015 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" Copyright (c) 2020 Sandia National Laboratories. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Precv_init 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Precv_init\fP \- Initializes a partitioned receive. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Precv_init(const void *\fIbuf\fP, int\fI partitions\fP, int\fI count\fP, MPI_Datatype\fI datatype\fP, int\fI dest\fP, - int\fI tag\fP, MPI_Comm\fI comm\fP, MPI_Request\fI *request\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_PRECV_INIT(\fIBUF, PARTITIONS, COUNT, DATATYPE, DEST, TAG, COMM, REQUEST, IERROR\fP) - \fIBUF\fP(*) - INTEGER \fIPARTITIONS, COUNT, DATATYPE, DEST, TAG, COMM, REQUEST, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Precv_init(\fIbuf\fP, \fIpartitions\fP, \fIcount\fP, \fIdatatype\fP, \fIdest\fP, \fItag\fP, \fIcomm\fP, \fIrequest\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIbuf\fP - INTEGER, INTENT(IN) :: \fIpartitions\fP, \fIcount\fP, \fIdest\fP, \fItag\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -buf -Initial address of receive buffer (choice). 
-.TP 1i -partitions -Number of partitions (integer). -.TP 1i -count -Number of elements to be received per partition (integer). -.TP 1i -datatype -Datatype of each element (handle). -.TP 1i -dest -Rank of source (integer). -.TP 1i -tag -Message tag (integer). -.TP 1i -comm -Communicator (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -request -Communication request (handle). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH NOTE -.ft R -The current implementation is an early prototype and is not fully compliant with the MPI-4.0 specification. Specifically this function and it's counterpart (MPI_Psend_init) will block until the partitioned communication request is initialized on both ends. This behavior will be corrected in future versions. - -.SH SEE ALSO -MPI_Psend_init -.br diff --git a/ompi/mpi/man/man3/MPI_Probe.3in b/ompi/mpi/man/man3/MPI_Probe.3in deleted file mode 100644 index a7839a0c8af..00000000000 --- a/ompi/mpi/man/man3/MPI_Probe.3in +++ /dev/null @@ -1,129 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Probe 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Probe\fP \- Blocking test for a message. 
- -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Probe(int \fIsource\fP, int\fI tag\fP, MPI_Comm\fI comm\fP, MPI_Status\fI *status\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_PROBE(\fISOURCE, TAG, COMM, STATUS, IERROR\fP) - INTEGER \fISOURCE, TAG, COMM, STATUS(MPI_STATUS_SIZE), IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Probe(\fIsource\fP, \fItag\fP, \fIcomm\fP, \fIstatus\fP, \fIierror\fP) - INTEGER, INTENT(IN) :: \fIsource\fP, \fItag\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Status) :: \fIstatus\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -source -Source rank or MPI_ANY_SOURCE (integer). -.TP 1i -tag -Tag value or MPI_ANY_TAG (integer). -.TP 1i -comm -Communicator (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -status -Status object (status). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -The MPI_Probe and MPI_Iprobe operations allow checking of incoming messages, without actual receipt of them. The user can then decide how to receive them, based on the information returned by the probe in the status variable. For example, the user may allocate memory for the receive buffer, according to the length of the probed message. -.sp -MPI_Probe behaves like MPI_Iprobe except that it is a blocking call that returns only after a matching message has been found. -.sp -If your application does not need to examine the \fIstatus\fP field, you can save resources by using the predefined constant MPI_STATUS_IGNORE as a special value for the \fIstatus\fP argument. 
-.sp -The semantics of MPI_Probe and MPI_Iprobe guarantee progress: If a call to MPI_Probe has been issued by a process, and a send that matches the probe has been initiated by some process, then the call to MPI_Probe will return, unless the message is received by another concurrent receive operation (that is executed by another thread at the probing process). Similarly, if a process busy waits with MPI_Iprobe and a matching message has been issued, then the call to MPI_Iprobe will eventually return flag = true unless the message is received by another concurrent receive operation. -.sp -\fBExample 1:\fP Use blocking probe to wait for an incoming message. -.sp -.nf -CALL MPI_COMM_RANK(comm, rank, ierr) - IF (rank.EQ.0) THEN - CALL MPI_SEND(i, 1, MPI_INTEGER, 2, 0, comm, ierr) - ELSE IF(rank.EQ.1) THEN - CALL MPI_SEND(x, 1, MPI_REAL, 2, 0, comm, ierr) - ELSE ! rank.EQ.2 - DO i=1, 2 - CALL MPI_PROBE(MPI_ANY_SOURCE, 0, - comm, status, ierr) - IF (status(MPI_SOURCE) = 0) THEN -100 CALL MPI_RECV(i, 1, MPI_INTEGER, 0, 0, status, ierr) - ELSE -200 CALL MPI_RECV(x, 1, MPI_REAL, 1, 0, status, ierr) - END IF - END DO - END IF -.fi -.sp -Each message is received with the right type. -.sp -\fBExample 2:\fP A program similar to the previous example, but with a problem. -.sp -.nf -CALL MPI_COMM_RANK(comm, rank, ierr) - IF (rank.EQ.0) THEN - CALL MPI_SEND(i, 1, MPI_INTEGER, 2, 0, comm, ierr) - ELSE IF(rank.EQ.1) THEN - CALL MPI_SEND(x, 1, MPI_REAL, 2, 0, comm, ierr) - ELSE - DO i=1, 2 - CALL MPI_PROBE(MPI_ANY_SOURCE, 0, - comm, status, ierr) - IF (status(MPI_SOURCE) = 0) THEN -100 CALL MPI_RECV(i, 1, MPI_INTEGER, MPI_ANY_SOURCE, - 0, status, ierr) - ELSE -200 CALL MPI_RECV(x, 1, MPI_REAL, MPI_ANY_SOURCE, - 0, status, ierr) - END IF - END DO - END IF -.fi -.sp -We slightly modified Example 2, using MPI_ANY_SOURCE as the source argument in the two receive calls in statements labeled 100 and 200. 
The program is now incorrect: The receive operation may receive a message that is distinct from the message probed by the preceding call to MPI_Probe. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.sp -MPI_Iprobe -.br -MPI_Cancel - diff --git a/ompi/mpi/man/man3/MPI_Psend_init.3in b/ompi/mpi/man/man3/MPI_Psend_init.3in deleted file mode 100644 index 75ec8443d30..00000000000 --- a/ompi/mpi/man/man3/MPI_Psend_init.3in +++ /dev/null @@ -1,89 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright (c) 2010-2015 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" Copyright (c) 2020 Sandia National Laboratories. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Psend_init 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Psend_init\fP \- Initializes a partitioned send. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Psend_init(const void *\fIbuf\fP, int\fI partitions\fP, int\fI count\fP, MPI_Datatype\fI datatype\fP, int\fI dest\fP, - int\fI tag\fP, MPI_Comm\fI comm\fP, MPI_Request\fI *request\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_PSEND_INIT(\fIBUF, PARTITIONS, COUNT, DATATYPE, DEST, TAG, COMM, REQUEST, IERROR\fP) - \fIBUF\fP(*) - INTEGER \fIPARTITIONS, COUNT, DATATYPE, DEST, TAG, COMM, REQUEST, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Psend_init(\fIbuf\fP, \fIpartitions\fP, \fIcount\fP, \fIdatatype\fP, \fIdest\fP, \fItag\fP, \fIcomm\fP, \fIrequest\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIbuf\fP - INTEGER, INTENT(IN) :: \fIpartitions\fP, \fIcount\fP, \fIdest\fP, \fItag\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -buf -Initial address of send buffer (choice). -.TP 1i -partitions -Number of partitions (integer). -.TP 1i -count -Number of elements to be sent per partition (integer). -.TP 1i -datatype -Datatype of each element (handle). -.TP 1i -dest -Rank of source (integer). -.TP 1i -tag -Message tag (integer). -.TP 1i -comm -Communicator (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -request -Communication request (handle). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH NOTE -.ft R -The current implementation is an early prototype and is not fully compliant with the MPI-4.0 specification. 
Specifically this function and it's counterpart (MPI_Precv_init) will block until the partitioned communication request is initialized on both ends. This behavior will be corrected in future versions. - -.SH SEE ALSO -MPI_Precv_init -.br diff --git a/ompi/mpi/man/man3/MPI_Publish_name.3in b/ompi/mpi/man/man3/MPI_Publish_name.3in deleted file mode 100644 index 36fbfc7523b..00000000000 --- a/ompi/mpi/man/man3/MPI_Publish_name.3in +++ /dev/null @@ -1,167 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2007-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Publish_name 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" - -.SH NAME -.nf -\fBMPI_Publish_name\fP \- Publishes a service name associated with a port - -.fi -.SH SYNTAX -.ft R - -.SH C Syntax -.nf -#include -int MPI_Publish_name(const char *\fIservice_name\fP, MPI_Info \fIinfo\fP, - const char *\fIport_name\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_PUBLISH_NAME(\fISERVICE_NAME, INFO, PORT_NAME, IERROR\fP) - CHARACTER*(*) \fISERVICE_NAME, PORT_NAME\fP - INTEGER \fIINFO, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Publish_name(\fIservice_name\fP, \fIinfo\fP, \fIport_name\fP, \fIierror\fP) - TYPE(MPI_Info), INTENT(IN) :: \fIinfo\fP - CHARACTER(LEN=*), INTENT(IN) :: \fIservice_name\fP, \fIport_name\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1.4i -service_name -A service name (string). -.TP 1.4i -info -Options to the name service functions (handle). -.ft R -.TP 1.4i -port_name -A port name (string). - -.SH OUTPUT PARAMETER -.TP 1.4i -IERROR -Fortran only: Error status (integer). 
- -.SH DESCRIPTION -.ft R -This routine publishes the pair (\fIservice_name, port_name\fP) so that -an application may retrieve \fIport_name\fP by calling MPI_Lookup_name -with \fIservice_name\fP as an argument. It is an error to publish the same -\fIservice_name\fP twice, or to use a \fIport_name\fP argument that was -not previously opened by the calling process via a call to MPI_Open_port. - -.SH INFO ARGUMENTS -The following keys for \fIinfo\fP are recognized: -.sp -.sp -.nf -Key Type Description ---- ---- ----------- - -ompi_global_scope bool If set to true, publish the name in - the global scope. Publish in the local - scope otherwise. See the NAME SCOPE - section for more details. - -ompi_unique bool If set to true, return an error if the - specified service_name already exists. - Default to overwriting any pre-existing - value. -.fi - -.sp -\fIbool\fP info keys are actually strings but are evaluated as -follows: if the string value is a number, it is converted to an -integer and cast to a boolean (meaning that zero integers are false -and non-zero values are true). If the string value is -(case-insensitive) "yes" or "true", the boolean is true. If the -string value is (case-insensitive) "no" or "false", the boolean is -false. All other string values are unrecognized, and therefore false. -.PP -If no info key is provided, the function will first check to see if a -global server has been specified and is available. If so, then the -publish function will default to global scope first, followed by local. Otherwise, -the data will default to publish with local scope. - -.SH NAME SCOPE -Open MPI supports two name scopes: \fIglobal\fP and \fIlocal\fP. Local scope will -place the specified service/port pair in a data store located on the -mpirun of the calling process' job. Thus, data published with local -scope will only be accessible to processes in jobs spawned by that -mpirun - e.g., processes in the calling process' job, or in jobs -spawned via MPI_Comm_spawn. 
-.sp -Global scope places the specified service/port pair in a data store -located on a central server that is accessible to all jobs running -in the cluster or environment. Thus, data published with global -scope can be accessed by multiple mpiruns and used for MPI_Comm_Connect -and MPI_Comm_accept between jobs. -.sp -Note that global scope operations require both the presence of the -central server and that the calling process be able to communicate -to that server. MPI_Publish_name will return an error if global -scope is specified and a global server is either not specified or -cannot be found. -.sp -Open MPI provides a server called \fIompi-server\fP to support global -scope operations. Please refer to its manual page for a more detailed -description of data store/lookup operations. -.sp -As an example of the impact of these scoping rules, consider the case -where a job has been started with -mpirun - call this job "job1". A process in job1 creates and publishes -a service/port pair using a local scope. Open MPI will store this -data in the data store within mpirun. -.sp -A process in job1 (perhaps the same as did the publish, or perhaps -some other process in the job) subsequently calls MPI_Comm_spawn to -start another job (call it "job2") under this mpirun. Since the two -jobs share a common mpirun, both jobs have access to local scope data. Hence, -a process in job2 can perform an MPI_Lookup_name with a local scope -to retrieve the information. -.sp -However, assume another user starts a job using mpirun - call -this job "job3". Because the service/port data published by job1 specified -local scope, processes in job3 cannot access that data. In contrast, if the -data had been published using global scope, then any process in job3 could -access the data, provided that mpirun was given knowledge of how to contact -the central server and the process could establish communication -with it. 
- -.SH ERRORS -.ft R -Almost all MPI routines return an error value; C routines as -the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. -.sp -See the MPI man page for a full list of MPI error codes. - -.SH SEE ALSO -.ft R -.nf -MPI_Lookup_name -MPI_Open_port - - diff --git a/ompi/mpi/man/man3/MPI_Put.3in b/ompi/mpi/man/man3/MPI_Put.3in deleted file mode 100644 index 3f1e440e134..00000000000 --- a/ompi/mpi/man/man3/MPI_Put.3in +++ /dev/null @@ -1,155 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013-2014 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Put 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Put\fP, \fBMPI_Rput\fP \- Copies data from the origin memory to the target. 
- -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -MPI_Put(const void *\fIorigin_addr\fP, int \fIorigin_count\fP, MPI_Datatype - \fIorigin_datatype\fP, int \fItarget_rank\fP, MPI_Aint \fItarget_disp\fP, - int \fItarget_count\fP, MPI_Datatype \fItarget_datatype\fP, MPI_Win \fIwin\fP) - -MPI_Rput(const void *\fIorigin_addr\fP, int \fIorigin_count\fP, MPI_Datatype - \fIorigin_datatype\fP, int \fItarget_rank\fP, MPI_Aint \fItarget_disp\fP, - int \fItarget_count\fP, MPI_Datatype \fItarget_datatype\fP, MPI_Win \fIwin\fP, - MPI_Request *\fIrequest\fP) - -.fi -.SH Fortran Syntax (see FORTRAN 77 NOTES) -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_PUT(\fIORIGIN_ADDR, ORIGIN_COUNT, ORIGIN_DATATYPE, TARGET_RANK, - TARGET_DISP, TARGET_COUNT, TARGET_DATATYPE, WIN, IERROR\fP) - \fIORIGIN_ADDR\fP(*) - INTEGER(KIND=MPI_ADDRESS_KIND) \fITARGET_DISP\fP - INTEGER \fIORIGIN_COUNT, ORIGIN_DATATYPE, TARGET_RANK, TARGET_COUNT, - TARGET_DATATYPE, WIN, IERROR\fP - -MPI_RPUT(\fIORIGIN_ADDR, ORIGIN_COUNT, ORIGIN_DATATYPE, TARGET_RANK, - TARGET_DISP, TARGET_COUNT, TARGET_DATATYPE, WIN, REQUEST, IERROR\fP) - \fIORIGIN_ADDR\fP(*) - INTEGER(KIND=MPI_ADDRESS_KIND) \fITARGET_DISP\fP - INTEGER \fIORIGIN_COUNT, ORIGIN_DATATYPE, TARGET_RANK, TARGET_COUNT, - TARGET_DATATYPE, WIN, REQUEST, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Put(\fIorigin_addr\fP, \fIorigin_count\fP, \fIorigin_datatype\fP, \fItarget_rank\fP, - \fItarget_disp\fP, \fItarget_count\fP, \fItarget_datatype\fP, \fIwin\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIorigin_addr\fP - INTEGER, INTENT(IN) :: \fIorigin_count\fP, \fItarget_rank\fP, \fItarget_count\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIorigin_datatype\fP, \fItarget_datatype\fP - INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: \fItarget_disp\fP - TYPE(MPI_Win), INTENT(IN) :: \fIwin\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -MPI_Rput(\fIorigin_addr\fP, \fIorigin_count\fP, \fIorigin_datatype\fP, 
\fItarget_rank\fP, - \fItarget_disp\fP, \fItarget_count\fP, \fItarget_datatype\fP, \fIwin\fP, \fIrequest,\fP - \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIorigin_addr\fP - INTEGER, INTENT(IN) :: \fIorigin_count\fP, \fItarget_rank\fP, \fItarget_count\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIorigin_datatype\fP, \fItarget_datatype\fP - INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: \fItarget_disp\fP - TYPE(MPI_Win), INTENT(IN) :: \fIwin\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -origin_addr -Initial address of origin buffer (choice). -.TP 1i -origin_count -Number of entries in origin buffer (nonnegative integer). -.TP 1i -origin_datatype -Data type of each entry in origin buffer (handle). -.TP 1i -target_rank -Rank of target (nonnegative integer). -.TP 1i -target_disp -Displacement from start of window to target buffer (nonnegative integer). -.TP 1i -target_count -Number of entries in target buffer (nonnegative integer). -.TP 1i -target_datatype -Data type of each entry in target buffer (handle). -.TP 1i -win -Window object used for communication (handle). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -request -MPI_Rput: RMA request -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -\fBMPI_Put\fP transfers \fIorigin_count\fP successive entries of the type specified by \fIorigin_datatype\fP, starting at address \fIorigin_addr\fP on the origin node to the target node specified by the \fIwin\fP, \fItarget_rank\fP pair. The data are written in the target buffer at address \fItarget_addr\fP = \fIwindow_base\fP + \fItarget_disp\fP x \fIdisp_unit\fP, where \fIwindow_base\fP and \fIdisp_unit\fP are the base address and window displacement unit specified at window initialization, by the target process. -.sp -The target buffer is specified by the arguments \fItarget_count\fP and \fItarget_datatype\fP. 
-.sp -The data transfer is the same as that which would occur if the origin process executed a send operation with arguments \fIorigin_addr\fP, \fIorigin_count\fP, \fIorigin_datatype\fP, \fItarget_rank\fP, \fItag\fP, \fIcomm\fP, and the target process executed a receive operation with arguments \fItarget_addr\fP, \fItarget_count\fP, \fItarget_datatype\fP, \fIsource\fP, \fItag\fP, \fIcomm\fP, where \fItarget_addr\fP is the target buffer address computed as explained above, and \fIcomm\fP is a communicator for the group of \fIwin\fP. -.sp -The communication must satisfy the same constraints as for a similar message-passing communication. The \fItarget_datatype\fP may not specify overlapping entries in the target buffer. The message sent must fit, without truncation, in the target buffer. Furthermore, the target buffer must fit in the target window. In addition, only processes within the same buffer can access the target window. -.sp -The \fItarget_datatype\fP argument is a handle to a datatype object defined at the origin process. However, this object is interpreted at the target process: The outcome is as if the target datatype object were defined at the target process, by the same sequence of calls used to define it at the origin process. The target data type must contain only relative displacements, not absolute addresses. The same holds for get and accumulate. -.sp -\fBMPI_Rput\fP is similar to \fBMPI_Put\fP, except that it allocates a communication request object and associates it with the request handle (the argument \fIrequest\fP). The completion of an MPI_Rput operation (i.e., after the corresponding test or wait) indicates that the sender is now free to update the locations in the \fIorigin_addr\fP buffer. It does not indicate that the data is available at the target window. If remote completion is required, \fBMPI_Win_flush\fP, \fBMPI_Win_flush_all\fP, \fBMPI_Win_unlock\fP, or \fBMPI_Win_unlock_all\fP can be used. 
- -.SH NOTES -The \fItarget_datatype\fP argument is a handle to a datatype object that is defined at the origin process, even though it defines a data layout in the target process memory. This does not cause problems in a homogeneous or heterogeneous environment, as long as only portable data types are used (portable data types are defined in Section 2.4 of the MPI-2 Standard). -.sp -The performance of a put transfer can be significantly affected, on some systems, from the choice of window location and the shape and location of the origin and target buffer: Transfers to a target window in memory allocated by MPI_Alloc_mem may be much faster on shared memory systems; transfers from contiguous buffers will be faster on most, if not all, systems; the alignment of the communication buffers may also impact performance. - -.SH FORTRAN 77 NOTES -.ft R -The MPI standard prescribes portable Fortran syntax for -the \fITARGET_DISP\fP argument only for Fortran 90. FORTRAN 77 -users may use the non-portable syntax -.sp -.nf - INTEGER*MPI_ADDRESS_KIND \fITARGET_DISP\fP -.fi -.sp -where MPI_ADDRESS_KIND is a constant defined in mpif.h -and gives the length of the declared integer in bytes. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. 
- -.SH SEE ALSO -.ft R -.sp -MPI_Get -MPI_Rget -.br -MPI_Accumulate -MPI_Win_flush -MPI_Win_flush_all -MPI_Win_unlock -MPI_Win_unlock_all - diff --git a/ompi/mpi/man/man3/MPI_Query_thread.3in b/ompi/mpi/man/man3/MPI_Query_thread.3in deleted file mode 100644 index c25667fc61d..00000000000 --- a/ompi/mpi/man/man3/MPI_Query_thread.3in +++ /dev/null @@ -1,95 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Query_thread 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" - -.SH NAME -\fBMPI_Query_thread\fP \- Returns the current level of thread support - -.SH SYNTAX -.ft R - -.SH C Syntax -.nf -#include -int MPI_Query_thread(int *\fIprovided\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_QUERY_THREAD(\fIPROVIDED, IERROR\fP) - INTEGER \fIPROVIDED, IERROR \fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Query_thread(\fIprovided\fP, \fIierror\fP) - INTEGER, INTENT(OUT) :: \fIprovided\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -provided -C/Fortran only: Level of thread support (integer). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -This routine returns in \fIprovided\fP the current level of thread -support. If MPI was initialized by a call to MPI_Init_thread, -\fIprovided\fP will have the same value as was returned by that -function. -.sp -The possible values of \fIprovided\fP are as follows: -.TP 2.4i -MPI_THREAD_SINGLE -Only one thread may execute. -.TP 2.4i -MPI_THREAD_FUNNELED -If the process is multithreaded, only the thread -that called MPI_Init[_thread] may make MPI calls. -.TP 2.4i -MPI_THREAD_SERIALIZED -If the process is multithreaded, only one thread -may make MPI library calls at one time. 
-.TP 2.4i -MPI_THREAD_MULTIPLE -If the process is multithreaded, multiple threads -may call MPI at once with no restrictions. - -.SH NOTES -.ft R -In Open MPI, \fIprovided\fP is always MPI_THREAD_SINGLE, unless the -program has been linked with the multithreaded library, in which case -\fIprovided\fP is MPI_THREAD_MULTIPLE. - -.SH ERRORS -.ft R -Almost all MPI routines return an error value; C routines as -the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. -.sp -See the MPI man page for a full list of MPI error codes. - -.SH SEE ALSO -.ft R -.nf -MPI_Init -MPI_Init_thread - diff --git a/ompi/mpi/man/man3/MPI_Raccumulate.3in b/ompi/mpi/man/man3/MPI_Raccumulate.3in deleted file mode 100644 index d1e293cc85f..00000000000 --- a/ompi/mpi/man/man3/MPI_Raccumulate.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Accumulate.3 diff --git a/ompi/mpi/man/man3/MPI_Recv.3in b/ompi/mpi/man/man3/MPI_Recv.3in deleted file mode 100644 index 8e01654457d..00000000000 --- a/ompi/mpi/man/man3/MPI_Recv.3in +++ /dev/null @@ -1,116 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2007-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Recv 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Recv\fP \- Performs a standard-mode blocking receive. 
- -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Recv(void *\fIbuf\fP, int\fI count\fP, MPI_Datatype\fI datatype\fP, - int\fI source\fP, int\fI tag\fP, MPI_Comm\fI comm\fP, MPI_Status\fI *status\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_RECV(\fIBUF, COUNT, DATATYPE, SOURCE, TAG, COMM, STATUS, IERROR\fP) - \fIBUF\fP(*) - INTEGER \fICOUNT, DATATYPE, SOURCE, TAG, COMM\fP - INTEGER \fISTATUS(MPI_STATUS_SIZE), IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Recv(\fIbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIsource\fP, \fItag\fP, \fIcomm\fP, \fIstatus\fP, \fIierror\fP) - TYPE(*), DIMENSION(..) :: \fIbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP, \fIsource\fP, \fItag\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Status) :: \fIstatus\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -count -Maximum number of elements to receive (integer). -.TP 1i -datatype -Datatype of each receive buffer entry (handle). -.TP 1i -source -Rank of source (integer). -.TP 1i -tag -Message tag (integer). -.TP 1i -comm -Communicator (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -buf -Initial address of receive buffer (choice). -.TP 1i -status -Status object (status). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -This basic receive operation, MPI_Recv, is blocking: it returns only after the receive buffer contains the newly received message. A receive can complete before the matching send has completed (of course, it can complete only after the matching send has started). -.sp -The blocking semantics of this call are described in Section 3.4 of the MPI-1 Standard, "Communication Modes." -.sp -The receive buffer contains a number (defined by the value of \fIcount\fP) of consecutive elements. The first element in the set of elements is located at \fIaddress_buf\fP. 
The type of each of these elements is specified by \fIdatatype\fP. -.sp -The length of the received message must be less than or equal to the length of the receive buffer. An MPI_ERR_TRUNCATE is returned upon the overflow condition. -.sp -If a message that is shorter than the length of the receive buffer arrives, then only -those locations corresponding to the (shorter) received message are modified. - -.SH NOTES -The \fIcount\fP argument indicates the maximum number of entries of type \fIdatatype\fP that can be received in a message. Once a message is received, use the MPI_Get_count function to determine the actual number of entries within that message. -.sp -To receive messages of unknown length, use the MPI_Probe function. (For more information about MPI_Probe and MPI_Cancel, see their respective man pages; also, see Section 3.8 of the MPI-1 Standard, "Probe and Cancel.") -.sp -A message can be received by a receive operation only if it is addressed to the receiving process, and if its source, tag, and communicator (comm) values match the source, tag, and comm values specified by the receive operation. The receive operation may specify a wildcard value for source and/or tag, indicating that any source and/or tag are acceptable. The wildcard value for source is source = MPI_ANY_SOURCE. The wildcard value for tag is tag = MPI_ANY_TAG. There is no wildcard value for comm. The scope of these wildcards is limited to the proceses in the group of the specified communicator. -.sp -The message tag is specified by the tag argument of the receive operation. -.sp -The argument source, if different from MPI_ANY_SOURCE, is specified as a rank within the process group associated with that same communicator (remote process group, for intercommunicators). Thus, the range of valid values for the source argument is {0,...,n-1} {MPI_ANY_SOURCE}, where n is the number of processes in this group. 
-.sp -Note the asymmetry between send and receive operations: A receive operation may accept messages from an arbitrary sender; on the other hand, a send operation must specify a unique receiver. This matches a "push" communication mechanism, where data transfer is effected by the sender (rather than a "pull" mechanism, where data transfer is effected by the receiver). -.sp -Source = destination is allowed, that is, a process can send a message to itself. However, it is not recommended for a process to send messages to itself using the blocking send and receive operations described above, since this may lead to deadlock. See Section 3.5 of the MPI-1 Standard, "Semantics of Point-to-Point Communication." -.sp -If your application does not need to examine the \fIstatus\fP field, you can save resources by using the predefined constant MPI_STATUS_IGNORE as a special value for the \fIstatus\fP argument. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.nf -MPI_Irecv -MPI_Probe - - - diff --git a/ompi/mpi/man/man3/MPI_Recv_init.3in b/ompi/mpi/man/man3/MPI_Recv_init.3in deleted file mode 100644 index d24d6be4602..00000000000 --- a/ompi/mpi/man/man3/MPI_Recv_init.3in +++ /dev/null @@ -1,105 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. 
-.\" $COPYRIGHT$ -.TH MPI_Recv_init 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Recv_init\fP \- Builds a handle for a receive. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Recv_init(void *\fIbuf\fP, int\fI count\fP, MPI_Datatype\fI datatype\fP, - int\fI source\fP, int\fI tag\fP, MPI_Comm\fI comm\fP, MPI_Request\fI *request\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_RECV_INIT(\fIBUF, COUNT, DATATYPE, SOURCE, TAG, COMM, REQUEST, - IERROR\fP) - \fIBUF\fP(*) - INTEGER \fICOUNT, DATATYPE, SOURCE, TAG, COMM, REQUEST, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Recv_init(\fIbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIsource\fP, \fItag\fP, \fIcomm\fP, \fIrequest\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP, \fIsource\fP, \fItag\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -count -Maximum number of elements to receive (integer). -.TP 1i -datatype -Type of each entry (handle). -.TP 1i -source -Rank of source (integer). -.TP 1i -tag -Message tag (integer). -.TP 1i -comm -Communicator (handle). - -.SH INPUT/OUTPUT PARAMETER -.TP 1i -buf -Initial address of receive buffer (choice). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -request -Communication request (handle). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -Creates a persistent communication request for a receive operation. The argument \fIbuf\fP is marked as OUT because the user gives permission to write on the receive buffer by passing the argument to MPI_Recv_init. -.sp -A persistent communication request is inactive after it is created -- no active communication is attached to the request. 
-.sp -A communication (send or receive) that uses a persistent request is initiated by the function MPI_Start or MPI_Startall. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.sp -MPI_Bsend_init -.br -MPI_Rsend_init -.br -MPI_Send_init -.br -MPI_Sssend_init -.br -MPI_Start -.br -MPI_Startall -.br -MPI_Request_free - diff --git a/ompi/mpi/man/man3/MPI_Reduce.3in b/ompi/mpi/man/man3/MPI_Reduce.3in deleted file mode 100644 index 8af09fe4a93..00000000000 --- a/ompi/mpi/man/man3/MPI_Reduce.3in +++ /dev/null @@ -1,498 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Reduce 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Reduce, MPI_Ireduce, MPI_Reduce_init\fP \- Reduces values on all processes within a group. 
- -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Reduce(const void *\fIsendbuf\fP, void *\fIrecvbuf\fP, int\fI count\fP, - MPI_Datatype\fI datatype\fP, MPI_Op\fI op\fP, int\fI root\fP, - MPI_Comm\fI comm\fP) - -int MPI_Ireduce(const void *\fIsendbuf\fP, void *\fIrecvbuf\fP, int\fI count\fP, - MPI_Datatype\fI datatype\fP, MPI_Op\fI op\fP, int\fI root\fP, - MPI_Comm\fI comm\fP, MPI_Request \fI*request\fP) - - -int MPI_Reduce_init(const void *\fIsendbuf\fP, void *\fIrecvbuf\fP, int\fI count\fP, - MPI_Datatype\fI datatype\fP, MPI_Op\fI op\fP, int\fI root\fP, - MPI_Comm\fI comm\fP, MPI_Info \fIinfo\fP, MPI_Request \fI*request\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_REDUCE(\fISENDBUF, RECVBUF, COUNT, DATATYPE, OP, ROOT, COMM, - IERROR\fP) - \fISENDBUF(*), RECVBUF(*)\fP - INTEGER \fICOUNT, DATATYPE, OP, ROOT, COMM, IERROR\fP - -MPI_IREDUCE(\fISENDBUF, RECVBUF, COUNT, DATATYPE, OP, ROOT, COMM, - REQUEST, IERROR\fP) - \fISENDBUF(*), RECVBUF(*)\fP - INTEGER \fICOUNT, DATATYPE, OP, ROOT, COMM, REQUEST, IERROR\fP - -MPI_REDUCE_INIT(\fISENDBUF, RECVBUF, COUNT, DATATYPE, OP, ROOT, COMM, - INFO, REQUEST, IERROR\fP) - \fISENDBUF(*), RECVBUF(*)\fP - INTEGER \fICOUNT, DATATYPE, OP, ROOT, COMM, INFO, REQUEST, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Reduce(\fIsendbuf\fP, \fIrecvbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIop\fP, \fIroot\fP, \fIcomm\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN) :: \fIsendbuf\fP - TYPE(*), DIMENSION(..) 
:: \fIrecvbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP, \fIroot\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Op), INTENT(IN) :: \fIop\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -MPI_Ireduce(\fIsendbuf\fP, \fIrecvbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIop\fP, \fIroot\fP, \fIcomm\fP, \fIrequest\fP, - \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIsendbuf\fP - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIrecvbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP, \fIroot\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Op), INTENT(IN) :: \fIop\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - - -MPI_Reduce_init(\fIsendbuf\fP, \fIrecvbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIop\fP, \fIroot\fP, \fIcomm\fP, \fIinfo\fP, \fIrequest\fP, - \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIsendbuf\fP - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIrecvbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP, \fIroot\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Op), INTENT(IN) :: \fIop\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Info), INTENT(IN) :: \fIinfo\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -sendbuf -Address of send buffer (choice). -.TP 1i -count -Number of elements in send buffer (integer). -.TP 1i -datatype -Data type of elements of send buffer (handle). -.TP 1i -op -Reduce operation (handle). -.TP 1i -root -Rank of root process (integer). -.TP 1i -comm -Communicator (handle). -.TP 1i -info -Info (handle, persistent). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -recvbuf -Address of receive buffer (choice, significant only at root). -.TP 1i -request -Request (handle, non-blocking only). 
-.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -The global reduce functions (MPI_Reduce, MPI_Op_create, MPI_Op_free, MPI_Allreduce, MPI_Reduce_scatter, MPI_Scan) perform a global reduce operation (such as sum, max, logical AND, etc.) across all the members of a group. The reduction operation can be either one of a predefined list of operations, or a user-defined operation. The global reduction functions come in several flavors: a reduce that returns the result of the reduction at one node, an all-reduce that returns this result at all nodes, and a scan (parallel prefix) operation. In addition, a reduce-scatter operation combines the functionality of a reduce and a scatter operation. -.sp -MPI_Reduce combines the elements provided in the input buffer of each process in the group, using the operation op, and returns the combined value in the output buffer of the process with rank root. The input buffer is defined by the arguments sendbuf, count, and datatype; the output buffer is defined by the arguments recvbuf, count, and datatype; both have the same number of elements, with the same type. The routine is called by all group members using the same arguments for count, datatype, op, root, and comm. Thus, all processes provide input buffers and output buffers of the same length, with elements of the same type. Each process can provide one element, or a sequence of elements, in which case the combine operation is executed element-wise on each entry of the sequence. For example, if the operation is MPI_MAX and the send buffer contains two elements that are floating-point numbers (count = 2 and datatype = MPI_FLOAT), then recvbuf(1) = global max (sendbuf(1)) and recvbuf(2) = global max(sendbuf(2)). -.sp -.SH USE OF IN-PLACE OPTION -When the communicator is an intracommunicator, you can perform a reduce operation in-place (the output buffer is used as the input buffer). 
Use the variable MPI_IN_PLACE as the value of the root process \fIsendbuf\fR. In this case, the input data is taken at the root from the receive buffer, where it will be replaced by the output data. -.sp -Note that MPI_IN_PLACE is a special kind of value; it has the same restrictions on its use as MPI_BOTTOM. -.sp -Because the in-place option converts the receive buffer into a send-and-receive buffer, a Fortran binding that includes INTENT must mark these as INOUT, not OUT. -.sp -.SH WHEN COMMUNICATOR IS AN INTER-COMMUNICATOR -.sp -When the communicator is an inter-communicator, the root process in the first group combines data from all the processes in the second group and then performs the \fIop\fR operation. The first group defines the root process. That process uses MPI_ROOT as the value of its \fIroot\fR argument. The remaining processes use MPI_PROC_NULL as the value of their \fIroot\fR argument. All processes in the second group use the rank of that root process in the first group as the value of their \fIroot\fR argument. Only the send buffer arguments are significant in the second group, and only the receive buffer arguments are significant in the root process of the first group. -.sp -.SH PREDEFINED REDUCE OPERATIONS -.sp -The set of predefined operations provided by MPI is listed below (Predefined Reduce Operations). That section also enumerates the datatypes each operation can be applied to. In addition, users may define their own operations that can be overloaded to operate on several datatypes, either basic or derived. This is further explained in the description of the user-defined operations (see the man pages for MPI_Op_create and MPI_Op_free). -.sp -The operation op is always assumed to be associative. All predefined operations are also assumed to be commutative. Users may define operations that are assumed to be associative, but not commutative. 
The ``canonical'' evaluation order of a reduction is determined by the ranks of the processes in the group. However, the implementation can take advantage of associativity, or associativity and commutativity, in order to change the order of evaluation. This may change the result of the reduction for operations that are not strictly associative and commutative, such as floating point addition. -.sp -Predefined operators work only with the MPI types listed below (Predefined Reduce Operations, and the section MINLOC and MAXLOC, below). User-defined operators may operate on general, derived datatypes. In this case, each argument that the reduce operation is applied to is one element described by such a datatype, which may contain several basic values. This is further explained in Section 4.9.4 of the MPI Standard, "User-Defined Operations." - -The following predefined operations are supplied for MPI_Reduce and related functions MPI_Allreduce, MPI_Reduce_scatter, and MPI_Scan. These operations are invoked by placing the following in op: -.sp -.nf - Name Meaning - --------- -------------------- - MPI_MAX maximum - MPI_MIN minimum - MPI_SUM sum - MPI_PROD product - MPI_LAND logical and - MPI_BAND bit-wise and - MPI_LOR logical or - MPI_BOR bit-wise or - MPI_LXOR logical xor - MPI_BXOR bit-wise xor - MPI_MAXLOC max value and location - MPI_MINLOC min value and location -.fi -.sp -The two operations MPI_MINLOC and MPI_MAXLOC are discussed separately below (MINLOC and MAXLOC). For the other predefined operations, we enumerate below the allowed combinations of op and datatype arguments. 
First, define groups of MPI basic datatypes in the following way: -.sp -.nf - C integer: MPI_INT, MPI_LONG, MPI_SHORT, - MPI_UNSIGNED_SHORT, MPI_UNSIGNED, - MPI_UNSIGNED_LONG - Fortran integer: MPI_INTEGER - Floating-point: MPI_FLOAT, MPI_DOUBLE, MPI_REAL, - MPI_DOUBLE_PRECISION, MPI_LONG_DOUBLE - Logical: MPI_LOGICAL - Complex: MPI_COMPLEX - Byte: MPI_BYTE -.fi -.sp -Now, the valid datatypes for each option is specified below. -.sp -.nf - Op Allowed Types - ---------------- --------------------------- - MPI_MAX, MPI_MIN C integer, Fortran integer, - floating-point - - MPI_SUM, MPI_PROD C integer, Fortran integer, - floating-point, complex - - MPI_LAND, MPI_LOR, C integer, logical - MPI_LXOR - - MPI_BAND, MPI_BOR, C integer, Fortran integer, byte - MPI_BXOR -.fi -.sp -\fBExample 1:\fR A routine that computes the dot product of two vectors that are distributed across a group of processes and returns the answer at process zero. -.sp -.nf - SUBROUTINE PAR_BLAS1(m, a, b, c, comm) - REAL a(m), b(m) ! local slice of array - REAL c ! result (at process zero) - REAL sum - INTEGER m, comm, i, ierr - - ! local sum - sum = 0.0 - DO i = 1, m - sum = sum + a(i)*b(i) - END DO - - ! global sum - CALL MPI_REDUCE(sum, c, 1, MPI_REAL, MPI_SUM, 0, comm, ierr) - RETURN -.fi -.sp -\fBExample 2:\fR A routine that computes the product of a vector and an array that are distributed across a group of processes and returns the answer at process zero. -.sp -.nf - SUBROUTINE PAR_BLAS2(m, n, a, b, c, comm) - REAL a(m), b(m,n) ! local slice of array - REAL c(n) ! result - REAL sum(n) - INTEGER n, comm, i, j, ierr - - ! local sum - DO j= 1, n - sum(j) = 0.0 - DO i = 1, m - sum(j) = sum(j) + a(i)*b(i,j) - END DO - END DO - - ! global sum - CALL MPI_REDUCE(sum, c, n, MPI_REAL, MPI_SUM, 0, comm, ierr) - - ! 
return result at process zero (and garbage at the other nodes) - RETURN - -.fi -.SH MINLOC AND MAXLOC -.ft R -The operator MPI_MINLOC is used to compute a global minimum and also an index attached to the minimum value. MPI_MAXLOC similarly computes a global maximum and index. One application of these is to compute a global minimum (maximum) and the rank of the process containing this value. - -.sp -The operation that defines MPI_MAXLOC is -.sp -.nf - ( u ) ( v ) ( w ) - ( ) o ( ) = ( ) - ( i ) ( j ) ( k ) - -where - - w = max(u, v) - -and - - ( i if u > v - ( - k = ( min(i, j) if u = v - ( - ( j if u < v) - - -MPI_MINLOC is defined similarly: - - ( u ) ( v ) ( w ) - ( ) o ( ) = ( ) - ( i ) ( j ) ( k ) - -where - - w = min(u, v) - -and - - ( i if u < v - ( - k = ( min(i, j) if u = v - ( - ( j if u > v) - - -.fi -.sp - -Both operations are associative and commutative. Note that if MPI_MAXLOC is -applied to reduce a sequence of pairs (u(0), 0), (u(1), 1),\ ..., (u(n-1), -n-1), then the value returned is (u , r), where u= max(i) u(i) and r is -the index of the first global maximum in the sequence. Thus, if each -process supplies a value and its rank within the group, then a reduce -operation with op = MPI_MAXLOC will return the maximum value and the rank -of the first process with that value. Similarly, MPI_MINLOC can be used to -return a minimum and its index. More generally, MPI_MINLOC computes a -lexicographic minimum, where elements are ordered according to the first -component of each pair, and ties are resolved according to the second -component. -.sp -The reduce operation is defined to operate on arguments that consist of a -pair: value and index. For both Fortran and C, types are provided to -describe the pair. The potentially mixed-type nature of such arguments is a -problem in Fortran. The problem is circumvented, for Fortran, by having the -MPI-provided type consist of a pair of the same type as value, and coercing -the index to this type also. 
In C, the MPI-provided pair type has distinct -types and the index is an int. -.sp -In order to use MPI_MINLOC and MPI_MAXLOC in a reduce operation, one must -provide a datatype argument that represents a pair (value and index). MPI -provides nine such predefined datatypes. The operations MPI_MAXLOC and -MPI_MINLOC can be used with each of the following datatypes: -.sp -.nf - Fortran: - Name Description - MPI_2REAL pair of REALs - MPI_2DOUBLE_PRECISION pair of DOUBLE-PRECISION variables - MPI_2INTEGER pair of INTEGERs - - C: - Name Description - MPI_FLOAT_INT float and int - MPI_DOUBLE_INT double and int - MPI_LONG_INT long and int - MPI_2INT pair of ints - MPI_SHORT_INT short and int - MPI_LONG_DOUBLE_INT long double and int -.fi -.sp -The data type MPI_2REAL is equivalent to: -.nf - MPI_TYPE_CONTIGUOUS(2, MPI_REAL, MPI_2REAL) -.fi -.sp -Similar statements apply for MPI_2INTEGER, MPI_2DOUBLE_PRECISION, and -MPI_2INT. -.sp -The datatype MPI_FLOAT_INT is as if defined by the following sequence of -instructions. -.sp -.nf - type[0] = MPI_FLOAT - type[1] = MPI_INT - disp[0] = 0 - disp[1] = sizeof(float) - block[0] = 1 - block[1] = 1 - MPI_TYPE_STRUCT(2, block, disp, type, MPI_FLOAT_INT) -.fi -.sp -Similar statements apply for MPI_LONG_INT and MPI_DOUBLE_INT. -.sp -\fBExample 3:\fR Each process has an array of 30 doubles, in C. For each of -the 30 locations, compute the value and rank of the process containing the -largest value. -.sp -.nf - \&... 
- /* each process has an array of 30 double: ain[30] - */ - double ain[30], aout[30]; - int ind[30]; - struct { - double val; - int rank; - } in[30], out[30]; - int i, myrank, root; - - MPI_Comm_rank(MPI_COMM_WORLD, &myrank); - for (i=0; i<30; ++i) { - in[i].val = ain[i]; - in[i].rank = myrank; - } - MPI_Reduce( in, out, 30, MPI_DOUBLE_INT, MPI_MAXLOC, root, comm ); - /* At this point, the answer resides on process root - */ - if (myrank == root) { - /* read ranks out - */ - for (i=0; i<30; ++i) { - aout[i] = out[i].val; - ind[i] = out[i].rank; - } - } -.fi -.sp -.fi -\fBExample 4:\fR Same example, in Fortran. -.sp -.nf - \&... - ! each process has an array of 30 double: ain(30) - - DOUBLE PRECISION ain(30), aout(30) - INTEGER ind(30); - DOUBLE PRECISION in(2,30), out(2,30) - INTEGER i, myrank, root, ierr; - - MPI_COMM_RANK(MPI_COMM_WORLD, myrank); - DO I=1, 30 - in(1,i) = ain(i) - in(2,i) = myrank ! myrank is coerced to a double - END DO - - MPI_REDUCE( in, out, 30, MPI_2DOUBLE_PRECISION, MPI_MAXLOC, root, - comm, ierr ); - ! At this point, the answer resides on process root - - IF (myrank .EQ. root) THEN - ! read ranks out - DO I= 1, 30 - aout(i) = out(1,i) - ind(i) = out(2,i) ! rank is coerced back to an integer - END DO - END IF -.fi -.sp -\fBExample 5:\fR Each process has a nonempty array of values. Find the minimum global value, the rank of the process that holds it, and its index on this process. 
-.sp -.nf - #define LEN 1000 - - float val[LEN]; /* local array of values */ - int count; /* local number of values */ - int myrank, minrank, minindex; - float minval; - - struct { - float value; - int index; - } in, out; - - /* local minloc */ - in.value = val[0]; - in.index = 0; - for (i=1; i < count; i++) - if (in.value > val[i]) { - in.value = val[i]; - in.index = i; - } - - /* global minloc */ - MPI_Comm_rank(MPI_COMM_WORLD, &myrank); - in.index = myrank*LEN + in.index; - MPI_Reduce( in, out, 1, MPI_FLOAT_INT, MPI_MINLOC, root, comm ); - /* At this point, the answer resides on process root - */ - if (myrank == root) { - /* read answer out - */ - minval = out.value; - minrank = out.index / LEN; - minindex = out.index % LEN; -.fi -.sp -All MPI objects (e.g., MPI_Datatype, MPI_Comm) are of type INTEGER in Fortran. -.SH NOTES ON COLLECTIVE OPERATIONS - -The reduction functions ( -.I MPI_Op -) do not return an error value. As a result, -if the functions detect an error, all they can do is either call -.I MPI_Abort -or silently skip the problem. Thus, if you change the error handler from -.I MPI_ERRORS_ARE_FATAL -to something else, for example, -.I MPI_ERRORS_RETURN -, -then no error may be indicated. - -The reason for this is the performance problems in ensuring that -all collective routines return the same error value. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. 
- -.SH SEE ALSO -.ft R -.sp -MPI_Allreduce -.br -MPI_Reduce_scatter -.br -MPI_Scan -.br -MPI_Op_create -.br -MPI_Op_free - - - diff --git a/ompi/mpi/man/man3/MPI_Reduce_init.3in b/ompi/mpi/man/man3/MPI_Reduce_init.3in deleted file mode 100644 index f8c65fb7223..00000000000 --- a/ompi/mpi/man/man3/MPI_Reduce_init.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Reduce.3 diff --git a/ompi/mpi/man/man3/MPI_Reduce_local.3in b/ompi/mpi/man/man3/MPI_Reduce_local.3in deleted file mode 100644 index 1b5d2f1ea90..00000000000 --- a/ompi/mpi/man/man3/MPI_Reduce_local.3in +++ /dev/null @@ -1,287 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright 2009-2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Reduce_local 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Reduce_local\fP \- Perform a local reduction - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Reduce_local(const void *\fIinbuf\fP, void *\fIinoutbuf\fP, int\fI count\fP, - MPI_Datatype\fI datatype\fP, MPI_Op\fI op\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_REDUCE_LOCAL(\fIINBUF, INOUTBUF, COUNT, DATATYPE, OP, IERROR\fP) - \fIINBUF(*), INOUTBUF(*)\fP - INTEGER \fICOUNT, DATATYPE, OP, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Reduce_local(\fIinbuf\fP, \fIinoutbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIop\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN) :: \fIinbuf\fP - TYPE(*), DIMENSION(..) :: \fIinoutbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Op), INTENT(IN) :: \fIop\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -inbuf -Address of input buffer (choice). 
-.TP 1i -count -Number of elements in input buffer (integer). -.TP 1i -datatype -Data type of elements of input buffer (handle). -.TP 1i -op -Reduce operation (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -inoutbuf -Address of in/out buffer (choice). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -The global reduce functions (MPI_Reduce_local, MPI_Op_create, MPI_Op_free, MPI_Allreduce, MPI_Reduce_local_scatter, MPI_Scan) perform a global reduce operation (such as sum, max, logical AND, etc.) across all the members of a group. The reduction operation can be either one of a predefined list of operations, or a user-defined operation. The global reduction functions come in several flavors: a reduce that returns the result of the reduction at one node, an all-reduce that returns this result at all nodes, and a scan (parallel prefix) operation. In addition, a reduce-scatter operation combines the functionality of a reduce and a scatter operation. -.sp -MPI_Reduce_local combines the elements provided in the input and input/output buffers of the local process, using the operation op, and returns the combined value in the inout/output buffer. The input buffer is defined by the arguments inbuf, count, and datatype; the output buffer is defined by the arguments inoutbuf, count, and datatype; both have the same number of elements, with the same type. The routine is a local call. The process can provide one element, or a sequence of elements, in which case the combine operation is executed element-wise on each entry of the sequence. For example, if the operation is MPI_MAX and the input buffer contains two elements that are floating-point numbers (count = 2 and datatype = MPI_FLOAT), then inoutbuf(1) = global max (inbuf(1)) and inoutbuf(2) = global max(inbuf(2)). -.sp -.SH USE OF IN-PLACE OPTION -The use of MPI_IN_PLACE is disallowed with MPI_Reduce_local. 
-.sp -.SH PREDEFINED REDUCE OPERATIONS -.sp -The set of predefined operations provided by MPI is listed below (Predefined Reduce Operations). That section also enumerates the datatypes each operation can be applied to. In addition, users may define their own operations that can be overloaded to operate on several datatypes, either basic or derived. This is further explained in the description of the user-defined operations (see the man pages for MPI_Op_create and MPI_Op_free). -.sp -The operation op is always assumed to be associative. All predefined operations are also assumed to be commutative. Users may define operations that are assumed to be associative, but not commutative. The ``canonical'' evaluation order of a reduction is determined by the ranks of the processes in the group. However, the implementation can take advantage of associativity, or associativity and commutativity, in order to change the order of evaluation. This may change the result of the reduction for operations that are not strictly associative and commutative, such as floating point addition. -.sp -Predefined operators work only with the MPI types listed below (Predefined Reduce Operations, and the section MINLOC and MAXLOC, below). User-defined operators may operate on general, derived datatypes. In this case, each argument that the reduce operation is applied to is one element described by such a datatype, which may contain several basic values. This is further explained in Section 4.9.4 of the MPI Standard, "User-Defined Operations." - -The following predefined operations are supplied for MPI_Reduce_local and related functions MPI_Allreduce, MPI_Reduce_scatter, and MPI_Scan. 
These operations are invoked by placing the following in op: -.sp -.nf - Name Meaning - --------- -------------------- - MPI_MAX maximum - MPI_MIN minimum - MPI_SUM sum - MPI_PROD product - MPI_LAND logical and - MPI_BAND bit-wise and - MPI_LOR logical or - MPI_BOR bit-wise or - MPI_LXOR logical xor - MPI_BXOR bit-wise xor - MPI_MAXLOC max value and location - MPI_MINLOC min value and location -.fi -.sp -The two operations MPI_MINLOC and MPI_MAXLOC are discussed separately below (MINLOC and MAXLOC). For the other predefined operations, we enumerate below the allowed combinations of op and datatype arguments. First, define groups of MPI basic datatypes in the following way: -.sp -.nf - C integer: MPI_INT, MPI_LONG, MPI_SHORT, - MPI_UNSIGNED_SHORT, MPI_UNSIGNED, - MPI_UNSIGNED_LONG - Fortran integer: MPI_INTEGER - Floating-point: MPI_FLOAT, MPI_DOUBLE, MPI_REAL, - MPI_DOUBLE_PRECISION, MPI_LONG_DOUBLE - Logical: MPI_LOGICAL - Complex: MPI_COMPLEX - Byte: MPI_BYTE -.fi -.sp -Now, the valid datatypes for each option is specified below. -.sp -.nf - Op Allowed Types - ---------------- --------------------------- - MPI_MAX, MPI_MIN C integer, Fortran integer, - floating-point - - MPI_SUM, MPI_PROD C integer, Fortran integer, - floating-point, complex - - MPI_LAND, MPI_LOR, C integer, logical - MPI_LXOR - - MPI_BAND, MPI_BOR, C integer, Fortran integer, byte - MPI_BXOR -.fi -.sp -.SH MINLOC AND MAXLOC -.ft R -The operator MPI_MINLOC is used to compute a global minimum and also an index attached to the minimum value. MPI_MAXLOC similarly computes a global maximum and index. One application of these is to compute a global minimum (maximum) and the rank of the process containing this value. 
- -.sp -The operation that defines MPI_MAXLOC is -.sp -.nf - ( u ) ( v ) ( w ) - ( ) o ( ) = ( ) - ( i ) ( j ) ( k ) - -where - - w = max(u, v) - -and - - ( i if u > v - ( - k = ( min(i, j) if u = v - ( - ( j if u < v) - - -MPI_MINLOC is defined similarly: - - ( u ) ( v ) ( w ) - ( ) o ( ) = ( ) - ( i ) ( j ) ( k ) - -where - - w = min(u, v) - -and - - ( i if u < v - ( - k = ( min(i, j) if u = v - ( - ( j if u > v) - - -.fi -.sp - -Both operations are associative and commutative. Note that if MPI_MAXLOC is -applied to reduce a sequence of pairs (u(0), 0), (u(1), 1),\ ..., (u(n-1), -n-1), then the value returned is (u , r), where u= max(i) u(i) and r is -the index of the first global maximum in the sequence. Thus, if each -process supplies a value and its rank within the group, then a reduce -operation with op = MPI_MAXLOC will return the maximum value and the rank -of the first process with that value. Similarly, MPI_MINLOC can be used to -return a minimum and its index. More generally, MPI_MINLOC computes a -lexicographic minimum, where elements are ordered according to the first -component of each pair, and ties are resolved according to the second -component. -.sp -The reduce operation is defined to operate on arguments that consist of a -pair: value and index. For both Fortran and C, types are provided to -describe the pair. The potentially mixed-type nature of such arguments is a -problem in Fortran. The problem is circumvented, for Fortran, by having the -MPI-provided type consist of a pair of the same type as value, and coercing -the index to this type also. In C, the MPI-provided pair type has distinct -types and the index is an int. -.sp -In order to use MPI_MINLOC and MPI_MAXLOC in a reduce operation, one must -provide a datatype argument that represents a pair (value and index). MPI -provides nine such predefined datatypes. 
The operations MPI_MAXLOC and -MPI_MINLOC can be used with each of the following datatypes: -.sp -.nf - Fortran: - Name Description - MPI_2REAL pair of REALs - MPI_2DOUBLE_PRECISION pair of DOUBLE-PRECISION variables - MPI_2INTEGER pair of INTEGERs - - C: - Name Description - MPI_FLOAT_INT float and int - MPI_DOUBLE_INT double and int - MPI_LONG_INT long and int - MPI_2INT pair of ints - MPI_SHORT_INT short and int - MPI_LONG_DOUBLE_INT long double and int -.fi -.sp -The data type MPI_2REAL is equivalent to: -.nf - MPI_TYPE_CONTIGUOUS(2, MPI_REAL, MPI_2REAL) -.fi -.sp -Similar statements apply for MPI_2INTEGER, MPI_2DOUBLE_PRECISION, and -MPI_2INT. -.sp -The datatype MPI_FLOAT_INT is as if defined by the following sequence of -instructions. -.sp -.nf - type[0] = MPI_FLOAT - type[1] = MPI_INT - disp[0] = 0 - disp[1] = sizeof(float) - block[0] = 1 - block[1] = 1 - MPI_TYPE_STRUCT(2, block, disp, type, MPI_FLOAT_INT) -.fi -.sp -Similar statements apply for MPI_LONG_INT and MPI_DOUBLE_INT. -.sp -All MPI objects (e.g., MPI_Datatype, MPI_Comm) are of type INTEGER in Fortran. -.SH NOTES ON COLLECTIVE OPERATIONS - -The reduction operators ( -.I MPI_Op -) do not return an error value. As a result, -if the functions detect an error, all they can do is either call -.I MPI_Abort -or silently skip the problem. Thus, if you change the error handler from -.I MPI_ERRORS_ARE_FATAL -to something else, for example, -.I MPI_ERRORS_RETURN -, -then no error may be indicated. - -The reason for this is the performance problems in ensuring that -all collective routines return the same error value. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. 
The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.sp -MPI_Allreduce -.br -MPI_Reduce -.br -MPI_Reduce_scatter -.br -MPI_Scan -.br -MPI_Op_create -.br -MPI_Op_free - - - diff --git a/ompi/mpi/man/man3/MPI_Reduce_scatter.3in b/ompi/mpi/man/man3/MPI_Reduce_scatter.3in deleted file mode 100644 index 01fa0d9f8f7..00000000000 --- a/ompi/mpi/man/man3/MPI_Reduce_scatter.3in +++ /dev/null @@ -1,160 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Reduce_scatter 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Reduce_scatter, MPI_Ireduce_scatter, MPI_Reduce_scatter_init\fP \- Combines values and scatters the results. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Reduce_scatter(const void *\fIsendbuf\fP, void\fI *recvbuf\fP, const int\fI recvcounts\fP[], - MPI_Datatype\fI datatype\fP, MPI_Op\fI op\fP, MPI_Comm\fI comm\fP) - -int MPI_Ireduce_scatter(const void *\fIsendbuf\fP, void\fI *recvbuf\fP, const int\fI recvcounts\fP[], - MPI_Datatype\fI datatype\fP, MPI_Op\fI op\fP, MPI_Comm\fI comm\fP, MPI_Request \fI*request\fP) - -.fi -int MPI_Reduce_scatter_init(const void *\fIsendbuf\fP, void\fI *recvbuf\fP, const int\fI recvcounts\fP[], - MPI_Datatype\fI datatype\fP, MPI_Op\fI op\fP, MPI_Comm\fI comm\fP, MPI_Info \fIinfo\fP, MPI_Request \fI*request\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_REDUCE_SCATTER(\fISENDBUF, RECVBUF, RECVCOUNTS, DATATYPE, OP, - COMM, IERROR\fP) - \fISENDBUF(*), RECVBUF(*)\fP - INTEGER \fIRECVCOUNTS(*), DATATYPE, OP, COMM, IERROR \fP - -MPI_IREDUCE_SCATTER(\fISENDBUF, RECVBUF, RECVCOUNTS, DATATYPE, OP, - COMM, REQUEST, IERROR\fP) - \fISENDBUF(*), RECVBUF(*)\fP - INTEGER \fIRECVCOUNTS(*), DATATYPE, OP, COMM, REQUEST, IERROR \fP - -MPI_REDUCE_SCATTER_INIT(\fISENDBUF, RECVBUF, RECVCOUNTS, DATATYPE, OP, - COMM, INFO, REQUEST, IERROR\fP) - \fISENDBUF(*), RECVBUF(*)\fP - INTEGER \fIRECVCOUNTS(*), DATATYPE, OP, COMM, INFO, REQUEST, IERROR \fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Reduce_scatter(\fIsendbuf\fP, \fIrecvbuf\fP, \fIrecvcounts\fP, \fIdatatype\fP, \fIop\fP, \fIcomm\fP, - \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN) :: \fIsendbuf\fP - TYPE(*), DIMENSION(..) :: \fIrecvbuf\fP - INTEGER, INTENT(IN) :: \fIrecvcounts(*)\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Op), INTENT(IN) :: \fIop\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -MPI_Ireduce_scatter(\fIsendbuf\fP, \fIrecvbuf\fP, \fIrecvcounts\fP, \fIdatatype\fP, \fIop\fP, \fIcomm\fP, - \fIrequest\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIsendbuf\fP - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIrecvbuf\fP - INTEGER, INTENT(IN), ASYNCHRONOUS :: \fIrecvcounts(*)\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Op), INTENT(IN) :: \fIop\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -MPI_Reduce_scatter_init(\fIsendbuf\fP, \fIrecvbuf\fP, \fIrecvcounts\fP, \fIdatatype\fP, \fIop\fP, \fIcomm\fP, - \fIinfo\fP, \fIrequest\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIsendbuf\fP - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIrecvbuf\fP - INTEGER, INTENT(IN), ASYNCHRONOUS :: 
\fIrecvcounts(*)\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Op), INTENT(IN) :: \fIop\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Info), INTENT(IN) :: \fIinfo\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -sendbuf -Starting address of send buffer (choice). -.TP 1i -recvcounts -Integer array specifying the number of elements in result distributed to -each process. Array must be identical on all calling processes. -.TP 1i -datatype -Datatype of elements of input buffer (handle). -.TP 1i -op -Operation (handle). -.TP 1i -comm -Communicator (handle). -.TP 1i -info -Info (handle, persistent). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -recvbuf -Starting address of receive buffer (choice). -.TP 1i -request -Request (handle, non-blocking only). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R - -MPI_Reduce_scatter first does an element-wise reduction on vector of \fIcount\fP\ - =\ S(i)\fIrecvcounts\fP[i] elements in the send buffer defined by \fIsendbuf\fP, \fIcount\fP, and -\fIdatatype\fP. Next, the resulting vector of results is split into n disjoint -segments, where n is the number of processes in the group. Segment i contains -\fIrecvcounts\fP[i] elements. The ith segment is sent to process i and stored in -the receive buffer defined by \fIrecvbuf\fP, \fIrecvcounts\fP[i], and \fIdatatype\fP. - - -.SH USE OF IN-PLACE OPTION -When the communicator is an intracommunicator, you can perform a reduce-scatter operation in-place (the output buffer is used as the input buffer). Use the variable MPI_IN_PLACE as the value of the \fIsendbuf\fR. In this case, the input data is taken from the top of the receive buffer. The area occupied by the input data may be either longer or shorter than the data filled by the output data. 
-.sp -.SH WHEN COMMUNICATOR IS AN INTER-COMMUNICATOR -.sp -When the communicator is an inter-communicator, the reduce-scatter operation occurs in two phases. First, the result of the reduction performed on the data provided by the processes in the first group is scattered among the processes in the second group. Then the reverse occurs: the reduction performed on the data provided by the processes in the second group is scattered among the processes in the first group. For each group, all processes provide the same \fIrecvcounts\fR argument, and the sum of the \fIrecvcounts\fR values should be the same for both groups. -.sp -.SH NOTES ON COLLECTIVE OPERATIONS - -The reduction functions ( -.I MPI_Op -) do not return an error value. As a result, -if the functions detect an error, all they can do is either call -.I MPI_Abort -or silently skip the problem. Thus, if you change the error handler from -.I MPI_ERRORS_ARE_FATAL -to something else, for example, -.I MPI_ERRORS_RETURN -, -then no error may be indicated. - -The reason for this is the performance problems in ensuring that -all collective routines return the same error value. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. 
- diff --git a/ompi/mpi/man/man3/MPI_Reduce_scatter_block.3in b/ompi/mpi/man/man3/MPI_Reduce_scatter_block.3in deleted file mode 100644 index e6e3f3e11f7..00000000000 --- a/ompi/mpi/man/man3/MPI_Reduce_scatter_block.3in +++ /dev/null @@ -1,162 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" $COPYRIGHT$ -.TH MPI_Reduce_scatter_block 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Reduce_scatter_block, MPI_Ireduce_scatter_block, MPI_Reduce_scatter_block_init\fP \- Combines values and scatters the results in blocks. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Reduce_scatter_block(const void *\fIsendbuf\fP, void\fI *recvbuf\fP, int\fI recvcount\fP, - MPI_Datatype\fI datatype\fP, MPI_Op\fI op\fP, MPI_Comm\fI comm\fP) - -int MPI_Ireduce_scatter_block(const void *\fIsendbuf\fP, void\fI *recvbuf\fP, int\fI recvcount\fP, - MPI_Datatype\fI datatype\fP, MPI_Op\fI op\fP, MPI_Comm\fI comm\fP, MPI_Request \fI*request\fP) - - -int MPI_Reduce_scatter_block_init(const void *\fIsendbuf\fP, void\fI *recvbuf\fP, int\fI recvcount\fP, - MPI_Datatype\fI datatype\fP, MPI_Op\fI op\fP, MPI_Comm\fI comm\fP, MPI_Info\fI info\fP, MPI_Request \fI*request\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
 or the older form: INCLUDE 'mpif.h'
-MPI_REDUCE_SCATTER_BLOCK(\fISENDBUF, RECVBUF, RECVCOUNT, DATATYPE, OP,
-		COMM, IERROR\fP)
-	\fISENDBUF(*), RECVBUF(*)\fP
-	INTEGER	\fIRECVCOUNT, DATATYPE, OP, COMM, IERROR \fP
-
-MPI_IREDUCE_SCATTER_BLOCK(\fISENDBUF, RECVBUF, RECVCOUNT, DATATYPE, OP,
-		COMM, REQUEST, IERROR\fP)
-	\fISENDBUF(*), RECVBUF(*)\fP
-	INTEGER	\fIRECVCOUNT, DATATYPE, OP, COMM, REQUEST, IERROR \fP
-
-
-MPI_REDUCE_SCATTER_BLOCK_INIT(\fISENDBUF, RECVBUF, RECVCOUNT, DATATYPE, OP,
-		COMM, INFO, REQUEST, IERROR\fP)
-	\fISENDBUF(*), RECVBUF(*)\fP
-	INTEGER	\fIRECVCOUNT, DATATYPE, OP, COMM, INFO, REQUEST, IERROR \fP
-
-.fi
-.SH Fortran 2008 Syntax
-.nf
-USE mpi_f08
-MPI_Reduce_scatter_block(\fIsendbuf\fP, \fIrecvbuf\fP, \fIrecvcount\fP, \fIdatatype\fP, \fIop\fP, \fIcomm\fP,
-		\fIierror\fP)
-	TYPE(*), DIMENSION(..), INTENT(IN) :: \fIsendbuf\fP
-	TYPE(*), DIMENSION(..) :: \fIrecvbuf\fP
-	INTEGER, INTENT(IN) :: \fIrecvcount\fP
-	TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP
-	TYPE(MPI_Op), INTENT(IN) :: \fIop\fP
-	TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP
-	INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP
-
-MPI_Ireduce_scatter_block(\fIsendbuf\fP, \fIrecvbuf\fP, \fIrecvcount\fP, \fIdatatype\fP, \fIop\fP, \fIcomm\fP,
-		\fIrequest\fP, \fIierror\fP)
-	TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIsendbuf\fP
-	TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIrecvbuf\fP
-	INTEGER, INTENT(IN) :: \fIrecvcount\fP
-	TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP
-	TYPE(MPI_Op), INTENT(IN) :: \fIop\fP
-	TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP
-	TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP
-	INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP
-
-MPI_Reduce_scatter_block_init(\fIsendbuf\fP, \fIrecvbuf\fP, \fIrecvcount\fP, \fIdatatype\fP, \fIop\fP, \fIcomm\fP,
-		\fIinfo\fP, \fIrequest\fP, \fIierror\fP)
-	TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIsendbuf\fP
-	TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIrecvbuf\fP
-	INTEGER, 
INTENT(IN) :: \fIrecvcount\fP
-	TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP
-	TYPE(MPI_Op), INTENT(IN) :: \fIop\fP
-	TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP
-	TYPE(MPI_Info), INTENT(IN) :: \fIinfo\fP
-	TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP
-	INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP
-
-.fi
-.SH INPUT PARAMETERS
-.ft R
-.TP 1i
-sendbuf
-Starting address of send buffer (choice).
-.TP 1i
-recvcount
-Element count per block (non-negative integer).
-.TP 1i
-datatype
-Datatype of elements of input buffer (handle).
-.TP 1i
-op
-Operation (handle).
-.TP 1i
-comm
-Communicator (handle).
-.TP 1i
-info
-Info (handle, persistent only).
-
-.SH OUTPUT PARAMETERS
-.ft R
-.TP 1i
-recvbuf
-Starting address of receive buffer (choice).
-.TP 1i
-request
-Request (handle, non-blocking only).
-.ft R
-.TP 1i
-IERROR
-Fortran only: Error status (integer).
-
-.SH DESCRIPTION
-.ft R
-
-MPI_Reduce_scatter_block first does an element-wise reduction on vector of \fIcount\fP\
- =\ n * \fIrecvcount\fP elements in the send buffer defined by \fIsendbuf\fP, \fIcount\fP, and
-\fIdatatype\fP, using the operation \fIop\fP, where n is the number of
-processes in the group of \fIcomm\fP. Next, the resulting vector of results is split into n disjoint
-segments, where n is the number of processes in the group. Each segment contains \fIrecvcount\fP
-elements. The ith segment is sent to process i and stored in the receive buffer defined by
-\fIrecvbuf\fP, \fIrecvcount\fP, and \fIdatatype\fP.
-
-
-.SH USE OF IN-PLACE OPTION
-When the communicator is an intracommunicator, you can perform a reduce-scatter operation in-place (the output buffer is used as the input buffer). Use the variable MPI_IN_PLACE as the value of the \fIsendbuf\fR. In this case, the input data is taken from the top of the receive buffer. The area occupied by the input data may be either longer or shorter than the data filled by the output data. 
-.sp -.SH WHEN COMMUNICATOR IS AN INTER-COMMUNICATOR -.sp -When the communicator is an inter-communicator, the reduce-scatter operation occurs in two phases. First, the result of the reduction performed on the data provided by the processes in the first group is scattered among the processes in the second group. Then the reverse occurs: the reduction performed on the data provided by the processes in the second group is scattered among the processes in the first group. For each group, all processes provide the same \fIrecvcounts\fR argument, and the sum of the \fIrecvcounts\fR values should be the same for both groups. -.sp -.SH NOTES ON COLLECTIVE OPERATIONS - -The reduction functions ( -.I MPI_Op -) do not return an error value. As a result, -if the functions detect an error, all they can do is either call -.I MPI_Abort -or silently skip the problem. Thus, if you change the error handler from -.I MPI_ERRORS_ARE_FATAL -to something else, for example, -.I MPI_ERRORS_RETURN -, -then no error may be indicated. - -The reason for this is the performance problems in ensuring that -all collective routines return the same error value. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. 
- -.SH SEE ALSO -MPI_Reduce_scatter diff --git a/ompi/mpi/man/man3/MPI_Reduce_scatter_block_init.3in b/ompi/mpi/man/man3/MPI_Reduce_scatter_block_init.3in deleted file mode 100644 index f649a6c443d..00000000000 --- a/ompi/mpi/man/man3/MPI_Reduce_scatter_block_init.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Reduce_scatter_block.3 diff --git a/ompi/mpi/man/man3/MPI_Reduce_scatter_init.3in b/ompi/mpi/man/man3/MPI_Reduce_scatter_init.3in deleted file mode 100644 index 4f03aec6068..00000000000 --- a/ompi/mpi/man/man3/MPI_Reduce_scatter_init.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Reduce_scatter.3 diff --git a/ompi/mpi/man/man3/MPI_Register_datarep.3in b/ompi/mpi/man/man3/MPI_Register_datarep.3in deleted file mode 100644 index 60f08b269e9..00000000000 --- a/ompi/mpi/man/man3/MPI_Register_datarep.3in +++ /dev/null @@ -1,102 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2015-2016 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Register_datarep 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Register_datarep\fP \- Defines data representation. - -.SH SYNTAX -.ft R -.nf -.SH C Syntax -.nf -#include -int MPI_Register_datarep(const char \fI*datarep\fP, - MPI_Datarep_conversion_function \fI*read_conversion_fn\fP, - MPI_Datarep_conversion_function \fI*write_conversion_fn\fP, - MPI_Datarep_extent_function \fI*dtype_file_extent_fn\fP, - void \fI*extra_state\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_REGISTER_DATAREP(\fIDATAREP\fP, \fIREAD_CONVERSION_FN\fP, - \fIWRITE_CONVERSION_FN\fP, \fIDTYPE_FILE_EXTENT_FN\fP, - \fIEXTRA_STATE\fP, \fIIERROR\fP) - CHARACTER*(*) \fIDATAREP\fP - EXTERNAL \fIREAD_CONVERSION_FN, WRITE_CONVERSION_FN, DTYPE_FILE_EXTENT_FN\fP - INTEGER \fIIERROR\fP - INTEGER(KIND=MPI_ADDRESS_KIND) \fIEXTRA_STATE\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Register_datarep(\fIdatarep\fP, \fIread_conversion_fn\fP, \fIwrite_conversion_fn\fP, - \fIdtype_file_extent_fn\fP, \fIextra_state\fP, \fIierror\fP) - CHARACTER(LEN=*), INTENT(IN) :: \fIdatarep\fP - PROCEDURE(MPI_Datarep_conversion_function) :: \fIread_conversion_fn\fP - PROCEDURE(MPI_Datarep_conversion_function) :: \fIwrite_conversion_fn\fP - PROCEDURE(MPI_Datarep_extent_function) :: \fIdtype_file_extent_fn\fP - INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: \fIextra_state\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -datarep -Data representation identifier (string). -.ft R -.TP 1i -read_conversion_fn -Function invoked to convert from file representation to native representation (function). -.ft R -.TP 1i -write_conversion_fn -Function invoked to convert from native representation to file representation (function). -.ft R -.TP 1i -dtype_file_extent_fn -Function invoked to get the extent of a data type as represented in the file (function). -.ft R -.TP 1i -extra_state -Extra state. - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Register_datarep defines a data representation. It associates the data representation's identifier (a string) with the functions that convert from file representation to the native representation and vice versa, with the function that gets the extent of a data type as represented in the file, as well as with "extra state," which is used for passing arguments. 
Once a data representation has been registered using this routine, you may specify its identifier as an argument to MPI_File_set_view, causing subsequent data-access operations to call the specified conversion functions. - -The call associates \fIread_conversion_fn\fP, \fIwrite_conversion_fn\fP, and \fIdtype_file_extent_fn\fP with the data representation identifier \fIdatarep\fP. \fIdatarep\fP can then be used as an argument to MPI_File_set_view, causing subsequent data access operations to call the conversion functions to convert all data items accessed between file data representation and native representation. MPI_Register_datarep is a local operation and only registers the data representation for the calling MPI process. If \fIdatarep\fP is already defined, an error in the error class MPI_ERR_DUP_DATAREP is raised using the default file error handler. The length of a data representation string is limited to the value of MPI_MAX_DATAREP_STRING. MPI_MAX_DATAREP_STRING must have a value of at least 64. No routines are provided to delete data representations and free the associated resources; it is not expected that an application will generate them in significant numbers. - -.SH NOTES -.ft R - -The Fortran version of each MPI I/O routine includes a final argument, -IERROR, which is not defined in the PARAMETERS sections. This argument is used to return the error status of the routine in the manner typical for Fortran library routines. -.sp -The C version of each routine returns an error status as an integer return value. -.sp -Error classes are found in mpi.h (for C) and mpif.h (for Fortran). - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. For MPI I/O function errors, the default error handler is set to MPI_ERRORS_RETURN. 
The error handler may be changed with MPI_File_set_errhandler; the predefined error handler MPI_ERRORS_ARE_FATAL may be used to make I/O errors fatal. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_Request_c2f.3in b/ompi/mpi/man/man3/MPI_Request_c2f.3in deleted file mode 100644 index a13fce697dd..00000000000 --- a/ompi/mpi/man/man3/MPI_Request_c2f.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Comm_f2c.3 diff --git a/ompi/mpi/man/man3/MPI_Request_f2c.3in b/ompi/mpi/man/man3/MPI_Request_f2c.3in deleted file mode 100644 index a13fce697dd..00000000000 --- a/ompi/mpi/man/man3/MPI_Request_f2c.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Comm_f2c.3 diff --git a/ompi/mpi/man/man3/MPI_Request_free.3in b/ompi/mpi/man/man3/MPI_Request_free.3in deleted file mode 100644 index 7000fe5066e..00000000000 --- a/ompi/mpi/man/man3/MPI_Request_free.3in +++ /dev/null @@ -1,140 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" Copyright (c) 2020 FUJITSU LIMITED. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Request_free 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Request_free\fP \- Frees a communication request object. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Request_free(MPI_Request *request) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_REQUEST_FREE(REQUEST, IERROR) - INTEGER REQUEST, IERROR - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Request_free(\fIrequest\fP, \fIierror\fP) - TYPE(MPI_Request), INTENT(INOUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -request - Communication request (handle). 
- -.SH DESCRIPTION -.ft R -This operation allows a request object to be deallocated without waiting for the associated communication to complete. -.sp -MPI_Request_free marks the request object for deallocation and sets request -to MPI_REQUEST_NULL. Any ongoing communication that is associated with the request will be allowed to complete. The request will be deallocated only after its completion. - -.SH NOTES -Once a request is freed by a call to MPI_Request_free, it is not possible to check for the successful completion of the associated communication with calls to MPI_Wait or MPI_Test. Also, if an error occurs subsequently during the communication, an error code cannot be returned to the user -- such an error must be treated as fatal. Questions arise as to how one knows when the operations have completed when using MPI_Request_free. Depending on the program logic, there may be other ways in which the program knows that certain operations have completed and this makes usage of MPI_Request_free practical. For example, an active send request could be freed when the logic of the program is such that the receiver sends a reply to the message sent -- the arrival of the reply informs the sender that the send has completed and the send buffer can be reused. An active receive request should never be freed, as the receiver will have no way to verify that the receive has completed and the receive buffer can be reused. - -.sp -\fBExample:\fR -.sp -.nf - CALL MPI_COMM_RANK(MPI_COMM_WORLD, rank) - IF(rank.EQ.0) THEN - DO i=1, n - CALL MPI_ISEND(outval, 1, MPI_REAL, 1, 0, req, ierr) - CALL MPI_REQUEST_FREE(req, ierr) - CALL MPI_IRECV(inval, 1, MPI_REAL, 1, 0, req, ierr) - CALL MPI_WAIT(req, status, ierr) - END DO - ELSE ! 
rank.EQ.1 - CALL MPI_IRECV(inval, 1, MPI_REAL, 0, 0, req, ierr) - CALL MPI_WAIT(req, status) - DO I=1, n-1 - CALL MPI_ISEND(outval, 1, MPI_REAL, 0, 0, req, ierr) - CALL MPI_REQUEST_FREE(req, ierr) - CALL MPI_IRECV(inval, 1, MPI_REAL, 0, 0, req, ierr) - CALL MPI_WAIT(req, status, ierr) - END DO - CALL MPI_ISEND(outval, 1, MPI_REAL, 0, 0, req, ierr) - CALL MPI_WAIT(req, status) - END IF -.fi -.sp -This routine is normally used to free persistent requests created with -either -.I MPI_Recv_init -or -.I MPI_Send_init -and friends. However, it can be -used to free a request created with -.I MPI_Irecv -or -.I MPI_Isend -and friends; -in that case the use can not use the test/wait routines on the request. - -It -.B is -permitted to free an active request. However, once freed, you can not -use the request in a wait or test routine (e.g., -.I MPI_Wait -). - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. 
- -.SH SEE ALSO -MPI_Isend -.br -MPI_Irecv -.br -MPI_Issend -.br -MPI_Ibsend -.br -MPI_Irsend -.br -MPI_Recv_init -.br -MPI_Send_init -.br -MPI_Ssend_init -.br -MPI_Rsend_init -.br -MPI_Test -.br -MPI_Wait -.br -MPI_Waitall -.br -MPI_Waitany -.br -MPI_Waitsome -.br -MPI_Testall -.br -MPI_Testany -.br -MPI_Testsome - - - diff --git a/ompi/mpi/man/man3/MPI_Request_get_status.3in b/ompi/mpi/man/man3/MPI_Request_get_status.3in deleted file mode 100644 index d4e5414f38e..00000000000 --- a/ompi/mpi/man/man3/MPI_Request_get_status.3in +++ /dev/null @@ -1,65 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Request_get_status 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Request_get_status\fP \- Access information associated with a request without freeing the request. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Request_get_status(MPI_Request \fIrequest\fP, int \fI*flag\fP, MPI_Status \fI*status\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_REQUEST_GET_STATUS(\fIREQUEST\fP, \fIFLAG\fP, \fISTATUS\fP, \fIIERROR\fP) - INTEGER REQUEST, STATUS(MPI_STATUS_SIZE), IERROR - LOGICAL FLAG - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Request_get_status(\fIrequest\fP, \fIflag\fP, \fIstatus\fP, \fIierror\fP) - TYPE(MPI_Request), INTENT(IN) :: \fIrequest\fP - LOGICAL, INTENT(OUT) :: \fIflag\fP - TYPE(MPI_Status) :: \fIstatus\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.ft -.TP 1i -request - Communication request (handle). - -.SH OUTPUT PARAMETERS -.ft -.TP 1i -flag -Boolean flag, same as from MPI_Test (logical). -.ft -.TP 1i -status -MPI_Status object if flag is true (status). 
- -.SH DESCRIPTION -.ft R -MPI_Request_get_status sets \fIflag\fP=\fItrue\fP if the operation is complete or sets \fIflag\fP=\fIfalse\fP if it is not complete. If the operation is complete, it returns in \fIstatus\fP the request status. It does not deallocate or inactivate the request; a subsequent call to test, wait, or free should be executed with that request. -.sp -If your application does not need to examine the \fIstatus\fP field, you can save resources by using the predefined constant MPI_STATUS_IGNORE as a special value for the \fIstatus\fP argument. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. 
- diff --git a/ompi/mpi/man/man3/MPI_Rget.3in b/ompi/mpi/man/man3/MPI_Rget.3in deleted file mode 100644 index 4b4410dd0da..00000000000 --- a/ompi/mpi/man/man3/MPI_Rget.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Get.3 diff --git a/ompi/mpi/man/man3/MPI_Rget_accumulate.3in b/ompi/mpi/man/man3/MPI_Rget_accumulate.3in deleted file mode 100644 index 86db5536072..00000000000 --- a/ompi/mpi/man/man3/MPI_Rget_accumulate.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Get_accumulate.3 diff --git a/ompi/mpi/man/man3/MPI_Rput.3in b/ompi/mpi/man/man3/MPI_Rput.3in deleted file mode 100644 index 52e806a2e8b..00000000000 --- a/ompi/mpi/man/man3/MPI_Rput.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Put.3 diff --git a/ompi/mpi/man/man3/MPI_Rsend.3in b/ompi/mpi/man/man3/MPI_Rsend.3in deleted file mode 100644 index 14db0479e99..00000000000 --- a/ompi/mpi/man/man3/MPI_Rsend.3in +++ /dev/null @@ -1,79 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Rsend 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Rsend\fP \- Ready send. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Rsend(const void *\fIbuf\fP, int\fI count\fP, MPI_Datatype\fI datatype\fP, int\fI dest\fP, - int\fI tag\fP, MPI_Comm\fI comm\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_RSEND(\fIBUF, COUNT, DATATYPE, DEST, TAG, COMM, IERROR\fP) - \fIBUF\fP(*) - INTEGER \fICOUNT, DATATYPE, DEST, TAG, COMM, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Rsend(\fIbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIdest\fP, \fItag\fP, \fIcomm\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN) :: \fIbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP, \fIdest\fP, \fItag\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -buf -Initial address of send buffer (choice). -.TP 1i -count -Number of elements in send buffer (nonnegative integer). -.TP 1i -datatype -Datatype of each send buffer element (handle). -.TP 1i -dest -Rank of destination (integer). -.TP 1i -tag -Message tag (integer). -.TP 1i -comm -Communicator (handle). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -A ready send may only be called if the user can guarantee that a receive is -already posted. It is an error if the receive is not posted before the -ready send is called. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. 
- diff --git a/ompi/mpi/man/man3/MPI_Rsend_init.3in b/ompi/mpi/man/man3/MPI_Rsend_init.3in deleted file mode 100644 index 6746e1f903d..00000000000 --- a/ompi/mpi/man/man3/MPI_Rsend_init.3in +++ /dev/null @@ -1,102 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Rsend_init 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Rsend_init\fP \- Builds a handle for a ready send. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Rsend_init(const void *\fIbuf\fP, int\fI count\fP, MPI_Datatype\fI datatype\fP, - int\fI dest\fP, int\fI tag\fP, MPI_Comm\fI comm\fP, MPI_Request\fI *request\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_RSEND_INIT(\fIBUF, COUNT, DATATYPE, DEST, TAG, COMM, REQUEST, - IERROR\fP) - \fIBUF\fP(*) - INTEGER \fICOUNT, DATATYPE, DEST, TAG, COMM, REQUEST, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Rsend_init(\fIbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIdest\fP, \fItag\fP, \fIcomm\fP, \fIrequest\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP, \fIdest\fP, \fItag\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -buf -Initial address of send buffer (choice). -.TP 1i -count -Number of elements sent (integer). -.TP 1i -datatype -Type of each element (handle). -.TP 1i -dest -Rank of destination (integer). -.TP 1i -tag -Message tag (integer). -.TP 1i -comm -Communicator (handle). 
- -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -request -Communication request (handle). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -Creates a persistent communication object for a ready mode send operation, and binds to it all the arguments of a send operation. -.sp -A communication (send or receive) that uses a persistent request is initiated by the function MPI_Start. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.sp -MPI_Bsend_init -.br -MPI_Send_init -.br -MPI_Sssend_init -.br -MPI_Recv_init -.br -MPI_Start -.br -MPI_Startall -.br -MPI_Request_free - diff --git a/ompi/mpi/man/man3/MPI_Scan.3in b/ompi/mpi/man/man3/MPI_Scan.3in deleted file mode 100644 index e212fe63fce..00000000000 --- a/ompi/mpi/man/man3/MPI_Scan.3in +++ /dev/null @@ -1,257 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" Copyright (c) 2020 FUJITSU LIMITED. All rights reserved. 
-.\" $COPYRIGHT$ -.TH MPI_Scan 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" - -.SH NAME -\fBMPI_Scan, MPI_Iscan, MPI_Scan_init\fP \- Computes an inclusive scan (partial reduction) - -.SH SYNTAX -.ft R - -.SH C Syntax -.nf -#include -int MPI_Scan(const void *\fIsendbuf\fP, void *\fIrecvbuf\fP, int \fIcount\fP, - MPI_Datatype \fIdatatype\fP, MPI_Op \fIop\fP, MPI_Comm \fIcomm\fP) - -int MPI_Iscan(const void *\fIsendbuf\fP, void *\fIrecvbuf\fP, int \fIcount\fP, - MPI_Datatype \fIdatatype\fP, MPI_Op \fIop\fP, MPI_Comm \fIcomm\fP, - MPI_Request \fI*request\fP) - -int MPI_Scan_init(const void *\fIsendbuf\fP, void *\fIrecvbuf\fP, int \fIcount\fP, - MPI_Datatype \fIdatatype\fP, MPI_Op \fIop\fP, MPI_Comm \fIcomm\fP, - MPI_Info \fIinfo\fP, MPI_Request \fI*request\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_SCAN(\fISENDBUF, RECVBUF, COUNT, DATATYPE, OP, COMM, IERROR\fP) - \fISENDBUF(*), RECVBUF(*)\fP - INTEGER \fICOUNT, DATATYPE, OP, COMM, IERROR\fP - -MPI_ISCAN(\fISENDBUF, RECVBUF, COUNT, DATATYPE, OP, COMM, REQUEST, IERROR\fP) - \fISENDBUF(*), RECVBUF(*)\fP - INTEGER \fICOUNT, DATATYPE, OP, COMM, REQUEST, IERROR\fP - -MPI_SCAN_INIT(\fISENDBUF, RECVBUF, COUNT, DATATYPE, OP, COMM, INFO, REQUEST, IERROR\fP) - \fISENDBUF(*), RECVBUF(*)\fP - INTEGER \fICOUNT, DATATYPE, OP, COMM, INFO, REQUEST, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Scan(\fIsendbuf\fP, \fIrecvbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIop\fP, \fIcomm\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN) :: \fIsendbuf\fP - TYPE(*), DIMENSION(..) 
:: \fIrecvbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Op), INTENT(IN) :: \fIop\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -MPI_Iscan(\fIsendbuf\fP, \fIrecvbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIop\fP, \fIcomm\fP, \fIrequest\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIsendbuf\fP - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIrecvbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Op), INTENT(IN) :: \fIop\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -MPI_Scan_init(\fIsendbuf\fP, \fIrecvbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIop\fP, \fIcomm\fP, \fIinfo\fP, \fIrequest\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIsendbuf\fP - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIrecvbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Op), INTENT(IN) :: \fIop\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Info), INTENT(IN) :: \fIinfo\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -sendbuf -Send buffer (choice). -.TP 1i -count -Number of elements in input buffer (integer). -.TP 1i -datatype -Data type of elements of input buffer (handle). -.TP 1i -op -Operation (handle). -.TP 1i -comm -Communicator (handle). -.TP 1i -info -Info (handle, persistent only) - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -recvbuf -Receive buffer (choice). -.TP 1i -request -Request (handle, non-blocking only). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Scan is used to perform an inclusive prefix reduction on data -distributed across the calling processes. 
The operation returns, in -the \fIrecvbuf\fP of the process with rank i, the reduction -(calculated according to the function \fIop\fP) of the values in the -\fIsendbuf\fPs of processes with ranks 0, ..., i (inclusive). The type -of operations supported, their semantics, and the constraints on send -and receive buffers are as for MPI_Reduce. - -.SH EXAMPLE -.ft R -This example uses a user-defined operation to produce a segmented -scan. A segmented scan takes, as input, a set of values and a set of -logicals, where the logicals delineate the various segments of the -scan. For example, -.sp -.nf -values v1 v2 v3 v4 v5 v6 v7 v8 -logicals 0 0 1 1 1 0 0 1 -result v1 v1+v2 v3 v3+v4 v3+v4+v5 v6 v6+v7 v8 -.fi -.sp -The result for rank j is thus the sum v(i) + ... + v(j), where i is -the lowest rank such that for all ranks n, i <= n <= j, logical(n) = -logical(j). The operator that produces this effect is -.sp -.nf - [ u ] [ v ] [ w ] - [ ] o [ ] = [ ] - [ i ] [ j ] [ j ] -.fi -.sp -where -.sp - ( u + v if i = j - w = ( - ( v if i != j -.fi -.sp -Note that this is a noncommutative operator. C code that implements it is -given below. -.sp -.nf - typedef struct { - double val; - int log; - } SegScanPair; - - /* - * the user-defined function - */ - void segScan(SegScanPair *in, SegScanPair *inout, int *len, - MPI_Datatype *dptr) - { - int i; - SegScanPair c; - - for (i = 0; i < *len; ++i) { - if (in->log == inout->log) - c.val = in->val + inout->val; - else - c.val = inout->val; - - c.log = inout->log; - *inout = c; - in++; - inout++; - } - } -.fi -.sp -Note that the inout argument to the user-defined function corresponds -to the right-hand operand of the operator. 
When using this operator, -we must be careful to specify that it is noncommutative, as in the -following: -.sp -.nf - int i, base; - SeqScanPair a, answer; - MPI_Op myOp; - MPI_Datatype type[2] = {MPI_DOUBLE, MPI_INT}; - MPI_Aint disp[2]; - int blocklen[2] = {1, 1}; - MPI_Datatype sspair; - - /* - * explain to MPI how type SegScanPair is defined - */ - MPI_Get_address(a, disp); - MPI_Get_address(a.log, disp + 1); - base = disp[0]; - for (i = 0; i < 2; ++i) - disp[i] -= base; - MPI_Type_struct(2, blocklen, disp, type, &sspair); - MPI_Type_commit(&sspair); - - /* - * create the segmented-scan user-op - * noncommutative - set commute (arg 2) to 0 - */ - MPI_Op_create((MPI_User_function *)segScan, 0, &myOp); - \&... - MPI_Scan(a, answer, 1, sspair, myOp, comm); -.fi - -.SH USE OF IN-PLACE OPTION -When the communicator is an intracommunicator, you can perform a scanning operation in place (the output buffer is used as the input buffer). Use the variable MPI_IN_PLACE as the value of the \fIsendbuf\fR argument. The input data is taken from the receive buffer and replaced by the output data. - -.SH NOTES ON COLLECTIVE OPERATIONS -.ft R -The reduction functions of type MPI_Op do not return an error value. -As a result, if the functions detect an error, all they can do is -either call MPI_Abort or silently skip the problem. Thus, if the -error handler is changed from MPI_ERRORS_ARE_FATAL to something else -(e.g., MPI_ERRORS_RETURN), then no error may be indicated. -.sp -The reason for this is the performance problems in ensuring that -all collective routines return the same error value. - -.SH ERRORS -.ft R -Almost all MPI routines return an error value; C routines as -the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. 
The error handler may be changed with -MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. -.sp -See the MPI man page for a full list of MPI error codes. - -.SH SEE ALSO -.ft R -.nf -MPI_Exscan -MPI_Op_create -MPI_Reduce - diff --git a/ompi/mpi/man/man3/MPI_Scan_init.3in b/ompi/mpi/man/man3/MPI_Scan_init.3in deleted file mode 100644 index 42cdcd65e92..00000000000 --- a/ompi/mpi/man/man3/MPI_Scan_init.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Scan.3 diff --git a/ompi/mpi/man/man3/MPI_Scatter.3in b/ompi/mpi/man/man3/MPI_Scatter.3in deleted file mode 100644 index 427fc23e015..00000000000 --- a/ompi/mpi/man/man3/MPI_Scatter.3in +++ /dev/null @@ -1,210 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright (c) 2010-2015 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Scatter 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Scatter, MPI_Iscatter, MPI_Scatter_init\fP \- Sends data from one task to all tasks in a group. 
- -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Scatter(const void *\fIsendbuf\fP, int\fI sendcount\fP, MPI_Datatype\fI sendtype\fP, - void\fI *recvbuf\fP, int\fI recvcount\fP, MPI_Datatype\fI recvtype\fP, int\fI root\fP, - MPI_Comm\fI comm\fP) - -int MPI_Iscatter(const void *\fIsendbuf\fP, int\fI sendcount\fP, MPI_Datatype\fI sendtype\fP, - void\fI *recvbuf\fP, int\fI recvcount\fP, MPI_Datatype\fI recvtype\fP, int\fI root\fP, - MPI_Comm\fI comm\fP, MPI_Request \fI*request\fP) - -int MPI_Scatter_init(const void *\fIsendbuf\fP, int\fI sendcount\fP, MPI_Datatype\fI sendtype\fP, - void\fI *recvbuf\fP, int\fI recvcount\fP, MPI_Datatype\fI recvtype\fP, int\fI root\fP, - MPI_Comm\fI comm\fP, MPI_Info\fI info\fP, MPI_Request \fI*request\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_SCATTER(\fISENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNT, - RECVTYPE, ROOT, COMM, IERROR\fP) - \fISENDBUF(*), RECVBUF(*)\fP - INTEGER \fISENDCOUNT, SENDTYPE, RECVCOUNT, RECVTYPE, ROOT\fP - INTEGER \fICOMM, IERROR\fP - -MPI_ISCATTER(\fISENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNT, - RECVTYPE, ROOT, COMM, REQUEST, IERROR\fP) - \fISENDBUF(*), RECVBUF(*)\fP - INTEGER \fISENDCOUNT, SENDTYPE, RECVCOUNT, RECVTYPE, ROOT\fP - INTEGER \fICOMM, REQUEST, IERROR\fP - -MPI_SCATTER_INIT(\fISENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNT, - RECVTYPE, ROOT, COMM, INFO, REQUEST, IERROR\fP) - \fISENDBUF(*), RECVBUF(*)\fP - INTEGER \fISENDCOUNT, SENDTYPE, RECVCOUNT, RECVTYPE, ROOT\fP - INTEGER \fICOMM, INFO, REQUEST, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Scatter(\fIsendbuf\fP, \fIsendcount\fP, \fIsendtype\fP, \fIrecvbuf\fP, \fIrecvcount\fP, \fIrecvtype\fP, - \fIroot\fP, \fIcomm\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN) :: \fIsendbuf\fP - TYPE(*), DIMENSION(..) 
:: \fIrecvbuf\fP - INTEGER, INTENT(IN) :: \fIsendcount\fP, \fIrecvcount\fP, \fIroot\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIsendtype\fP, \fIrecvtype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -MPI_Iscatter(\fIsendbuf\fP, \fIsendcount\fP, \fIsendtype\fP, \fIrecvbuf\fP, \fIrecvcount\fP, \fIrecvtype\fP, - \fIroot\fP, \fIcomm\fP, \fIrequest\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIsendbuf\fP - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIrecvbuf\fP - INTEGER, INTENT(IN) :: \fIsendcount\fP, \fIrecvcount\fP, \fIroot\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIsendtype\fP, \fIrecvtype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -MPI_Scatter_init(\fIsendbuf\fP, \fIsendcount\fP, \fIsendtype\fP, \fIrecvbuf\fP, \fIrecvcount\fP, \fIrecvtype\fP, - \fIroot\fP, \fIcomm\fP, \fIinfo\fP, \fIrequest\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIsendbuf\fP - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIrecvbuf\fP - INTEGER, INTENT(IN) :: \fIsendcount\fP, \fIrecvcount\fP, \fIroot\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIsendtype\fP, \fIrecvtype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Info), INTENT(IN) :: \fIinfo\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -sendbuf -Address of send buffer (choice, significant only at root). -.TP 1i -sendcount -Number of elements sent to each process (integer, significant only at -root). -.TP 1i -sendtype -Datatype of send buffer elements (handle, significant only at root). -.TP 1i -recvcount -Number of elements in receive buffer (integer). -.TP 1i -recvtype -Datatype of receive buffer elements (handle). -.TP 1i -root -Rank of sending process (integer). -.TP 1i -comm -Communicator (handle). 
-.TP 1i -info -Info (handle, persistent). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -recvbuf -Address of receive buffer (choice). -.TP 1i -request -Request (handle, non-blocking only). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Scatter is the inverse operation to MPI_Gather. -.sp -The outcome is as if the root executed n send operations, -.sp -.nf - MPI_Send(sendbuf + i * sendcount * extent(sendtype), sendcount, - sendtype, i, \&...) -.fi -.sp -and each process executed a receive, -.sp -.nf - MPI_Recv(recvbuf, recvcount, recvtype, i, \&...). -.fi -.sp -An alternative description is that the root sends a message with -MPI_Send(\fIsendbuf\fP, \fIsendcount\fP * \fIn\fP,\ \fIsendtype\fP, \&...). This message is split -into \fIn\fP equal segments, the ith segment is sent to the ith process in the -group, and each process receives this message as above. -.sp -The send buffer is ignored for all nonroot processes. -.sp -The type signature associated with \fIsendcount\fP, \fIsendtype\fP at the root must be -equal to the type signature associated with \fIrecvcount\fP, \fIrecvtype\fP at all -processes (however, the type maps may be different). This implies that the -amount of data sent must be equal to the amount of data received, pairwise -between each process and the root. Distinct type maps between sender and -receiver are still allowed. -.sp -All arguments to the function are significant on process \fIroot\fP, while on -other processes, only arguments \fIrecvbuf\fP, \fIrecvcount\fP, \fIrecvtype\fP, \fIroot\fP, \fIcomm\fP -are significant. The arguments \fIroot\fP and \fIcomm\fP must have identical values on -all processes. -.sp -The specification of counts and types should not cause any location on the -root to be read more than once. 
-.sp -\fBRationale:\fR Though not needed, the last restriction is imposed so as -to achieve symmetry with MPI_Gather, where the corresponding restriction (a -multiple-write restriction) is necessary. -.sp -\fBExample:\fR The reverse of Example 1 in the MPI_Gather manpage. Scatter -sets of 100 ints from the root to each process in the group. -.sp -.nf - MPI_Comm comm; - int gsize,*sendbuf; - int root, rbuf[100]; - \&... - MPI_Comm_size(comm, &gsize); - sendbuf = (int *)malloc(gsize*100*sizeof(int)); - \&... - MPI_Scatter(sendbuf, 100, MPI_INT, rbuf, 100, - MPI_INT, root, comm); -.fi - -.SH USE OF IN-PLACE OPTION -When the communicator is an intracommunicator, you can perform a scatter operation in-place (the output buffer is used as the input buffer). Use the variable MPI_IN_PLACE as the value of the root process \fIrecvbuf\fR. In this case, \fIrecvcount\fR and \fIrecvtype\fR are ignored, and the root process sends no data to itself. -.sp -Note that MPI_IN_PLACE is a special kind of value; it has the same restrictions on its use as MPI_BOTTOM. -.sp -Because the in-place option converts the receive buffer into a send-and-receive buffer, a Fortran binding that includes INTENT must mark these as INOUT, not OUT. -.sp -.SH WHEN COMMUNICATOR IS AN INTER-COMMUNICATOR -.sp -When the communicator is an inter-communicator, the root process in the first group sends data to all processes in the second group. The first group defines the root process. That process uses MPI_ROOT as the value of its \fIroot\fR argument. The remaining processes use MPI_PROC_NULL as the value of their \fIroot\fR argument. All processes in the second group use the rank of that root process in the first group as the value of their \fIroot\fR argument. The receive buffer argument of the root process in the first group must be consistent with the receive buffer argument of the processes in the second group. 
-.sp -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.sp -.nf -MPI_Scatterv -MPI_Gather -MPI_Gatherv - diff --git a/ompi/mpi/man/man3/MPI_Scatter_init.3in b/ompi/mpi/man/man3/MPI_Scatter_init.3in deleted file mode 100644 index 05572bc5ca1..00000000000 --- a/ompi/mpi/man/man3/MPI_Scatter_init.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Scatter.3 diff --git a/ompi/mpi/man/man3/MPI_Scatterv.3in b/ompi/mpi/man/man3/MPI_Scatterv.3in deleted file mode 100644 index 81cd31d8527..00000000000 --- a/ompi/mpi/man/man3/MPI_Scatterv.3in +++ /dev/null @@ -1,258 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" Copyright (c) 2020 FUJITSU LIMITED. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Scatterv 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Scatterv, MPI_Iscatterv, MPI_Scatterv_init\fP \- Scatters a buffer in parts to all tasks in a group. 
- -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Scatterv(const void *\fIsendbuf\fP, const int\fI sendcounts[]\fP, const int\fI displs[]\fP, - MPI_Datatype\fI sendtype\fP, void\fI *recvbuf\fP, int\fI recvcount\fP, - MPI_Datatype\fI recvtype\fP, int\fI root\fP, MPI_Comm\fI comm\fP) - -int MPI_Iscatterv(const void *\fIsendbuf\fP, const int\fI sendcounts[]\fP, const int\fI displs[]\fP, - MPI_Datatype\fI sendtype\fP, void\fI *recvbuf\fP, int\fI recvcount\fP, - MPI_Datatype\fI recvtype\fP, int\fI root\fP, MPI_Comm\fI comm\fP, MPI_Request \fI*request\fP) - -int MPI_Scatterv_init(const void *\fIsendbuf\fP, const int\fI sendcounts[]\fP, const int\fI displs[]\fP, - MPI_Datatype\fI sendtype\fP, void\fI *recvbuf\fP, int\fI recvcount\fP, - MPI_Datatype\fI recvtype\fP, int\fI root\fP, MPI_Comm\fI comm\fP, MPI_Info\fI info\fP, MPI_Request \fI*request\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_SCATTERV(\fISENDBUF, SENDCOUNTS, DISPLS, SENDTYPE, RECVBUF, - RECVCOUNT, RECVTYPE, ROOT, COMM, IERROR\fP) - \fISENDBUF(*), RECVBUF(*)\fP - INTEGER \fISENDCOUNTS(*), DISPLS(*), SENDTYPE\fP - INTEGER \fIRECVCOUNT, RECVTYPE, ROOT, COMM, IERROR\fP - -MPI_ISCATTERV(\fISENDBUF, SENDCOUNTS, DISPLS, SENDTYPE, RECVBUF, - RECVCOUNT, RECVTYPE, ROOT, COMM, REQUEST, IERROR\fP) - \fISENDBUF(*), RECVBUF(*)\fP - INTEGER \fISENDCOUNTS(*), DISPLS(*), SENDTYPE\fP - INTEGER \fIRECVCOUNT, RECVTYPE, ROOT, COMM, REQUEST, IERROR\fP - -MPI_SCATTERV_INIT(\fISENDBUF, SENDCOUNTS, DISPLS, SENDTYPE, RECVBUF, - RECVCOUNT, RECVTYPE, ROOT, COMM, INFO, REQUEST, IERROR\fP) - \fISENDBUF(*), RECVBUF(*)\fP - INTEGER \fISENDCOUNTS(*), DISPLS(*), SENDTYPE\fP - INTEGER \fIRECVCOUNT, RECVTYPE, ROOT, COMM, INFO, REQUEST, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Scatterv(\fIsendbuf\fP, \fIsendcounts\fP, \fIdispls\fP, \fIsendtype\fP, \fIrecvbuf\fP, \fIrecvcount\fP, - \fIrecvtype\fP, \fIroot\fP, \fIcomm\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN) 
:: \fIsendbuf\fP - TYPE(*), DIMENSION(..) :: \fIrecvbuf\fP - INTEGER, INTENT(IN) :: \fIsendcounts(*)\fP, \fIdispls(*)\fP, \fIrecvcount\fP, \fIroot\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIsendtype\fP, \fIrecvtype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -MPI_Iscatterv(\fIsendbuf\fP, \fIsendcounts\fP, \fIdispls\fP, \fIsendtype\fP, \fIrecvbuf\fP, \fIrecvcount\fP, - \fIrecvtype\fP, \fIroot\fP, \fIcomm\fP, \fIrequest\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIsendbuf\fP - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIrecvbuf\fP - INTEGER, INTENT(IN), ASYNCHRONOUS :: \fIsendcounts(*)\fP, \fIdispls(*)\fP - INTEGER, INTENT(IN) :: \fIrecvcount\fP, \fIroot\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIsendtype\fP, \fIrecvtype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -MPI_scatterv_init(\fIsendbuf\fP, \fIsendcounts\fP, \fIdispls\fP, \fIsendtype\fP, \fIrecvbuf\fP, \fIrecvcount\fP, - \fIrecvtype\fP, \fIroot\fP, \fIcomm\fP, \fIinfo\fP, \fIrequest\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIsendbuf\fP - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIrecvbuf\fP - INTEGER, INTENT(IN), ASYNCHRONOUS :: \fIsendcounts(*)\fP, \fIdispls(*)\fP - INTEGER, INTENT(IN) :: \fIrecvcount\fP, \fIroot\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIsendtype\fP, \fIrecvtype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Info), INTENT(IN) :: \fIinfo\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -sendbuf -Address of send buffer (choice, significant only at root). -.TP 1i -sendcounts -Integer array (of length group size) specifying the number of elements to -send to each processor. -.TP 1i -displs -Integer array (of length group size). 
Entry i specifies the displacement -(relative to sendbuf) from which to take the outgoing data to process i. -.TP 1i -sendtype -Datatype of send buffer elements (handle). -.TP 1i -recvcount -Number of elements in receive buffer (integer). -.TP 1i -recvtype -Datatype of receive buffer elements (handle). -.TP 1i -root -Rank of sending process (integer). -.TP 1i -comm -Communicator (handle). -.TP 1i -info -Info (handle, persistent only). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -recvbuf -Address of receive buffer (choice). -.TP 1i -request -Request (handle, non-blocking only). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Scatterv is the inverse operation to MPI_Gatherv. -.sp -MPI_Scatterv extends the functionality of MPI_Scatter by allowing a varying -count of data to be sent to each process, since \fIsendcounts\fP is now an array. -It also allows more flexibility as to where the data is taken from on the -root, by providing the new argument, \fIdispls\fP. -.sp -The outcome is as if the root executed \fIn\fP send operations, -.sp -.nf - MPI_Send(\fIsendbuf\fP + \fIdispls\fP[\fIi\fP] * \fIextent\fP(\fIsendtype\fP), \\ - \fIsendcounts\fP[i], \fIsendtype\fP, \fIi\fP, \&...) - -and each process executed a receive, - - MPI_Recv(\fIrecvbuf\fP, \fIrecvcount\fP, \fIrecvtype\fP, \fIroot\fP, \&...) - -The send buffer is ignored for all nonroot processes. -.fi -.sp -The type signature implied by \fIsendcount\fP[\fIi\fP], \fIsendtype\fP at the root must be -equal to the type signature implied by \fIrecvcount\fP, \fIrecvtype\fP at process \fIi\fP -(however, the type maps may be different). This implies that the amount of -data sent must be equal to the amount of data received, pairwise between -each process and the root. Distinct type maps between sender and receiver -are still allowed. 
-.sp -All arguments to the function are significant on process \fIroot\fP, while on -other processes, only arguments \fIrecvbuf\fP, \fIrecvcount\fP, \fIrecvtype\fP, \fIroot\fP, \fIcomm\fP -are significant. The arguments \fIroot\fP and \fIcomm\fP must have identical values on -all processes. -.sp -The specification of counts, types, and displacements should not cause any -location on the root to be read more than once. -.sp -\fBExample 1:\fR The reverse of Example 5 in the MPI_Gatherv manpage. We -have a varying stride between blocks at sending (root) side, at the -receiving side we receive 100 - \fIi\fP elements into the \fIi\fPth column of a 100 x 150 C array at process \fIi\fP. -.sp -.nf - MPI_Comm comm; - int gsize,recvarray[100][150],*rptr; - int root, *sendbuf, myrank, bufsize, *stride; - MPI_Datatype rtype; - int i, *displs, *scounts, offset; - \&... - MPI_Comm_size( comm, &gsize); - MPI_Comm_rank( comm, &myrank ); - - stride = (int *)malloc(gsize*sizeof(int)); - \&... - /* stride[i] for i = 0 to gsize-1 is set somehow - * sendbuf comes from elsewhere - */ - \&... - displs = (int *)malloc(gsize*sizeof(int)); - scounts = (int *)malloc(gsize*sizeof(int)); - offset = 0; - for (i=0; i= 100. -.sp -.nf - MPI_Comm comm; - int gsize,*sendbuf; - int root, rbuf[100], i, *displs, *scounts; - - \&... - - MPI_Comm_size(comm, &gsize); - sendbuf = (int *)malloc(gsize*stride*sizeof(int)); - \&... - displs = (int *)malloc(gsize*sizeof(int)); - scounts = (int *)malloc(gsize*sizeof(int)); - for (i=0; i -int MPI_Send(const void *\fIbuf\fP, int\fI count\fP, MPI_Datatype\fI datatype\fP, int\fI dest\fP, - int\fI tag\fP, MPI_Comm\fI comm\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_SEND(\fIBUF, COUNT, DATATYPE, DEST, TAG, COMM, IERROR\fP) - \fIBUF(*)\fP - INTEGER \fICOUNT, DATATYPE, DEST, TAG, COMM, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Send(\fIbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIdest\fP, \fItag\fP, \fIcomm\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN) :: \fIbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP, \fIdest\fP, \fItag\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -buf -Initial address of send buffer (choice). -.TP 1i -count -Number of elements send (nonnegative integer). -.TP 1i -datatype -Datatype of each send buffer element (handle). -.TP 1i -dest -Rank of destination (integer). -.TP 1i -tag -Message tag (integer). -.TP 1i -comm -Communicator (handle). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Send performs a standard-mode, blocking send. - -.SH NOTE -.ft R -This routine will block until the message is sent to the destination. For an in-depth explanation of the semantics of the standard-mode send, refer to the MPI-1 Standard. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. 
- -.SH SEE ALSO -.ft R -.nf -MPI_Isend -MPI_Bsend -MPI_Recv - diff --git a/ompi/mpi/man/man3/MPI_Send_init.3in b/ompi/mpi/man/man3/MPI_Send_init.3in deleted file mode 100644 index 0209db645c7..00000000000 --- a/ompi/mpi/man/man3/MPI_Send_init.3in +++ /dev/null @@ -1,104 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Send_init 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Send_init\fP \- Builds a handle for a standard send. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Send_init(const void *\fIbuf\fP, int\fI count\fP, MPI_Datatype\fI datatype\fP, - int\fI dest\fP, int\fI tag\fP, MPI_Comm\fI comm\fP, MPI_Request\fI *request\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_SEND_INIT(\fIBUF, COUNT, DATATYPE, DEST, TAG, COMM, REQUEST, - IERROR\fP) - \fIBUF\fP(*) - INTEGER \fIREQUEST, COUNT, DATATYPE, DEST, TAG\fP - INTEGER \fICOMM, REQUEST, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Send_init(\fIbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIdest\fP, \fItag\fP, \fIcomm\fP, \fIrequest\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP, \fIdest\fP, \fItag\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -buf -Initial address of send buffer (choice). -.TP 1i -count -Number of elements to send (integer). -.TP 1i -datatype -Type of each element (handle). -.TP 1i -dest -Rank of destination (integer). 
-.TP 1i -tag -Message tag (integer). -.TP 1i -comm -Communicator (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -request -Communication request (handle). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -Creates a persistent communication request for a standard mode send operation, and binds to it all the arguments of a send operation. -.sp -A communication (send or receive) that uses a persistent request is initiated by the function MPI_Start or MPI_Startall. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.sp -MPI_Bsend_init -.br -MPI_Ssend_init -.br -MPI_Rsend_init -.br -MPI_Recv_init -.br -MPI_Start -.br -MPI_Startall -.br -MPI_Request_free - - diff --git a/ompi/mpi/man/man3/MPI_Sendrecv.3in b/ompi/mpi/man/man3/MPI_Sendrecv.3in deleted file mode 100644 index d7e2d757e67..00000000000 --- a/ompi/mpi/man/man3/MPI_Sendrecv.3in +++ /dev/null @@ -1,118 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Sendrecv 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Sendrecv\fP \- Sends and receives a message. 
- -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Sendrecv(const void *\fIsendbuf\fP, int\fI sendcount\fP, MPI_Datatype\fI sendtype\fP, - int\fI dest\fP, int\fI sendtag\fP, void\fI *recvbuf\fP, int\fI recvcount\fP, - MPI_Datatype\fI recvtype\fP, int\fI source\fP, int\fI recvtag\fP, - MPI_Comm\fI comm\fP, MPI_Status\fI *status\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_SENDRECV(\fISENDBUF, SENDCOUNT, SENDTYPE, DEST, SENDTAG, - RECVBUF, RECVCOUNT, RECVTYPE, SOURCE, RECVTAG, COMM, - STATUS, IERROR\fP) - \fISENDBUF(*), RECVBUF(*)\fP - INTEGER \fISENDCOUNT, SENDTYPE, DEST, SENDTAG\fP - INTEGER \fIRECVCOUNT, RECVTYPE, SOURCE, RECVTAG, COMM\fP - INTEGER \fISTATUS(MPI_STATUS_SIZE), IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Sendrecv(\fIsendbuf\fP, \fIsendcount\fP, \fIsendtype\fP, \fIdest\fP, \fIsendtag\fP, \fIrecvbuf\fP, - \fIrecvcount\fP, \fIrecvtype\fP, \fIsource\fP, \fIrecvtag\fP, \fIcomm\fP, \fIstatus\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN) :: \fIsendbuf\fP - TYPE(*), DIMENSION(..) :: \fIrecvbuf\fP - INTEGER, INTENT(IN) :: \fIsendcount\fP, \fIdest\fP, \fIsendtag\fP, \fIrecvcount\fP, \fIsource,\fP - \fIrecvtag\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIsendtype\fP, \fIrecvtype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Status) :: \fIstatus\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -sendbuf -Initial address of send buffer (choice). -.TP 1i -sendcount -Number of elements to send (integer). -.TP 1i -sendtype -Type of elements in send buffer (handle). -.TP 1i -dest -Rank of destination (integer). -.TP 1i -sendtag -Send tag (integer). -.TP 1i -recvcount -Maximum number of elements to receive (integer). -.TP 1i -recvtype -Type of elements in receive buffer (handle). -.TP 1i -source -Rank of source (integer). -.TP 1i -recvtag -Receive tag (integer). -.TP 1i -comm -Communicator (handle). 
- -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -recvbuf -Initial address of receive buffer (choice). -.TP 1i -status -Status object (status). This refers to the receive operation. -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -The send-receive operations combine in one call the sending of a message to one destination and the receiving of another message, from another process. The two (source and destination) are possibly the same. A send-receive operation is useful for executing a shift operation across a chain of processes. If blocking sends and receives are used for such a shift, then one needs to order the sends and receives correctly (for example, even processes send, then receive; odd processes receive first, then send) in order to prevent cyclic dependencies that may lead to deadlock. When a send-receive operation is used, the communication subsystem takes care of these issues. The send-receive operation can be used in conjunction with the functions described in Chapter 6 of the MPI-1 Standard, "Process Topologies," in order to perform shifts on various logical topologies. Also, a send-receive operation is useful for implementing remote procedure calls. -.sp -A message sent by a send-receive operation can be received by a regular receive operation or probed by a probe operation; a send-receive operation can receive a message sent by a regular send operation. -.sp -MPI_Sendrecv executes a blocking send and receive operation. Both send and receive use the same communicator, but possibly different tags. The send buffer and receive buffers must be disjoint, and may have different lengths and datatypes. -.sp -If your application does not need to examine the \fIstatus\fP field, you can save resources by using the predefined constant MPI_STATUS_IGNORE as a special value for the \fIstatus\fP argument. 
- -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.sp -MPI_Sendrecv_replace - - diff --git a/ompi/mpi/man/man3/MPI_Sendrecv_replace.3in b/ompi/mpi/man/man3/MPI_Sendrecv_replace.3in deleted file mode 100644 index 4d28f2021c2..00000000000 --- a/ompi/mpi/man/man3/MPI_Sendrecv_replace.3in +++ /dev/null @@ -1,107 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Sendrecv_replace 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Sendrecv_replace\fP \- Sends and receives a message using a single buffer. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Sendrecv_replace(void *\fIbuf\fP, int\fI count\fP, MPI_Datatype\fI datatype\fP, - int\fI dest\fP, int\fI sendtag\fP, int\fI source\fP, int\fI recvtag\fP, MPI_Comm\fI comm\fP, - MPI_Status\fI *status\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_SENDRECV_REPLACE(\fIBUF, COUNT, DATATYPE, DEST, SENDTAG, SOURCE, - RECVTAG, COMM, STATUS, IERROR\fP) - \fIBUF\fP(*) - INTEGER \fICOUNT, DATATYPE, DEST, SENDTAG\fP - INTEGER \fISOURCE, RECVTAG, COMM\fP - INTEGER \fISTATUS(MPI_STATUS_SIZE), IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Sendrecv_replace(\fIbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIdest\fP, \fIsendtag\fP, \fIsource\fP, \fIrecvtag\fP, - \fIcomm\fP, \fIstatus\fP, \fIierror\fP) - TYPE(*), DIMENSION(..) :: \fIbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP, \fIdest\fP, \fIsendtag\fP, \fIsource\fP, \fIrecvtag\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Status) :: \fIstatus\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -buf -Initial address of send and receive buffer (choice). - -.SH INPUT PARAMETERS -.ft R -.TP 1i -count -Number of elements in send and receive buffer (integer). -.TP 1i -datatype -Type of elements to send and receive (handle). -.TP 1i -dest -Rank of destination (integer). -.TP 1i -sendtag -Send message tag (integer). -.TP 1i -source -Rank of source (integer). -.TP 1i -recvtag -Receive message tag (integer). -.TP 1i -comm -Communicator (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -status -Status object (status). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -The send-receive operations combine in one call the sending of a message to one destination and the receiving of another message, from another process. The two (source and destination) are possibly the same. A send-receive operation is useful for executing a shift operation across a chain of processes. 
If blocking sends and receives are used for such a shift, then one needs to order the sends and receives correctly (for example, even processes send, then receive; odd processes receive first, then send) in order to prevent cyclic dependencies that may lead to deadlock. When a send-receive operation is used, the communication subsystem takes care of these issues. The send-receive operation can be used in conjunction with the functions described in Chapter 6 of the MPI Standard, "Process Topologies," in order to perform shifts on various logical topologies. Also, a send-receive operation is useful for implementing remote procedure calls. -.sp -A message sent by a send-receive operation can be received by a regular receive operation or probed by a probe operation; a send-receive operation can receive a message sent by a regular send operation. -.sp -MPI_Sendrecv_replace executes a blocking send and receive. The same buffer is used both for the send and for the receive, so that the message sent is replaced by the message received. -.sp -The semantics of a send-receive operation is what would be obtained if the caller forked two concurrent threads, one to execute the send, and one to execute the receive, followed by a join of these two threads. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. 
- -.SH SEE ALSO -.ft R -.sp -MPI_Sendrecv - - - diff --git a/ompi/mpi/man/man3/MPI_Session_create_errhandler.3.md b/ompi/mpi/man/man3/MPI_Session_create_errhandler.3.md deleted file mode 100644 index b2d74a3ad11..00000000000 --- a/ompi/mpi/man/man3/MPI_Session_create_errhandler.3.md +++ /dev/null @@ -1,76 +0,0 @@ -# Name - -`MPI_Session_create_errhandler` - Creates an error handler that can be -attached to sessions - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Session_create_errhandler(MPI_Session_errhandler_function *function, - MPI_Errhandler *errhandler) -``` - -## Fortran Syntax - -```fortran -USE MPI -! or the older form: INCLUDE 'mpif.h' - -MPI_SESSION_CREATE_ERRHANDLER(FUNCTION, ERRHANDLER, IERROR) - EXTERNAL FUNCTION - INTEGER ERRHANDLER, IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Session_create_errhandler(session_errhandler_fn, errhandler, ierror) - PROCEDURE(MPI_Session_errhandler_function) :: session_errhandler_fn - TYPE(MPI_Errhandler), INTENT(OUT) :: errhandler - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Input Parameter - -* `function` : User-defined error handling procedure (function). - -# Output Parameters - -* `errhandler` : MPI error handler (handle). -* `IERROR` : Fortran only: Error status (integer). - -# Description - -`MPI_Session_create_errhandler` creates an error handler that can be attached -to sessions. This `function` is identical to `MPI_Errhandler_create`, -the use of which is deprecated. -In C, the user routine should be a `function` of type -`MPI_Session_errhandler_function`, which is defined as -```c -typedef void MPI_Session_errhandler_function(MPI_Session *, int *, ...); -``` -The first argument is the session in use. The second is the error -code to be returned by the MPI routine that raised the error. This -typedef replaces `MPI_Handler_function`, the use of which is deprecated. 
-In Fortran, the user routine should be of this form: -```fortran -SUBROUTINE SESSION_ERRHANDLER_FUNCTION(SESSION, ERROR_CODE, ...) - INTEGER SESSION, ERROR_CODE -``` - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the `function` and Fortran routines in the last argument. -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O `function` errors. The error handler may be changed with -`MPI_Session_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. diff --git a/ompi/mpi/man/man3/MPI_Session_f2c.3.md b/ompi/mpi/man/man3/MPI_Session_f2c.3.md deleted file mode 100644 index 2deaa5915ac..00000000000 --- a/ompi/mpi/man/man3/MPI_Session_f2c.3.md +++ /dev/null @@ -1,43 +0,0 @@ -# NAME - -MPI_Session_c2f, MPI_Session_f2c - Translates a C session handle into a Fortran INTEGER-style session handle, or vice versa. - -# SYNTAX - -## C Syntax - -```c -#include - -int MPI_Session_f2c(const MPI_Fint *f_session, MPI_Session *c_session) -int MPI_Session_c2f(const MPI_Session *c_session, MPI_Fint *f_session) -``` - -# PARAMETERS - -* `f_session`: `mpi`-style `INTEGER` MPI session object -* `c_session`: C-style MPI session object - -# DESCRIPTION - -These two procedures are provided in C to convert from a Fortran -session (which is an array of integers) to a C session (which is a -structure), and vice versa. The conversion occurs on all the -information in `session`, including that which is hidden. That is, -no session information is lost in the conversion. - -When using `MPI_Session_f2c()`, if `f_session` is a valid Fortran -session, then `MPI_Session_f2c()` returns in `c_session` a -valid C session with the same content. 
If `f_session` is the Fortran -value of `MPI_SESSION_NULL`, or if -`f_session` is not a valid Fortran session, then the call is erroneous. - -When using `MPI_Session_c2f()`, the opposite conversion is applied. If -`c_session` is `MPI_SESSION_NULL`, or if -`c_session` is not a valid C session, then the call is erroneous. - -# NOTES - -These functions are only available in C; they are not available in any -of the Fortran MPI interfaces. - diff --git a/ompi/mpi/man/man3/MPI_Session_finalize.3.md b/ompi/mpi/man/man3/MPI_Session_finalize.3.md deleted file mode 100644 index c5d4d6d8219..00000000000 --- a/ompi/mpi/man/man3/MPI_Session_finalize.3.md +++ /dev/null @@ -1,78 +0,0 @@ -# Name - -`MPI_Session_finalize` - releases all MPI state associated with a session - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Session_finalize(MPI_Session *session) -``` - -## Fortran Syntax - -```fortran -USE MPI -! or the older form: INCLUDE 'mpif.h' - -MPI_SESSION_FINALIZE(SESSION, IERROR) - INTEGER SESSION, IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Session_finalize(session, ierror) - TYPE(MPI_Session), INTENT(IN) :: session - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Input Parameters - -* `session` : session to be finalized (handle) - -# Output Parameters - -* `IERROR` : Fortran only: Error status (integer). - -# Description - -`MPI_Session_finalize` releases all MPI state associated with the supplied `session`. Every instantiated -session must be finalized using `MPI_Session_finalize`. The handle `session` is set to -MPI_SESSION_NULL by the call. - -# Notes - -Before an MPI process invokes `MPI_Session_finalize`, the process must perform -all MPI calls needed to complete its involvement in MPI communications: it must locally -complete all MPI operations that it initiated and it must execute matching calls needed to -complete MPI communications initiated by other processes. 
The call to `MPI_Session_finalize` does not free objects created by MPI calls; these -objects are freed using `MPI_XXX_FREE` calls. `MPI_Session_finalize` may be synchronizing on any or all of the groups associated -with communicators, windows, or files derived from the session and not disconnected, freed, -or closed, respectively, before the call to `MPI_Session_finalize` procedure. -`MPI_Session_finalize` behaves as if all such synchronizations occur concurrently. As -`MPI_Comm_free` may mark a communicator for freeing later, `MPI_Session_finalize` -may be synchronizing on the group associated with a communicator that is only freed (with -`MPI_Comm_free`) rather than disconnected (with `MPI_Comm_disconnect`). - - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Session_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. - -# See Also - -[`MPI_Session_init`(3)](MPI_Session_init.html) -[`MPI_Comm_disconnect`(3)](MPI_Comm_disconnect.html) diff --git a/ompi/mpi/man/man3/MPI_Session_get_info.3.md b/ompi/mpi/man/man3/MPI_Session_get_info.3.md deleted file mode 100644 index e3fd2659fd3..00000000000 --- a/ompi/mpi/man/man3/MPI_Session_get_info.3.md +++ /dev/null @@ -1,71 +0,0 @@ -# Name - -`MPI_Session_get_info` - Returns an info object containing the hints of an MPI Session - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Session_get_info(MPI_Session session, MPI_Info *info_used) -``` - -## Fortran Syntax - -```fortran -USE MPI -!
or the older form: INCLUDE 'mpif.h' - -MPI_SESSION_GET_INFO(SESSION, INFO_USED) - INTEGER SESSION, INFO_USED -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Session_get_info(session, info_used) - TYPE(MPI_Session), INTENT(IN) :: session - TYPE(MPI_Info), INTENT(OUT) :: info_used -``` - -# Input Parameters - -* `session` : session (handle) - -# Output Parameters - -* `info_used`: info object (handle) -* `IERROR` : Fortran only: Error status (integer). - -# Description - -`MPI_Session_get_info` returns a new info object containing the hints of the MPI -Session associated with session. The current setting of all hints related to this MPI Session -is returned in `info_used`. An MPI implementation is required to return all hints that are -supported by the implementation and have default values specified; any user-supplied hints -that were not ignored by the implementation; and any additional hints that were set by -the implementation. If no such hints exist, a handle to a newly created info object is -returned that contains no key/value pair. - -# Notes - -The user is responsible for freeing info_used via ` MPI_Info_free`. - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Session_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. 
- -# See Also - -[`MPI_Session_init`(3)](MPI_Session_init.html) diff --git a/ompi/mpi/man/man3/MPI_Session_get_nth_pset.3.md b/ompi/mpi/man/man3/MPI_Session_get_nth_pset.3.md deleted file mode 100644 index f608930f7d6..00000000000 --- a/ompi/mpi/man/man3/MPI_Session_get_nth_pset.3.md +++ /dev/null @@ -1,86 +0,0 @@ -# Name - -`MPI_Session_get_nth_pset` - Query runtime for name of the nth process set - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Session_get_nth_pset(MPI_Session session, MPI_Info info, int n, int *pset_len, char *pset_name) -``` - -## Fortran Syntax - -```fortran -USE MPI -! or the older form: INCLUDE 'mpif.h' - -MPI_SESSION_GET_NTH_PSET(SESSION, INFO, N, PSET_LEN, PSET_NAME, IERROR) - INTEGER SESSION, INFO, N, PSET_LEN, IERROR - CHARACTER*(*) PSET_NAME -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Session_get_nth_pset(session, info, n, pset_len, pset_name, ierror) - TYPE(MPI_Session), INTENT(IN) :: session - TYPE(MPI_Info), INTENT(IN) :: info - INTEGER, INTENT(IN) :: n - INTEGER, INTENT(INOUT) :: pset_len - CHARACTER(LEN=*), INTENT(OUT) :: pset_name - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Input Parameters - -* `session` : session (handle) -* `info`: info object (handle) -* `n`: index of the desired process set name (integer) - -## Input/Output Parameter - -* `pset_len`: length of the pset_name argument (integer) - -# Output Parameters - -* `pset_name` : name of the nth process set (string) -* `IERROR` : Fortran only: Error status (integer). - -# Description - -`MPI_Session_get_nth_pset` returns the name of the nth process set in the supplied `pset_name` buffer. -`pset_len` is the size of the buffer needed to store the nth process set name. If the `pset_len` -passed into the function is less than the actual buffer size needed for the process set name, -then the string value returned in `pset_name` is truncated. If `pset_len` is set to 0, `pset_name` is -not changed. 
On return, the value of `pset_len` will be set to the required buffer size to hold -the process set name. In C, `pset_len` includes the required space for the null terminator. In -C, this function returns a null terminated string in all cases where the `pset_len` input value -is greater than 0. - -# Notes - -Process set names have an implementation-defined maximum length of -`MPI_MAX_PSET_NAME_LEN` characters. `MPI_MAX_PSET_NAME_LEN` shall have a value of -at least 63. - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Session_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. - -# See Also - -[`MPI_Session_init`(3)](MPI_Session_init.html) -[`MPI_Session_get_num_psets`(3)](MPI_Session_get_num_psets.html) diff --git a/ompi/mpi/man/man3/MPI_Session_get_num_psets.3.md b/ompi/mpi/man/man3/MPI_Session_get_num_psets.3.md deleted file mode 100644 index 3c61cb4d2dc..00000000000 --- a/ompi/mpi/man/man3/MPI_Session_get_num_psets.3.md +++ /dev/null @@ -1,77 +0,0 @@ -# Name - -`MPI_Session_get_num_psets` - Query runtime for number of available process sets - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Session_get_num_psets(MPI_Session session, MPI_Info info, int *npset_names) -``` - -## Fortran Syntax - -```fortran -USE MPI -! 
or the older form: INCLUDE 'mpif.h' - -MPI_SESSION_GET_NUM_PSETS(SESSION, INFO, NPSET_NAMES, IERROR) - INTEGER SESSION, INFO, NPSET_NAMES, IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Session_get_num_psets(session, info, npset_names, ierror) - TYPE(MPI_Session), INTENT(IN) :: session - TYPE(MPI_Info), INTENT(IN) :: info - INTEGER, INTENT(OUT) :: npset_names - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Input Parameters - -* `session` : session (handle) -* `info`: info object (handle) - -# Output Parameters - -* `npset_names` : number of available process sets (non-negative integer) -* `IERROR` : Fortran only: Error status (integer). - -# Description - -`MPI_Session_get_num_psets` is used to query the runtime for the number of available process sets in -which the calling MPI process is a member. An MPI implementation is allowed to increase -the number of available process sets during the execution of an MPI application when new -process sets become available. However, MPI implementations are not allowed to change -the index of a particular process set name, or to change the name of the process set at a -particular index, or to delete a process set name once it has been added. - -# Notes - -When a process set becomes invalid, for example, when some processes become unreachable due to failures -in the communication system, subsequent usage of the process set name may raise an -error. For example, creating an `MPI_Group` from such a process set might succeed because it -is a local operation, but creating an `MPI_Comm` from that group and attempting collective -communication may raise an error. - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors.
The error handler may be changed with -`MPI_Session_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. - -# See Also - -[`MPI_Session_init`(3)](MPI_Session_init.html) diff --git a/ompi/mpi/man/man3/MPI_Session_get_pset_info.3.md b/ompi/mpi/man/man3/MPI_Session_get_pset_info.3.md deleted file mode 100644 index 038c70486bd..00000000000 --- a/ompi/mpi/man/man3/MPI_Session_get_pset_info.3.md +++ /dev/null @@ -1,72 +0,0 @@ -# Name - -`MPI_Session_get_pset_info` - Returns an info object containing properties of a specific process set - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Session_get_pset_info(MPI_Session session, const char *pset_name, MPI_Info *info) -``` - -## Fortran Syntax - -```fortran -USE MPI -! or the older form: INCLUDE 'mpif.h' - -MPI_SESSION_GET_PSET_INFO(SESSION, PSET_NAME, INFO, IERROR) - INTEGER SESSION, INFO, IERROR - CHARACTER*(*) PSET_NAME -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Session_get_pset_info(session, pset_name, info, ierror) - TYPE(MPI_Session), INTENT(IN) :: session - CHARACTER(LEN=*), INTENT(IN) :: pset_name - TYPE(MPI_Info), INTENT(OUT) :: info - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Input Parameters - -* `session` : session (handle) -* `pset_name` : name of process set (string) - -# Output Parameters - -* `info`: info object (handle) -* `IERROR` : Fortran only: Error status (integer). - -# Description - -`MPI_Session_get_pset_info` is used to query properties of a specific process set. The returned info -object can be queried with existing MPI info object query functions. One key/value pair -must be defined, "mpi_size". The value of the "mpi_size" key specifies the number of MPI -processes in the process set. - -# Notes - -The user is responsible for freeing the returned info object via `MPI_Info_free`.
- -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -`MPI_Session_set_errhandler`; the predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. - -# See Also - -[`MPI_Session_init`(3)](MPI_Session_init.html) diff --git a/ompi/mpi/man/man3/MPI_Session_init.3.md b/ompi/mpi/man/man3/MPI_Session_init.3.md deleted file mode 100644 index 5d3577954e8..00000000000 --- a/ompi/mpi/man/man3/MPI_Session_init.3.md +++ /dev/null @@ -1,76 +0,0 @@ -# Name - -`MPI_Session_init` - Creates a new session handle - -# Syntax - -## C Syntax - -```c -#include - -int MPI_Session_init(MPI_Info info, MPI_Errhandler errhandler, MPI_Session *session) -``` - -## Fortran Syntax - -```fortran -USE MPI -! or the older form: INCLUDE 'mpif.h' - -MPI_SESSION_INIT(INFO, ERRHANDLER, SESSION, IERROR) - INTEGER INFO, ERRHANDLER, SESSION, IERROR -``` - -## Fortran 2008 Syntax - -```fortran -USE mpi_f08 - -MPI_Session_init(info, errhandler, session, ierror) - TYPE(MPI_Info), INTENT(IN) :: info - TYPE(MPI_Errhandler), INTENT(IN) :: errhandler - TYPE(MPI_Session), INTENT(OUT) :: session - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# Input Parameters - -* `info` : info object (handle) -* `errhandler` : error handler to be attached to the returned session (handle) - -# Output Parameters - -* `session` : New session (handle). -* `IERROR` : Fortran only: Error status (integer). - -# Description - -`MPI_Session_init` is used to instantiate an MPI Session. 
The returned session handle -can be used to query the runtime system about characteristics of the job within which the process is running, as well as other system resources. -An application can make multiple calls to `MPI_Session_init` and the related `MPI_Session_finalize` routine. - -# Notes - -The info argument is used to request MPI functionality requirements and possible MPI -implementation specific capabilities. - -The `errhandler` argument specifies an error handler to invoke in the event that the -Session instantiation call encounters an error. - -# Errors - -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The predefined error handler `MPI_ERRORS_RETURN` -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. - -# See Also - -[`MPI_Session_get_num_psets`(3)](MPI_Session_get_num_psets.html) -[`MPI_Session_get_nth_pset`(3)](MPI_Session_get_nth_pset.html) -[`MPI_Session_group_from_pset`(3)](MPI_Session_group_from_pset.html) -[`MPI_Session_finalize`(3)](MPI_Session_finalize.html) diff --git a/ompi/mpi/man/man3/MPI_Sizeof.3in b/ompi/mpi/man/man3/MPI_Sizeof.3in deleted file mode 100644 index fb414c41a45..00000000000 --- a/ompi/mpi/man/man3/MPI_Sizeof.3in +++ /dev/null @@ -1,73 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Sizeof 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" - -.SH NAME -\fBMPI_Sizeof\fP \- Returns the size, in bytes, of the given type - -.SH SYNTAX -.ft R - -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_SIZEOF(\fIX, SIZE, IERROR\fP) - \fIX\fP -INTEGER \fISIZE, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Sizeof(\fIx\fP, \fIsize\fP, \fIierror\fP) - TYPE(*), DIMENSION(..) :: \fIx\fP - INTEGER, INTENT(OUT) :: \fIsize\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -X -A Fortran variable of numeric intrinsic type (choice). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -SIZE -Size of machine representation of that type (integer). -.ft R -.TP 1i -IERROR -Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_SIZEOF returns the size (in bytes) of the machine representation -of the given variable. It is a generic Fortran type and has a Fortran -binding only. This routine is similar to the sizeof builtin in -C. However, if given an array argument, it returns the size of the -base element, not the size of the whole array. - -.SH NOTES -This function is not available in C because it is not necessary. - -.SH ERRORS -.ft R -Almost all MPI routines return an error value; C routines as -the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. -.sp -See the MPI man page for a full list of MPI error codes. - diff --git a/ompi/mpi/man/man3/MPI_Ssend.3in b/ompi/mpi/man/man3/MPI_Ssend.3in deleted file mode 100644 index 877d3628896..00000000000 --- a/ompi/mpi/man/man3/MPI_Ssend.3in +++ /dev/null @@ -1,77 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. 
-.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Ssend 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Ssend\fP \- Standard synchronous send. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Ssend(const void *\fIbuf\fP, int\fI count\fP, MPI_Datatype\fI datatype\fP, int\fI dest\fP, - int\fI tag\fP, MPI_Comm\fI comm\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_SSEND(\fIBUF, COUNT, DATATYPE, DEST, TAG, COMM, IERROR\fP) - \fIBUF\fP(*) - INTEGER \fICOUNT, DATATYPE, DEST, TAG, COMM, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Ssend(\fIbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIdest\fP, \fItag\fP, \fIcomm\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN) :: \fIbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP, \fIdest\fP, \fItag\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -buf -Initial address of send buffer (choice). -.TP 1i -count -Number of elements in send buffer (nonnegative integer). -.TP 1i -datatype -Datatype of each send buffer element (handle). -.TP 1i -dest -Rank of destination (integer). -.TP 1i -tag -Message tag (integer). -.TP 1i -comm -Communicator (handle). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Ssend performs a synchronous-mode, blocking send. See the MPI-1 Standard for more detailed information about such sends. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. 
By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_Ssend_init.3in b/ompi/mpi/man/man3/MPI_Ssend_init.3in deleted file mode 100644 index f216cd92021..00000000000 --- a/ompi/mpi/man/man3/MPI_Ssend_init.3in +++ /dev/null @@ -1,103 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Ssend_init 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Ssend_init\fP \- Builds a handle for a synchronous send. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Ssend_init(const void *\fIbuf\fP, int\fI count\fP, MPI_Datatype\fI datatype\fP, - int\fI dest\fP, int\fI tag\fP, MPI_Comm\fI comm\fP, MPI_Request\fI *request\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_SSEND_INIT(\fIBUF, COUNT, DATATYPE, DEST, TAG, COMM, REQUEST, - IERROR\fP) - \fIBUF\fP(*) - INTEGER \fICOUNT, DATATYPE, DEST, TAG, COMM, REQUEST, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Ssend_init(\fIbuf\fP, \fIcount\fP, \fIdatatype\fP, \fIdest\fP, \fItag\fP, \fIcomm\fP, \fIrequest\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: \fIbuf\fP - INTEGER, INTENT(IN) :: \fIcount\fP, \fIdest\fP, \fItag\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Request), INTENT(OUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -buf -Initial address of send buffer (choice). -.TP 1i -count -Number of elements to send (integer). -.TP 1i -datatype -Type of each element (handle). -.TP 1i -dest -Rank of destination (integer). -.TP 1i -tag -Message tag (integer). -.TP 1i -comm -Communicator (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -request -Communication request (handle). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -Creates a persistent communication object for a synchronous mode send operation, and binds to it all the arguments of a send operation. -.sp -A communication (send or receive) that uses a persistent request is initiated by the function MPI_Start. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. 
- -.SH SEE ALSO -.ft R -.sp -MPI_Bsend_init -.br -MPI_Send_init -.br -MPI_Rsend_init -.br -MPI_Recv_init -.br -MPI_Start -.br -MPI_Startall -.br -MPI_Ssend - - diff --git a/ompi/mpi/man/man3/MPI_Start.3in b/ompi/mpi/man/man3/MPI_Start.3in deleted file mode 100644 index 32da019a2f6..00000000000 --- a/ompi/mpi/man/man3/MPI_Start.3in +++ /dev/null @@ -1,78 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Start 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Start\fP \- Initiates a communication using a persistent request handle. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Start(MPI_Request *\fIrequest\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_START(\fIREQUEST, IERROR\fP) - INTEGER \fIREQUEST, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Start(\fIrequest\fP, \fIierror\fP) - TYPE(MPI_Request), INTENT(INOUT) :: \fIrequest\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -request -Communication request (handle). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -A communication (send or receive) that uses a persistent request is initiated by the function MPI_Start. -.sp -The argument, request, is a handle returned by one of the persistent communication-request initialization functions (MPI_Send_init, MPI_Bsend_init, MPI_Ssend_init, MPI_Rsend_init, MPI_Recv_init). The associated request should be inactive and becomes active once the call is made. -.sp -If the request is for a send with ready mode, then a matching receive should be posted before the call is made. 
From the time the call is made until after the operation completes, the communication buffer should not be accessed. -.sp -The call is local, with semantics similar to the nonblocking communication operations (see Section 3.7 in the MPI-1 Standard, "Nonblocking Communication.") That is, a call to MPI_Start with a request created by MPI_Send_init starts a communication in the same manner as a call to MPI_Isend; a call to MPI_Start with a request created by MPI_Bsend_init starts a communication in the same manner as a call to MPI_Ibsend; and so on. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.sp -MPI_Bsend_init -.br -MPI_Rsend_init -.br -MPI_Send_init -.br -MPI_Ssend_init -.br -MPI_Recv_init -.br -MPI_Startall - - diff --git a/ompi/mpi/man/man3/MPI_Startall.3in b/ompi/mpi/man/man3/MPI_Startall.3in deleted file mode 100644 index 1f2b366134e..00000000000 --- a/ompi/mpi/man/man3/MPI_Startall.3in +++ /dev/null @@ -1,95 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Startall 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Startall\fP \- Starts a collection of requests. 
- -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Startall(int \fIcount\fP, MPI_Request\fI array_of_requests[]\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_STARTALL(\fICOUNT, ARRAY_OF_REQUESTS, IERROR\fP) - INTEGER \fICOUNT, ARRAY_OF_REQUESTS(*), IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Startall(\fIcount\fP, \fIarray_of_requests\fP, \fIierror\fP) - INTEGER, INTENT(IN) :: \fIcount\fP - TYPE(MPI_Request), INTENT(INOUT) :: \fIarray_of_requests(count)\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -count -List length (integer). - -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -array_of_requests -Array of requests (array of handle). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -Starts all communications associated with requests in array_of_requests. A call to MPI_Startall(count, array_of_requests) has the same effect as calls to MPI_Start (&array_of_requests[i]), executed for i=0 ,..., count-1, in some arbitrary order. -.sp -A communication started with a call to MPI_Start or MPI_Startall is completed by a call to MPI_Wait, MPI_Test, or one of the derived functions MPI_Waitany, MPI_Testany, MPI_Waitall, MPI_Testall, MPI_Waitsome, MPI_Testsome (these are described in Section 3.7.5 of the MPI-1 Standard, "Multiple Completions"). The request becomes inactive after successful completion by such a call. The request is not deallocated, and it can be activated anew by another MPI_Start or MPI_Startall call. -.sp -A persistent request is deallocated by a call to MPI_Request_free (see Section 3.7.3 of the MPI-1 Standard, "Communication Completion"). -.sp -The call to MPI_Request_free can occur at any point in the program after the persistent request was created. However, the request will be deallocated only after it becomes inactive. Active receive requests should not be freed. 
Otherwise, it will not be possible to check that the receive has completed. It is preferable, in general, to free requests when they are inactive. If this rule is followed, then the persistent communication request functions will be invoked in a sequence of the form, -.br -.sp - Create (Start Complete)* Free -.br -.sp -where * indicates zero or more repetitions. If the same communication object is used in several concurrent threads, it is the user's responsibility to coordinate calls so that the correct sequence is obeyed. -.sp -A send operation initiated with MPI_Start can be matched with any receive operation and, likewise, a receive operation initiated with MPI_Start can receive messages generated by any send operation. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. 
- -.SH SEE ALSO -.ft R -.sp -MPI_Bsend_init -.br -MPI_Rsend_init -.br -MPI_Send_init -.br -MPI_Ssend_init -.br -MPI_Recv_init -.br -MPI_Start -.br -MPI_Request_free - - diff --git a/ompi/mpi/man/man3/MPI_Status_c2f.3in b/ompi/mpi/man/man3/MPI_Status_c2f.3in deleted file mode 100644 index da04a6e9097..00000000000 --- a/ompi/mpi/man/man3/MPI_Status_c2f.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Status_f2c.3 diff --git a/ompi/mpi/man/man3/MPI_Status_c2f08.3in b/ompi/mpi/man/man3/MPI_Status_c2f08.3in deleted file mode 100644 index b3c411bb42c..00000000000 --- a/ompi/mpi/man/man3/MPI_Status_c2f08.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Status_f082c.3 diff --git a/ompi/mpi/man/man3/MPI_Status_f082c.3.md b/ompi/mpi/man/man3/MPI_Status_f082c.3.md deleted file mode 100644 index c9004565f7e..00000000000 --- a/ompi/mpi/man/man3/MPI_Status_f082c.3.md +++ /dev/null @@ -1,58 +0,0 @@ -# NAME - -MPI_Status_f082c, MPI_Status_c2f08 - Translates a C status into a Fortran 2008 status, or vice versa. - -# SYNTAX - -## C Syntax - -```c -#include - -int MPI_Status_f082c(const MPI_F08_status *f08_status, MPI_Status *c_status) -int MPI_Status_c2f08(const MPI_Status *c_status, MPI_F08_status *f08_status) -``` - -# PARAMETERS - -* `f08_status`: `mpi_f08`-style MPI status object -* `c_status`: C-style MPI status object - -# DESCRIPTION - -These two procedures are provided in C to convert from a Fortran 2008 -status (which is a derived type made of integers) to a C status (which -is a structure), and vice versa. The conversion occurs on all the -information in `status`, including that which is hidden. That is, -no status information is lost in the conversion. - -When using `MPI_Status_f082c()`, if `f08_status` is a valid Fortran -status, but not the Fortran value of `MPI_F08_STATUS_IGNORE` or -`MPI_F08_STATUSES_IGNORE`, then `MPI_Status_f082c()` returns in -`c_status` a valid C status with the same content. 
If `f08_status` is -the Fortran value of `MPI_STATUS_IGNORE` or `MPI_STATUSES_IGNORE`, or -if `f08_status` is not a valid Fortran status, then the call is -erroneous. - -When using `MPI_Status_c2f08()`, the opposite conversion is applied. If -`c_status` is `MPI_STATUS_IGNORE` or `MPI_STATUSES_IGNORE`, or if -`c_status` is not a valid C status, then the call is erroneous. - -The input status has the same source, tag and error code values as the -output status, and returns the same answers when queried for count, -elements, and cancellation. The conversion function may be called with -an input status argument that has an undefined error field, in which -case the value of the error field in the output status argument is -undefined. - -# NOTES - -These functions are only available in C; they are not available in any -of the Fortran MPI interfaces. - -# SEE ALSO - -[`MPI_Status_c2f`(3)](MPI_Status_c2f.html), -[`MPI_Status_f2c`(3)](MPI_Status_f2c.html), -[`MPI_Status_f082f`(3)](MPI_Status_f082f.html), -[`MPI_Status_f2f08`(3)](MPI_Status_f2f08.html) diff --git a/ompi/mpi/man/man3/MPI_Status_f082f.3.md b/ompi/mpi/man/man3/MPI_Status_f082f.3.md deleted file mode 100644 index a53af76bef8..00000000000 --- a/ompi/mpi/man/man3/MPI_Status_f082f.3.md +++ /dev/null @@ -1,92 +0,0 @@ -# NAME - -MPI_Status_f082f, MPI_Status_f2f08 - Translates a Fortran 2008 status into a Fortran INTEGER-style status, or vice versa. 
- -# SYNTAX - -## C Syntax - -```c -#include - -int MPI_Status_f082f(const MPI_F08_status *f08_status, MPI_Fint *f_status) -int MPI_Status_f2f08(const MPI_Fint *f_status, MPI_F08_status *f08_status) -``` - -## Fortran mpi Module Syntax - -```fortran -USE MPI - -MPI_STATUS_F082F(F08_STATUS, F_STATUS, IERROR) - TYPE(MPI_Status) :: F08_STATUS - INTEGER :: STATUS(MPI_STATUS_SIZE), IERROR - -MPI_STATUS_F2F08(F_STATUS, F08_STATUS, IERROR) - INTEGER :: F_STATUS(MPI_STATUS_SIZE), IERROR - TYPE(MPI_Status) :: F08_STATUS -``` - -## Fortran mpi_f08 Module Syntax - -```fortran -USE mpi_f08 - -MPI_Status_f082f(f08_status, f_status, ierror) - TYPE(MPI_Status), INTENT(IN) :: f08_status - INTEGER, INTENT(OUT) :: f_status(MPI_STATUS_SIZE) - INTEGER, OPTIONAL, INTENT(OUT) :: ierror - -MPI_Status_f2f08(f_status, f08_status, ierror) - INTEGER, INTENT(IN) :: f_status(MPI_STATUS_SIZE) - TYPE(MPI_Status), INTENT(OUT) :: f08_status - INTEGER, OPTIONAL, INTENT(OUT) :: ierror -``` - -# PARAMETERS - -* `f08_status`: `mpi_f08`-style MPI status object -* `f_status`: `mpi`-style `INTEGER` MPI status object - -# DESCRIPTION - -These two procedures are provided to convert from a Fortran 2008 -status (which is a derived datatype made of integers) to a Fortran -status (which is an array of integers), and vice versa. The conversion -occurs on all the information in `status`, including that which is -hidden. That is, no status information is lost in the conversion. - -When using `MPI_Status_f082f()`, if `f08_status` is a valid Fortran -status, but not the Fortran value of `MPI_F08_STATUS_IGNORE` (in C), -`MPI_STATUS_IGNORE` (in Fortran) or `MPI_F08_STATUSES_IGNORE` (in C) -or `MPI_STATUSES_IGNORE` (in Fortran), then `MPI_Status_f082f()` -returns in `f_status` a valid array with the same content. 
If -`f08_status` is the C value of `MPI_F08_STATUS_IGNORE` or -`MPI_F08_STATUSES_IGNORE` or the Fortran value of `MPI_STATUS_IGNORE` -or `MPI_STATUSES_IGNORE`, or if `f08_status` is not a valid Fortran -status, then the call is erroneous. - -When using `MPI_Status_f2f08()`, the opposite conversion is -applied. If `f_status` is `MPI_STATUS_IGNORE` or -`MPI_STATUSES_IGNORE`, or if `f_status` is not a valid Fortran status, -then the call is erroneous. - -The input status has the same source, tag and error code values as the -output status, and returns the same answers when queried for count, -elements, and cancellation. The conversion function may be called with -an input status argument that has an undefined error field, in which -case the value of the error field in the output status argument is -undefined. - -# NOTES - -The Fortran subroutines for these MPI routines are only available in -the `mpi` and `mpi_f08` modules (including the type specification for -`TYPE(MPI_Status)`); they are (intentionally) not available in `mpif.h`. - -# SEE ALSO - -[`MPI_Status_c2f`(3)](MPI_Status_c2f.html), -[`MPI_Status_f2c`(3)](MPI_Status_f2c.html), -[`MPI_Status_f082c`(3)](MPI_Status_f082c.html), -[`MPI_Status_c2f08`(3)](MPI_Status_c2f08.html) diff --git a/ompi/mpi/man/man3/MPI_Status_f2c.3.md b/ompi/mpi/man/man3/MPI_Status_f2c.3.md deleted file mode 100644 index 4f24ffdc938..00000000000 --- a/ompi/mpi/man/man3/MPI_Status_f2c.3.md +++ /dev/null @@ -1,57 +0,0 @@ -# NAME - -MPI_Status_f2c, MPI_Status_c2f - Translates a C status into a Fortran INTEGER-style status, or vice versa. 
- -# SYNTAX - -## C Syntax - -```c -#include - -int MPI_Status_f2c(const MPI_Fint *f_status, MPI_Status *c_status) -int MPI_Status_c2f(const MPI_Status *c_status, MPI_Fint *f_status) -``` - -# PARAMETERS - -* `f_status`: `mpi`-style `INTEGER` MPI status object -* `c_status`: C-style MPI status object - -# DESCRIPTION - -These two procedures are provided in C to convert from a Fortran -status (which is an array of integers) to a C status (which is a -structure), and vice versa. The conversion occurs on all the -information in `status`, including that which is hidden. That is, -no status information is lost in the conversion. - -When using `MPI_Status_f2c()`, if `f_status` is a valid Fortran -status, but not the Fortran value of `MPI_STATUS_IGNORE` or -`MPI_STATUSES_IGNORE`, then `MPI_Status_f2c()` returns in `c_status` a -valid C status with the same content. If `f_status` is the Fortran -value of `MPI_STATUS_IGNORE` or `MPI_STATUSES_IGNORE`, or if -`f_status` is not a valid Fortran status, then the call is erroneous. - -When using `MPI_Status_c2f()`, the opposite conversion is applied. If -`c_status` is `MPI_STATUS_IGNORE` or `MPI_STATUSES_IGNORE`, or if -`c_status` is not a valid C status, then the call is erroneous. - -The input status has the same source, tag and error code values as the -output status, and returns the same answers when queried for count, -elements, and cancellation. The conversion function may be called with -an input status argument that has an undefined error field, in which -case the value of the error field in the output status argument is -undefined. - -# NOTES - -These functions are only available in C; they are not available in any -of the Fortran MPI interfaces. 
- -# SEE ALSO - -[`MPI_Status_f082c`(3)](MPI_Status_f082c.html), -[`MPI_Status_c2f08`(3)](MPI_Status_c2f08.html), -[`MPI_Status_f082f`(3)](MPI_Status_f082f.html), -[`MPI_Status_f2f08`(3)](MPI_Status_f2f08.html) diff --git a/ompi/mpi/man/man3/MPI_Status_f2f08.3in b/ompi/mpi/man/man3/MPI_Status_f2f08.3in deleted file mode 100644 index 67f77ae23bc..00000000000 --- a/ompi/mpi/man/man3/MPI_Status_f2f08.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Status_f082f.3 diff --git a/ompi/mpi/man/man3/MPI_Status_set_cancelled.3in b/ompi/mpi/man/man3/MPI_Status_set_cancelled.3in deleted file mode 100644 index e7de779eb01..00000000000 --- a/ompi/mpi/man/man3/MPI_Status_set_cancelled.3in +++ /dev/null @@ -1,68 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Status_set_cancelled 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Status_set_cancelled\fP \- Sets \fIstatus\fP to indicate a request has been canceled. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Status_set_cancelled(MPI_Status *\fIstatus\fP, int \fIflag\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_STATUS_SET_CANCELLED(\fISTATUS, FLAG, IERROR\fP) - INTEGER \fISTATUS\fP(MPI_STATUS_SIZE), \fIIERROR \fP - LOGICAL \fIFLAG\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Status_set_cancelled(\fIstatus\fP, \fIflag\fP, \fIierror\fP) - TYPE(MPI_Status), INTENT(INOUT) :: \fIstatus\fP - LOGICAL, INTENT(OUT) :: \fIflag\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -status -Status with which to associate cancel flag (status). - -.SH INPUT PARAMETER -.ft R -.TP 1i -flag -If true, indicates request was canceled (logical). 
- -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -If \fIflag\fP is set to true, then a subsequent call to MPI_Test_cancelled(\fIstatus, flag\fP) will also return \fIflag\fP = true; otherwise it will return false. - -.SH NOTES -.ft R -Users are advised not to reuse the status fields for values other than those for which they were intended. Doing so may lead to unexpected results when using the status object. For example, calling MPI_Get_elements may cause an error if the value is out of range, or it may be impossible to detect such an error. The \fIextra_state\fP argument provided with a generalized request can be used to return information that does not logically belong in \fIstatus\fP. Furthermore, modifying the values in a status set internally by MPI, such as MPI_Recv, may lead to unpredictable results and is strongly discouraged. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_Status_set_elements.3in b/ompi/mpi/man/man3/MPI_Status_set_elements.3in deleted file mode 100644 index ebda3c8dfc3..00000000000 --- a/ompi/mpi/man/man3/MPI_Status_set_elements.3in +++ /dev/null @@ -1,92 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines -.\" Copyright (c) 2020 Google, LLC. 
All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Status_set_elements 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Status_set_elements\fP, \fBMPI_Status_set_elements_x\fP \- Modifies opaque part of \fIstatus\fP to allow MPI_Get_elements to return \fIcount\fP. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Status_set_elements(MPI_Status *\fIstatus\fP, MPI_Datatype \fIdatatype\fP, int \fIcount\fP) -int MPI_Status_set_elements_x(MPI_Status *\fIstatus\fP, MPI_Datatype \fIdatatype\fP, MPI_Count \fIcount\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_STATUS_SET_ELEMENTS(\fISTATUS, DATATYPE, COUNT, IERROR\fP) - INTEGER \fISTATUS\fP(MPI_STATUS_SIZE), DATATYPE, COUNT, IERROR\fP -MPI_STATUS_SET_ELEMENTS_X(\fISTATUS, DATATYPE, COUNT, IERROR\fP) - INTEGER \fISTATUS\fP(MPI_STATUS_SIZE), DATATYPE\fP - INTEGER(KIND=MPI_COUNT_KIND) \fICOUNT\fP - INTEGER \fIIERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Status_set_elements(\fIstatus\fP, \fIdatatype\fP, \fIcount\fP, \fIierror\fP) - TYPE(MPI_Status), INTENT(INOUT) :: \fIstatus\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - INTEGER, INTENT(IN) :: \fIcount\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP -MPI_Status_set_elements_x(\fIstatus\fP, \fIdatatype\fP, \fIcount\fP, \fIierror\fP) - TYPE(MPI_Status), INTENT(INOUT) :: \fIstatus\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - INTEGER(KIND = MPI_COUNT_KIND), INTENT(IN) :: \fIcount\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -status -Status to associate with \fIcount\fP (status). - -.SH INPUT PARAMETERS -.ft R -.TP 1i -datatype -Data type associated with \fIcount\fP (handle). -.TP 1i -count -Number of elements to associate with \fIstatus\fP (integer). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). 
- -.SH DESCRIPTION -.ft R -MPI_Status_set_elements modifies the opaque part of \fIstatus\fP so that a call to MPI_Get_elements or MPI_Get_elements_x will return \fIcount\fP. MPI_Get_count will return a compatible value. -.sp -A subsequent call to MPI_Get_count(\fIstatus, datatype, count\fP), to MPI_Get_elements(\fIstatus, datatype, count\fP), or to MPI_Get_elements_x(\fIstatus, datatype, count\fP) must use a data-type argument that has the same type signature as the data-type argument that was used in the call to MPI_Status_set_elements. - -.SH NOTES -.ft R -Users are advised not to reuse the status fields for values other than those for which they were intended. Doing so may lead to unexpected results when using the status object. For example, calling MPI_Get_elements may cause an error if the value is out of range, or it may be impossible to detect such an error. The \fIextra_state\fP argument provided with a generalized request can be used to return information that does not logically belong in \fIstatus\fP. Furthermore, modifying the values in a status set internally by MPI, such as MPI_Recv, may lead to unpredictable results and is strongly discouraged. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH FORTRAN 77 NOTES -.ft R -The MPI standard prescribes portable Fortran syntax for the \fICOUNT\fP argument of MPI_Status_set_elements_x only for Fortran 90. 
FORTRAN 77 users may use the non-portable syntax -.sp -.nf - INTEGER*MPI_COUNT_KIND \fICOUNT\fP -.sp -where MPI_COUNT_KIND is a constant defined in mpif.h and gives the length of the declared integer in bytes. diff --git a/ompi/mpi/man/man3/MPI_Status_set_elements_x.3in b/ompi/mpi/man/man3/MPI_Status_set_elements_x.3in deleted file mode 100644 index 4d643f1a3e5..00000000000 --- a/ompi/mpi/man/man3/MPI_Status_set_elements_x.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Status_set_elements.3 diff --git a/ompi/mpi/man/man3/MPI_T_category_changed.3in b/ompi/mpi/man/man3/MPI_T_category_changed.3in deleted file mode 100644 index 095fb0d02b3..00000000000 --- a/ompi/mpi/man/man3/MPI_T_category_changed.3in +++ /dev/null @@ -1,38 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2010 Cisco Systems, Inc. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_T_category_changed 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -. -.SH NAME -\fBMPI_T_category_changed\fP \- Get a timestamp for the categories -. -.SH SYNTAX -.ft R -. -.SH C Syntax -.nf -#include -int MPI_T_category_changed(int *\fIstamp\fP) - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -stamp -A virtual time stamp to indicate the last change to the categories. - -.SH DESCRIPTION -.ft R -If two subsequent calls to this routine return the same timestamp, it is guaranteed that -no categories have been changed or added. If the timestamp from the second call is -higher, then some categories have been added or changed.
- -.SH ERRORS -.ft R -MPI_T_category_changed() will fail if: -.TP 1i -[MPI_T_ERR_NOT_INITIALIZED] -The MPI Tools interface not initialized diff --git a/ompi/mpi/man/man3/MPI_T_category_get_categories.3in b/ompi/mpi/man/man3/MPI_T_category_get_categories.3in deleted file mode 100644 index ff193dedc9f..00000000000 --- a/ompi/mpi/man/man3/MPI_T_category_get_categories.3in +++ /dev/null @@ -1,49 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2010 Cisco Systems, Inc. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_T_category_get_categories 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -. -.SH NAME -\fBMPI_T_category_get_categories\fP \- Query which categories are in a category -. -.SH SYNTAX -.ft R -. -.SH C Syntax -.nf -#include -int MPI_T_category_get_categories(int cat_index, int len, int indices[]) - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -cat_index -Index of the category to be queried. -.TP 1i -len -The length of the indices array. - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -indices -An integer array of size len, indicating category indices. - -.SH DESCRIPTION -.ft R -MPI_T_category_get_categories can be used to query which other categories are in -a category. - -.SH ERRORS -.ft R -MPI_T_category_get_categories() will fail if: -.TP 1i -[MPI_T_ERR_NOT_INITIALIZED] -The MPI Tools interface not initialized -.TP 1i -[MPI_T_ERR_INVALID_INDEX] -The category index is invalid diff --git a/ompi/mpi/man/man3/MPI_T_category_get_cvars.3in b/ompi/mpi/man/man3/MPI_T_category_get_cvars.3in deleted file mode 100644 index 8041302327d..00000000000 --- a/ompi/mpi/man/man3/MPI_T_category_get_cvars.3in +++ /dev/null @@ -1,49 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. 
-.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2010 Cisco Systems, Inc. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_T_category_get_cvars 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -. -.SH NAME -\fBMPI_T_category_get_cvars\fP \- Query which control variables are in a category -. -.SH SYNTAX -.ft R -. -.SH C Syntax -.nf -#include -int MPI_T_category_get_cvars(int \fIcat_index\fP, int \fIlen\fP, int \fIindices\fP[]) - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -cat_index -Index of the category to be queried. -.TP 1i -len -The length of the indices array. - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -indices -An integer array of size len, indicating control variable indices. - -.SH DESCRIPTION -.ft R -MPI_T_category_get_cvars can be used to query which control variables are contained in a -particular category. - -.SH ERRORS -.ft R -MPI_T_category_get_cvars() will fail if: -.TP 1i -[MPI_T_ERR_NOT_INITIALIZED] -The MPI Tools interface not initialized -.TP 1i -[MPI_T_ERR_INVALID_INDEX] -The category index is invalid diff --git a/ompi/mpi/man/man3/MPI_T_category_get_info.3in b/ompi/mpi/man/man3/MPI_T_category_get_info.3in deleted file mode 100644 index 811f362403a..00000000000 --- a/ompi/mpi/man/man3/MPI_T_category_get_info.3in +++ /dev/null @@ -1,81 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2010 Cisco Systems, Inc. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_T_category_get_info 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -. -.SH NAME -\fBMPI_T_category_get_info\fP \- Query information from a category -. -.SH SYNTAX -.ft R -. 
-.SH C Syntax -.nf -#include -int MPI_T_category_get_info(int \fIcat_index\fP, char *\fIname\fP, int *\fIname_len\fP, -char *\fIdesc\fP, int *\fIdesc_len\fP, int *\fInum_cvars\fP, int *\fInum_pvars\fP, -int *\fInum_categories\fP) - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -cat_index -Index of the category to be queried. - -.SH INPUT/OUTPUT PARAMETERS -.ft R -.TP 1i -name_len -Length of the string and/or buffer for name. -.TP 1i -desc_len -Length of the string and/or buffer for desc. - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -name -Buffer to return the string containing the name of the -category. -.TP 1i -desc -Buffer to return the string containing the description -of the category. -.TP 1i -num_cvars -Number of control variables in the category. -.TP 1i -num_pvars -Number of performance variables in the category. -.TP 1i -num_categories -Number of categories contained in the category. - -.SH DESCRIPTION -.ft R -MPI_T_category_get_info can be used to query information from a category. The function returns the -number of control variables, performance variables, and sub-categories in the queried category in -the arguments \fInum_cvars\fP, \fInum_pvars\fP, and \fInum_categories\fP, respectively. - -.SH NOTES -.ft R -This MPI tool interface function returns two strings. This function takes two argument for each string: -a buffer to store the string, and a length which must initially specify the size of the buffer. If the -length passed is n then this function will copy at most n - 1 characters of the string into the -corresponding buffer and set the length to the number of characters copied - 1. If the length argument -is NULL or the value specified in the length is 0 the corresponding string buffer is ignored and the -string is not returned. 
- -.SH ERRORS -.ft R -MPI_T_category_get_info() will fail if: -.TP 1i -[MPI_T_ERR_NOT_INITIALIZED] -The MPI Tools interface not initialized -.TP 1i -[MPI_T_ERR_INVALID_INDEX] -The category index is invalid diff --git a/ompi/mpi/man/man3/MPI_T_category_get_num.3in b/ompi/mpi/man/man3/MPI_T_category_get_num.3in deleted file mode 100644 index 3a6fa0de15f..00000000000 --- a/ompi/mpi/man/man3/MPI_T_category_get_num.3in +++ /dev/null @@ -1,36 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2010 Cisco Systems, Inc. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_T_category_get_num 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -. -.SH NAME -\fBMPI_T_category_get_num\fP \- Query the number of categories -. -.SH SYNTAX -.ft R -. -.SH C Syntax -.nf -#include -int MPI_T_category_get_num(int *\fInum_cat\fP) - -.fi -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -num_cat -Current number of categories - -.SH DESCRIPTION -.ft R -MPI_T_category_get_num can be used to query the current number of categories. - -.SH ERRORS -.ft R -MPI_T_category_get_num() will fail if: -.TP 1i -[MPI_T_ERR_NOT_INITIALIZED] -The MPI Tools interface not initialized diff --git a/ompi/mpi/man/man3/MPI_T_category_get_pvars.3in b/ompi/mpi/man/man3/MPI_T_category_get_pvars.3in deleted file mode 100644 index e8536161a2c..00000000000 --- a/ompi/mpi/man/man3/MPI_T_category_get_pvars.3in +++ /dev/null @@ -1,50 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2010 Cisco Systems, Inc. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_T_category_get_pvars 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -. 
-.SH NAME -\fBMPI_T_category_get_pvars\fP \- Query which performance variables are in a category -. -.SH SYNTAX -.ft R -. -.SH C Syntax -.nf -#include -int MPI_T_category_get_pvars(int cat_index, int len, int indices[]) - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -cat_index -Index of the category to be queried. -.TP 1i -len -The length of the indices array. - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -indices -An integer array of size len, indicating performance variable indices. - -.SH DESCRIPTION -.ft R -MPI_T_category_get_pvars can be used to query which performance variables are -contained in a particular category. A category contains zero or more performance variables. - -.SH ERRORS -.ft R -MPI_T_category_get_pvars() will fail if: -.TP 1i -[MPI_T_ERR_NOT_INITIALIZED] -The MPI Tools interface not initialized -.TP 1i -[MPI_T_ERR_INVALID_INDEX] -The category index is invalid -. diff --git a/ompi/mpi/man/man3/MPI_T_cvar_get_info.3in b/ompi/mpi/man/man3/MPI_T_cvar_get_info.3in deleted file mode 100644 index 07a150b0845..00000000000 --- a/ompi/mpi/man/man3/MPI_T_cvar_get_info.3in +++ /dev/null @@ -1,170 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2010 Cisco Systems, Inc. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_T_cvar_get_info 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -. -.SH NAME -\fBMPI_T_cvar_get_info\fP \- Query information from a control variable -. -.SH SYNTAX -.ft R -. -.SH C Syntax -.nf -#include -int MPI_T_cvar_get_info(int \fIcvar_index\fP, char *\fIname\fP, int *\fIname_len\fP, - int *\fIverbosity\fP, MPI_Datatype *\fIdatatype\fP, MPI_T_enum *\fIenumtype\fP, - char *\fIdesc\fP, int *\fIdesc_len\fP, int *\fIbind\fP, int *\fIscope\fP) - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -cvar_index -Index of the control variable to be queried. -. -.
-.SH INPUT/OUTPUT PARAMETERS -.ft R -.TP 1i -name_len -Length of the string and/or buffer for name. -.TP 1i -desc_len -Length of the string and/or buffer for desc. -. -. -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -name -Buffer to return the string containing the name of the -control variable. -.TP 1i -verbosity -Verbosity level of this variable. -.TP 1i -datatype -MPI datatype of the information stored in the control -variable. -.TP 1i -enumtype -Optional descriptor for enumeration information. -.TP 1i -desc -Buffer to return the string containing the description -of the control variable. -.TP 1i -bind -Type of MPI object to which this variable must be -bound. -.TP 1i -scope -Scope of when changes to this variable are possible. -. -. -.SH DESCRIPTION -.ft R -MPI_T_cvar_get_info can be used to query information about a control variable. The function returns -the verbosity, datatype, enumeration type, binding, and scope of the queried control variable in the arguments -\fIverbosity\fP, \fIdatatype\fP, \fIenumtype\fP, \fIbind\fP, and \fIscope\fP, respectively. Control variables -in Open MPI are the same as MCA parameters. -. -. -.SH VERBOSITY -.ft R -As Open MPI exposes a very large number of MCA parameters (control variables), control variables are -categorized into nine verbosity levels corresponding to the equivalent ompi_info level. 
The nine levels are -(in increasing order): -.TP 1i -MPI_T_VERBOSITY_USER_BASIC -Basic information of interest to users -.TP 1i -MPI_T_VERBOSITY_USER_DETAIL -Detailed information of interest to users -.TP 1i -MPI_T_VERBOSITY_USER_ALL -All remaining information of interest to users -.TP 1i -MPI_T_VERBOSITY_TUNER_BASIC -Basic information required for tuning -.TP 1i -MPI_T_VERBOSITY_TUNER_DETAIL -Detailed information required for tuning -.TP 1i -MPI_T_VERBOSITY_TUNER_ALL -All remaining information required for tuning -.TP 1i -MPI_T_VERBOSITY_MPIDEV_BASIC -Basic information for MPI implementors -.TP 1i -MPI_T_VERBOSITY_MPIDEV_DETAIL -Detailed information for MPI implementors -.TP 1i -MPI_T_VERBOSITY_MPIDEV_ALL -All remaining information for MPI implementors - -For more information see MPI-3 \[char167] 14.3.1. - -.SH DATATYPE -.ft R -The datatype returned by MPI_T_cvar_get_info is restricted to one of the following datatypes: MPI_INT, -MPI_UNSIGNED, MPI_UNSIGNED_LONG, MPI_UNSIGNED_LONG_LONG, MPI_COUNT, MPI_CHAR, and MPI_DOUBLE. For more -information on datatypes in MPI_T see MPI-3 \[char167] 14.3.5. - -.SH SCOPE -.ft R -The scope describes when and how changes can be made to a control variable. 
From MPI-3 \[char167] 14.3.6, the scope may be any of the following: -.TP 1i -MPI_T_SCOPE_CONSTANT -read-only, value is constant -.TP 1i -MPI_T_SCOPE_READONLY -read-only, cannot be written, but can change -.TP 1i -MPI_T_SCOPE_LOCAL -may be writeable, writing is a local operation -.TP 1i -MPI_T_SCOPE_GROUP -may be writeable, must be done to a group of processes, all processes in a group must be set to consistent values -.TP 1i -MPI_T_SCOPE_GROUP_EQ -may be writeable, must be done to a group of processes, all processes in a group must be set to the same value -.TP 1i -MPI_T_SCOPE_ALL -may be writeable, must be done to all processes, all connected processes must be set to consistent values -.TP 1i -MPI_T_SCOPE_ALL_EQ -may be writeable, must be done to all processes, all connected processes must be set to the same value - -For more information see MPI-3 \[char167] 14.3.6 Table 14.4. - -.SH NOTES -.ft R -This MPI tool interface function returns two strings. This function takes two argument for each string: -a buffer to store the string, and a length which must initially specify the size of the buffer. If the -length passed is n then this function will copy at most n - 1 characters of the string into the -corresponding buffer and set the length to the number of characters copied - 1. If the length argument -is NULL or the value specified in the length is 0 the corresponding string buffer is ignored and the -string is not returned. -.sp -Open MPI does not currently support binding control variables to MPI objects. -. -. -.SH ERRORS -.ft R -MPI_T_cvar_get_info() will fail if: -.TP 1i -[MPI_T_ERR_NOT_INITIALIZED] -The MPI Tools interface not initialized -.TP 1i -[MPI_T_ERR_INVALID_INDEX] -The control variable index is invalid -. 
-.SH SEE ALSO -.ft R -.nf -ompi_info diff --git a/ompi/mpi/man/man3/MPI_T_cvar_get_num.3in b/ompi/mpi/man/man3/MPI_T_cvar_get_num.3in deleted file mode 100644 index f70c4abfecb..00000000000 --- a/ompi/mpi/man/man3/MPI_T_cvar_get_num.3in +++ /dev/null @@ -1,39 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2010 Cisco Systems, Inc. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_T_cvar_get_num 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -. -.SH NAME -\fBMPI_T_cvar_get_num\fP \- Query the number of control variables -. -.SH SYNTAX -.ft R -. -.SH C Syntax -.nf -#include -int MPI_T_cvar_get_num(int *\fInum_cvar\fP) - -.fi -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -num_cvar -Current number of control variables. -. -. -.SH DESCRIPTION -.ft R -MPI_T_cvar_get_num can be used to query the current number of control variables. The number -of control variables may increase throughout the execution of the process but will never -decrease. - -.SH ERRORS -.ft R -MPI_T_cvar_get_num() will fail if: -.TP 1i -[MPI_T_ERR_NOT_INITIALIZED] -The MPI Tools interface not initialized diff --git a/ompi/mpi/man/man3/MPI_T_cvar_handle_alloc.3in b/ompi/mpi/man/man3/MPI_T_cvar_handle_alloc.3in deleted file mode 100644 index fa393b5cf66..00000000000 --- a/ompi/mpi/man/man3/MPI_T_cvar_handle_alloc.3in +++ /dev/null @@ -1,69 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2010 Cisco Systems, Inc. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_T_cvar_handle_alloc 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -. -.SH NAME -\fBMPI_T_cvar_handle_alloc\fP, \fBMPI_T_cvar_handle_free\fP \- Allocate/free control variable handles -.
-.SH SYNTAX -.ft R -. -.SH C Syntax -.nf -#include -int MPI_T_cvar_handle_alloc(int \fIcvar_index\fP, void *\fIobj_handle\fP, - MPI_T_cvar_handle *\fIhandle\fP, int *\fIcount\fP) - -int MPI_T_cvar_handle_free(MPI_T_cvar_handle *\fIhandle\fP) - -.fi -.SH DESCRIPTION -.ft R -MPI_T_cvar_handle_alloc binds the control variable specified in \fIcvar_index\fP to the MPI -object specified in \fIobj_handle\fP. If MPI_T_cvar_get_info returns MPI_T_BIND_NO_OBJECT -as the binding of the variable the \fIobj_handle\fP argument is ignored. The number of -values represented by this control variable is returned in the \fIcount\fP parameter. If the -control variable represents a string then \fIcount\fP will be the maximum length of the -string. - -MPI_T_cvar_handle_free frees a handle allocated by MPI_T_cvar_handle_alloc and sets the -\fIhandle\fP argument to MPI_T_CVAR_HANDLE_NULL. - - -.SH NOTES -.ft R -Open MPI does not currently support binding MPI objects to control variables so the -\fIobj_handle\fP argument is always ignored. 
- - -.SH ERRORS -.ft R -MPI_T_cvar_handle_alloc() will fail if: -.TP 1i -[MPI_T_ERR_NOT_INITIALIZED] -The MPI Tools interface not initialized -.TP 1i -[MPI_T_ERR_INVALID_INDEX] -The control variable index is invalid -.TP 1i -[MPI_T_ERR_OUT_OF_HANDLES] -No more handles available -.TP 1i -MPI_T_cvar_handle_free() will fail if: -.TP 1i -[MPI_T_ERR_NOT_INITIALIZED] -The MPI Tools interface not initialized -.TP 1i -[MPI_T_ERR_INVALID_HANDLE] -The handle is invalid - - -.SH SEE ALSO -.ft R -.nf -MPI_T_cvar_get_info - diff --git a/ompi/mpi/man/man3/MPI_T_cvar_handle_free.3in b/ompi/mpi/man/man3/MPI_T_cvar_handle_free.3in deleted file mode 100644 index 3fa2264737d..00000000000 --- a/ompi/mpi/man/man3/MPI_T_cvar_handle_free.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_T_cvar_handle_alloc.3 diff --git a/ompi/mpi/man/man3/MPI_T_cvar_read.3in b/ompi/mpi/man/man3/MPI_T_cvar_read.3in deleted file mode 100644 index 5e678d7e098..00000000000 --- a/ompi/mpi/man/man3/MPI_T_cvar_read.3in +++ /dev/null @@ -1,51 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2010 Cisco Systems, Inc. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_T_cvar_read 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -. -.SH NAME -\fBMPI_T_cvar_read\fP \- Read the value of a control variable -. -.SH SYNTAX -.ft R -. -.SH C Syntax -.nf -#include -int MPI_T_cvar_read(MPI_T_cvar_handle \fIhandle\fP, const void *\fIbuf\fP) - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -handle -Handle of the control variable to be read. -.TP 1i -buf -Initial address of storage location for variable value. - -.SH DESCRIPTION -.ft R -MPI_T_cvar_read reads the value of the control variable identified by the handle -specified in \fIhandle\fP and stores the value in the buffer pointed to by \fIbuf\fP. 
-The caller must ensure that the buffer pointed to by \fIbuf\fP is large enough to -hold the entire value of the control variable. - -.SH ERRORS -.ft R -MPI_T_cvar_read() will fail if: -.TP 1i -[MPI_T_ERR_NOT_INITIALIZED] -The MPI Tools interface not initialized -.TP 1i -[MPI_T_ERR_INVALID_HANDLE] -The handle is invalid - -.SH SEE ALSO -.ft R -.nf -MPI_T_cvar_handle_alloc -MPI_T_cvar_get_info diff --git a/ompi/mpi/man/man3/MPI_T_cvar_write.3in b/ompi/mpi/man/man3/MPI_T_cvar_write.3in deleted file mode 100644 index 323cda935a7..00000000000 --- a/ompi/mpi/man/man3/MPI_T_cvar_write.3in +++ /dev/null @@ -1,59 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2010 Cisco Systems, Inc. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_T_cvar_write 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -. -.SH NAME -\fBMPI_T_cvar_write\fP \- Write the value of a bound control variable -. -.SH SYNTAX -.ft R -. -.SH C Syntax -.nf -#include -int MPI_T_cvar_write(MPI_T_cvar_handle \fIhandle\fP, const void *\fIbuf\fP) - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -handle -Handle of the control variable to be written. -.TP 1i -buf -Initial address of storage location for variable value. - -.SH DESCRIPTION -.ft R -MPI_T_cvar_write sets the value the control variable identified by the handle -specified in \fIhandle\fP from the buffer provided in \fIbuf\fP. The caller must -ensure that the buffer specified in \fIbuf\fP is large enough to hold the -entire value of the control variable. If the variable has global scope, any -write call must be issued on all connected MPI processes. For more -information see MPI-3 \[char167] 14.3.6. 
- -.SH ERRORS -.ft R -MPI_T_cvar_write() will fail if: -.TP 1i -[MPI_T_ERR_NOT_INITIALIZED] -The MPI Tools interface not initialized -.TP 1i -[MPI_T_ERR_INVALID_HANDLE] -The handle is invalid -.TP 1i -[MPI_T_ERR_CVAR_SET_NOT_NOW] -Variable cannot be set at this moment -.TP 1i -[MPI_T_ERR_CVAR_SET_NEVER] -Variable cannot be set until end of execution - -.SH SEE ALSO -.ft R -.nf -MPI_T_cvar_handle_alloc -MPI_T_cvar_get_info diff --git a/ompi/mpi/man/man3/MPI_T_enum_get_info.3in b/ompi/mpi/man/man3/MPI_T_enum_get_info.3in deleted file mode 100644 index 73172561afd..00000000000 --- a/ompi/mpi/man/man3/MPI_T_enum_get_info.3in +++ /dev/null @@ -1,66 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2010 Cisco Systems, Inc. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_T_enum_get_info 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -. -.SH NAME -\fBMPI_T_enum_get_info\fP \- Query information about an enumerator -. -.SH SYNTAX -.ft R -. -.SH C Syntax -.nf -#include -int MPI_T_enum_get_info(MPI_T_enum \fIenumtype\fP, int *\fInum\fP, char *\fIname\fP, int *\fIname_len\fP) - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -enumtype -Enumerator to be queried. - -.SH INPUT/OUTPUT PARAMETERS -.ft R -.TP 1i -name_len -Length of the string and/or buffer for name. - -.SH OUTPUT PARAMETERS -.ft R -.TP li -num -number of discrete values represented by this enumeration. -.TP 1i -name -Buffer to return the string containing the name of the -category. - -.SH DESCRIPTION -.ft R -MPI_T_enum_get_info can be used to query information about an enumerator. The function returns the -number of discrete values represented by this enumerator in the \fInum\fP parameter. - -.SH NOTES -.ft R -This MPI tool interface function returns the name of the enumeration as a string. 
This function -takes two argument for the string: \fIname\fP which specifies a buffer where the name of the -should be stored, and \fIname_len\fP which must initially specify the size of the buffer pointed -to by \fIname\fP. This function will copy at most \fIname_len\fP - 1 characters of the name -and sets \fIname_len\fP to the number of characters returned + 1. If \fIname_len\fP is NULL -or the value specified in \fIname_len\fP is 0 the \fIname\fP buffer is ignored and the name of -the enumeration is not returned. - -.SH ERRORS -.ft R -MPI_T_enum_get_info() will fail if: -.TP 1i -[MPI_T_ERR_NOT_INITIALIZED] -The MPI Tools interface not initialized -.TP 1i -[MPI_T_ERR_INVALID_INDEX] -The enumeration is invalid or has been deleted diff --git a/ompi/mpi/man/man3/MPI_T_enum_get_item.3in b/ompi/mpi/man/man3/MPI_T_enum_get_item.3in deleted file mode 100644 index 86c22e912e1..00000000000 --- a/ompi/mpi/man/man3/MPI_T_enum_get_item.3in +++ /dev/null @@ -1,69 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2010 Cisco Systems, Inc. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_T_enum_get_item 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -. -.SH NAME -\fBMPI_T_enum_get_item\fP \- Query information about an enumerator -. -.SH SYNTAX -.ft R -. -.SH C Syntax -.nf -#include -int MPI_T_enum_get_item(MPI_T_enum \fIenumtype\fP, int \fIindex\fP, int *\fIvalue\fP, char *\fIname\fP, - int *\fIname_len\fP) - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -enumtype -Enumeration to be queried. -.TP 1i -index -Number of the value to be queried in this enumeration. - -.SH INPUT/OUTPUT PARAMETERS -.ft R -.TP 1i -name_len -Length of the string and/or buffer for name. - -.SH OUTPUT PARAMETERS -.ft R -.TP li -value -Variable value. -.TP 1i -name -Buffer to return the string containing the name of the -category. 
- -.SH DESCRIPTION -.ft R -MPI_T_enum_get_item can be used to query information about an item in an enumerator. This function -returns the enumeration value in the \fIvalue\fP parameter. - -.SH NOTES -.ft R -This MPI tool interface function returns the name of the item as a string. This function takes two -arguments for the string: a buffer to store the string, and a length which must initially specify the -size of the buffer. If the length passed is n then this function will copy at most n - 1 characters -of the string into the buffer and sets the length to the number of characters copied - 1. If the length -argument is NULL or the value specified in the length is 0 the string buffer is ignored and the -string is not returned. For more information see MPI-3 \[char167] 14.3.3. - -.SH ERRORS -.ft R -MPI_T_enum_get_item() will fail if: -.TP 1i -[MPI_T_ERR_NOT_INITIALIZED] -The MPI Tools interface not initialized -.TP 1i -[MPI_T_ERR_INVALID_INDEX] -The enumeration is invalid or has been deleted diff --git a/ompi/mpi/man/man3/MPI_T_finalize.3in b/ompi/mpi/man/man3/MPI_T_finalize.3in deleted file mode 100644 index 7cb2b7dce2c..00000000000 --- a/ompi/mpi/man/man3/MPI_T_finalize.3in +++ /dev/null @@ -1,45 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" $COPYRIGHT$ -.TH MPI_T_finalize 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_T_finalize \fP \- Finalize the MPI tool information interface - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_T_finalize(void) - -.fi -.SH DESCRIPTION -.ft R -MPI_T_finalize() finalizes the MPI tool information interface and must be called the same -number of times as MPI_T_init_thread() by the end of execution. 
Calls to MPI tool functions -are allowed at any point in execution as long as MPI_T_init_thread() has been called at least -once and the number of calls to MPI_T_init_thread() is greater than the number of calls to -MPI_T_finalize(). If at any point in execution the number of calls to MPI_T_finalize() equals -the number of calls to MPI_T_init_thread() the MPI tool interface will no longer be available -until another call to MPI_T_init_thread(). - -.SH NOTES -.ft R -Before the end of execution the number of calls to MPI_T_init_thread() and MPI_T_finalize must -be the same. - -.SH ERRORS -.ft R -MPI_T_finalize() will fail if: -.TP 1i -[MPI_T_ERR_NOT_INITIALIZED] -The MPI Tools interface not initialized - -.SH SEE ALSO -.ft T -.nf -MPI_T_init_thread - diff --git a/ompi/mpi/man/man3/MPI_T_init_thread.3.md b/ompi/mpi/man/man3/MPI_T_init_thread.3.md deleted file mode 100644 index e0a6851f308..00000000000 --- a/ompi/mpi/man/man3/MPI_T_init_thread.3.md +++ /dev/null @@ -1,65 +0,0 @@ -# NAME - -MPI_T_init_thread - Initializes the MPI Tool information interface - -# SYNTAX - -## C Syntax - -```c -#include -int MPI_T_init_thread(int required, int *provided) -``` - -# INPUT PARAMETERS - -* required: Desired level of thread support (integer). - -# OUTPUT PARAMETERS - -* provided: Available level of thread support (integer). - -# DESCRIPTION - -`MPI_T_init_thread()` initializes the MPI tool information interface. Calls to -MPI tool functions are allowed at any point in execution (including before -`MPI_Init()` and after `MPI_Finalize()`) as long as `MPI_T_init_thread()` has -been called at least once and the number of calls to `MPI_T_init_thread()` is -greater than the number of calls to `MPI_T_finalize()`. If at any point in -execution the number of calls to `MPI_T_finalize()` equals the number of calls -to `MPI_T_init_thread()` the MPI tool interface will no longer be available -until another call to `MPI_T_init_thread()`. 
- -`MPI_T_init_thread()`, like `MPI_Init_thread()`, has a provision to request a -certain level of thread support in `required`: - -* `MPI_THREAD_SINGLE`: Only one thread will execute. -* `MPI_THREAD_FUNNELED`: If the process is multithreaded, only the thread that - called `MPI_Init_thread()` will make MPI calls. -* `MPI_THREAD_SERIALIZED`: If the process is multithreaded, only one thread will - make MPI library calls at one time. -* `MPI_THREAD_MULTIPLE`: If the process is multithreaded, multiple threads may - call MPI at once with no restrictions. - -The level of thread support available to the program is set in -`provided`. In Open MPI, the value is dependent on how the library was -configured and built. Note that there is no guarantee that -`provided` will be greater than or equal to `required`. - -# NOTES - -It is the caller's responsibility to check the value of `provided`, -as it may be less than what was requested in `required`. - -# ERRORS - -`MPI_T_init_thread()` will fail if: - -* `MPI_T_ERR_MEMORY`: Out of memory -* `MPI_T_ERR_CANNOT_INIT`: Interface not in the state to be initialized - -# SEE ALSO - -[`MPI_T`(5)](MPI_T.html), -[`MPI_Init_thread`(3)](MPI_Init_thread.html), -[`MPI_T_finalize`(3)](MPI_T_finalize.html) diff --git a/ompi/mpi/man/man3/MPI_T_pvar_get_info.3in b/ompi/mpi/man/man3/MPI_T_pvar_get_info.3in deleted file mode 100644 index 86e7dbaa7a1..00000000000 --- a/ompi/mpi/man/man3/MPI_T_pvar_get_info.3in +++ /dev/null @@ -1,204 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2010 Cisco Systems, Inc. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_T_pvar_get_info 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -. -.SH NAME -\fBMPI_T_pvar_get_info\fP \- Query information from a performance variable -. -.SH SYNTAX -.ft R -. 
-.SH C Syntax -.nf -#include -int MPI_T_pvar_get_info(int \fIpvar_index\fP, char *\fIname\fP, int *\fIname_len\fP, - int *\fIverbosity\fP, int *\fIvar_class\fP, MPI_Datatype *\fIdatatype\fP, MPI_T_enum *\fIenumtype\fP, - char *\fIdesc\fP, int *\fIdesc_len\fP, int *\fIbind\fP, int *\fIreadonly\fP, int *\fIcontinuous\fP, - int *\fIatomic\fP) - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -pvar_index -Index of the performance variable to be queried. - -.SH INPUT/OUTPUT PARAMETERS -.ft R -.TP 1i -name_len -Length of the string and/or buffer for name. -.TP 1i -desc_len -Length of the string and/or buffer for desc. - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -name -Buffer to return the string containing the name of the -performance variable. -.TP 1i -verbosity -Verbosity level of this variable. -.TP 1i -var_class -Class of performance variable. -.TP 1i -datatype -MPI datatype of the information stored in the performance variable. -.TP 1i -enumtype -Optional descriptor for enumeration information. -.TP 1i -desc -Buffer to return the string containing the description of the performance variable. -.TP 1i -bind -Type of MPI object to which this variable must be bound. -.TP 1i -readonly -Flag indicating whether the variable can be written/reset. -.TP 1i -continuous -Flag indicating whether the variable can be started and stopped or is continuously active. -.TP 1i -atomic -Flag indicating whether the variable can be atomically read and reset. -. -. -.SH DESCRIPTION -.ft R -MPI_T_pvar_get_info can be used to query information from a performance variable. The function returns -the verbosity, class, datatype, enumeration type, and binding of the queried control variable in the arguments -\fIverbosity\fP, \fIvar_class\fP, \fIdatatype\fP, \fIenumtype\fP, and \fIbind\fP respectively. Flags indicating -whether the variable is read-only, continuous, or atomic are returns in \fIreadonly\fP, \fIcontinuous\fP, and -\fIatomic\fP accordingly. See MPI-3 § 14.3.7 for more information. 
See the man page for MPI_T_cvar_get_info -for information on variable verbosity. - -.SH VARIABLE CLASS -.ft R -Performance variables are categorized into classes which describe their initial value, valid types, and -behavior. The class returned in the \fIvar_class\fP parameter may be one of the following: -.TP 2 -MPI_T_PVAR_CLASS_STATE -Variable represents a set of discrete states that may be described by an enumerator. Variables of this class -must be represented by an MPI_INT. The starting value is the current state of the variable. -.TP 2 -MPI_T_PVAR_CLASS_LEVEL -Variable represents the current utilization level of a resource. Variables of this class must be represented -by an MPI_UNSIGNED, MPI_UNSIGNED_LONG, MPI_UNSIGNED_LONG_LONG, or MPI_DOUBLE. The starting value is the -current utilization level of the resource. -.TP 2 -MPI_T_PVAR_CLASS_SIZE -Variable represents the fixed size of a resource. Variables of this class are represented by an MPI_UNSIGNED, -MPI_UNSIGNED_LONG, MPI_UNSIGNED_LONG_LONG, or MPI_DOUBLE. The starting value is the current size of the resource. -.TP 2 -MPI_T_PVAR_CLASS_PERCENTAGE -Variable represents the current percentage utilization level of a resource. Variables of this class are -represented by an MPI_DOUBLE. The starting value is the current percentage utilization of the resource. -.TP 2 -MPI_T_PVAR_CLASS_HIGHWATERMARK -Variable represents the high watermark of the utilization of a resource. Variables of this class are -represented by an MPI_UNSIGNED, MPI_UNSIGNED_LONG, MPI_UNSIGNED_LONG_LONG, or MPI_DOUBLE. The starting value -is the current utilization of the resource. -.TP 2 -MPI_T_PVAR_CLASS_LOWWATERMARK -Variable represents the low watermark of the utilization of a resource. Variables of this class are -represented by an MPI_UNSIGNED, MPI_UNSIGNED_LONG, MPI_UNSIGNED_LONG_LONG, or MPI_DOUBLE. The starting value -is the current utilization of the resource. 
-.TP 2 -MPI_T_PVAR_CLASS_COUNTER -Variable represents a count of the number of occurrences of a specific event. Variables of this class are -represented by an MPI_UNSIGNED, MPI_UNSIGNED_LONG, or MPI_UNSIGNED_LONG_LONG. The starting value is 0. -.TP 2 -MPI_T_PVAR_CLASS_AGGREGATE -Variable represents an aggregated value that represents a sum of arguments processed during a specific event. -Variables of this class are represented by an MPI_UNSIGNED, MPI_UNSIGNED_LONG, MPI_UNSIGNED_LONG_LONG, -or MPI_DOUBLE. The starting value is 0. -.TP 2 -MPI_T_PVAR_CLASS_TIMER -Variable represents the aggregated time spent by the MPI implementation while processing an event, type of -event, or section of code. Variables of this class are represented by an MPI_UNSIGNED, MPI_UNSIGNED_LONG, -MPI_UNSIGNED_LONG_LONG, or MPI_DOUBLE. If the variable is represented by an MPI_DOUBLE the units will be -the same as those used by MPI_Wtime(). The starting value is 0. -.TP 2 -MPI_T_PVAR_CLASS_GENERIC -Variable does not fit into any other class. Can be represented by any type supported by the MPI tool -information interface (see DATATYPE). Starting value is variable specific. - -For more information see MPI-3 \[char167] 14.3.7. - -.SH DATATYPE -.ft R -The datatype returned by MPI_T_pvar_get_info is restricted to one of the following datatypes: MPI_INT, -MPI_UNSIGNED, MPI_UNSIGNED_LONG, MPI_UNSIGNED_LONG_LONG, MPI_COUNT, MPI_CHAR, and MPI_DOUBLE. For more -information on datatypes in the MPI Tool information interface see MPI-3 \[char167] 14.3.5. - -.SH BINDING -.ft R -Performance variables may be bound to an MPI object. 
The binding returned in the \fIbind\fP parameter may be -one of the following: -.TP 1i -MPI_T_BIND_NO_OBJECT -No object -.TP 1i -MPI_T_BIND_MPI_COMM -MPI communicator -.TP 1i -MPI_T_BIND_MPI_DATATYPE -MPI datatype -.TP 1i -MPI_T_BIND_MPI_ERRHANDLER -MPI error handler -.TP 1i -MPI_T_BIND_MPI_FILE -MPI file handle -.TP 1i -MPI_T_BIND_MPI_GROUP -MPI group -.TP 1i -MPI_T_BIND_MPI_OP -MPI reduction operator -.TP 1i -MPI_T_BIND_MPI_REQUEST -MPI request -.TP 1i -MPI_T_BIND_MPI_WIN -MPI window for one-sided communication -.TP 1i -MPI_T_BIND_MPI_MESSAGE -MPI message object -.TP 1i -MPI_T_BIND_MPI_INFO -MPI info object - -For more information see MPI-3 \[char167] 14.3.2. - -.SH NOTES -.ft R -This MPI tool interface function returns two strings. This function takes two argument for each string: -a buffer to store the string, and a length which must initially specify the size of the buffer. If the -length passed is n then this function will copy at most n - 1 characters of the string into the -corresponding buffer and set the length to the number of characters copied - 1. If the length argument -is NULL or the value specified in the length is 0 the corresponding string buffer is ignored and the -string is not returned. For more information see MPI-3 \[char167] 14.3.3. - -.SH ERRORS -.ft R -MPI_T_pvar_get_info() will fail if: -.TP 1i -[MPI_T_ERR_NOT_INITIALIZED] -The MPI Tools interface not initialized -.TP 1i -[MPI_T_ERR_INVALID_INDEX] -The performance variable index is invalid -. -.SH SEE ALSO -.ft R -.nf -MPI_T_cvar_get_info diff --git a/ompi/mpi/man/man3/MPI_T_pvar_get_num.3in b/ompi/mpi/man/man3/MPI_T_pvar_get_num.3in deleted file mode 100644 index 1f6fc63ac2d..00000000000 --- a/ompi/mpi/man/man3/MPI_T_pvar_get_num.3in +++ /dev/null @@ -1,38 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. 
-.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2010 Cisco Systems, Inc. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_T_pvar_get_num 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -. -.SH NAME -\fBMPI_T_pvar_get_num\fP \- Query the number of performance variables -. -.SH SYNTAX -.ft R -. -.SH C Syntax -.nf -#include -int MPI_T_pvar_get_num(int *\fInum_pvar\fP) - -.fi -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -num_pvar -Current number of performance variables. - -.SH DESCRIPTION -.ft R -MPI_T_pvar_get_num can be used to query the current number of performance variables. The number -of performance variables may increase throughout the execution of the process but will never -decrease. - -.SH ERRORS -.ft R -MPI_T_pvar_get_num() will fail if: -.TP 1i -[MPI_T_ERR_NOT_INITIALIZED] -The MPI Tools interface not initialized diff --git a/ompi/mpi/man/man3/MPI_T_pvar_handle_alloc.3in b/ompi/mpi/man/man3/MPI_T_pvar_handle_alloc.3in deleted file mode 100644 index dfcd4d19f55..00000000000 --- a/ompi/mpi/man/man3/MPI_T_pvar_handle_alloc.3in +++ /dev/null @@ -1,72 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2010 Cisco Systems, Inc. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_T_pvar_handle_alloc 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -. -.SH NAME -\fBMPI_T_pvar_handle_alloc\fP, \fBMPI_T_pvar_handle_free\fP \- Allocate/free MPI performance variable handles -. -.SH SYNTAX -.ft R -. 
-.SH C Syntax -.nf -#include -int MPI_T_pvar_handle_alloc(int \fIsession\fP, int \fIpvar_index\fP, void *\fIobj_handle\fP, - MPI_T_pvar_handle *\fIhandle\fP, int *\fIcount\fP) - -int MPI_T_pvar_handle_free(int \fIsession\fP, MPI_T_pvar_handle *\fIhandle\fP) - -.SH DESCRIPTION -.ft R -MPI_T_pvar_handle_alloc binds the performance variable specified in \fIpvar_index\fP to the MPI -object specified in \fIobj_handle\fP in the session identified by the parameter -\fIsession\fP. The object is passed in the argument \fIobj_handle\fP as an -address to a local variable that stores the object’s handle. If -MPI_T_pvar_get_info returns MPI_T_BIND_NO_OBJECT as the binding -for the variable the \fIobj_handle\fP argument is ignored. The handle -allocated to reference the variable is returned in the argument \fIhandle\fP. Upon successful -return, \fIcount\fP contains the number of elements (of the datatype returned by a previous -MPI_T_PVAR_GET_INFO call) used to represent this variable. - -The value of \fIpvar_index\fP should be in the range 0 to \fInum_pvar - 1\fP, -where \fInum_pvar\fP is the number of available performance variables as -determined from a prior call to \fIMPI_T_PVAR_GET_NUM\fP. The type of the -MPI object it references must be consistent with the type returned in the -bind argument in a prior call to \fIMPI_T_PVAR_GET_INFO\fP. - -MPI_T_pvar_handle_free frees a handle allocated by MPI_T_pvar_handle_alloc and sets the -\fIhandle\fP argument to MPI_T_PVAR_HANDLE_NULL. 
- - -.SH ERRORS -.ft R -MPI_T_pvar_handle_alloc() will fail if: -.TP 1i -[MPI_T_ERR_NOT_INITIALIZED] -The MPI Tools interface not initialized -.TP 1i -[MPI_T_ERR_INVALID_INDEX] -The performance variable index is invalid -.TP 1i -[MPI_T_ERR_OUT_OF_HANDLES] -No more handles available -.TP 1i -MPI_T_pvar_handle_free() will fail if: -.TP 1i -[MPI_T_ERR_NOT_INITIALIZED] -The MPI Tools interface not initialized -.TP 1i -[MPI_T_ERR_INVALID_HANDLE] -The handle is invalid or the handle argument passed in is not associated with the session argument - - -.SH SEE ALSO -.ft R -.nf -MPI_T_pvar_get_info -MPI_T_pvar_get_num - diff --git a/ompi/mpi/man/man3/MPI_T_pvar_handle_free.3in b/ompi/mpi/man/man3/MPI_T_pvar_handle_free.3in deleted file mode 100644 index 378fd2d10c8..00000000000 --- a/ompi/mpi/man/man3/MPI_T_pvar_handle_free.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_T_pvar_handle_alloc.3 diff --git a/ompi/mpi/man/man3/MPI_T_pvar_read.3in b/ompi/mpi/man/man3/MPI_T_pvar_read.3in deleted file mode 100644 index f3d346ff2f0..00000000000 --- a/ompi/mpi/man/man3/MPI_T_pvar_read.3in +++ /dev/null @@ -1,59 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2010 Cisco Systems, Inc. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_T_pvar_read 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -. -.SH NAME -\fBMPI_T_pvar_read\fP \- Read the value of a performance variable -. -.SH SYNTAX -.ft R -. -.SH C Syntax -.nf -#include -int MPI_T_pvar_read(MPI_T_pvar_session \fIsession\fP, MPI_T_pvar_handle \fIhandle\fP, const void *\fIbuf\fP) - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -session -Performance experiment session. -.TP 1i -handle -Performance variable handle. -.TP 1i -buf -Initial address of storage location for variable value. 
- -.SH DESCRIPTION -.ft R -MPI_T_pvar_read queries the value of a performance variable identified by the handle -specified in \fIhandle\fP in the session specified in \fIsession\fP. The result is -stored in the buffer pointed to by \fIbuf\fP. The caller must ensure that the -buffer pointed to by \fIbuf\fP is large enough to hold the entire value of the -performance variable. - -.SH ERRORS -.ft R -MPI_T_pvar_read() will fail if: -.TP 1i -[MPI_T_ERR_NOT_INITIALIZED] -The MPI Tools interface not initialized -.TP 1i -[MPI_T_ERR_INVALID_HANDLE] -The handle is invalid or not associated with the session -.TP 1i -[MPI_T_ERR_INVALID_SESSION] -Session argument is not a valid session - -.SH SEE ALSO -.ft R -.nf -MPI_T_pvar_handle_alloc -MPI_T_pvar_get_info -MPI_T_pvar_session_create diff --git a/ompi/mpi/man/man3/MPI_T_pvar_readreset.3in b/ompi/mpi/man/man3/MPI_T_pvar_readreset.3in deleted file mode 100644 index 007b7534830..00000000000 --- a/ompi/mpi/man/man3/MPI_T_pvar_readreset.3in +++ /dev/null @@ -1,69 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2010 Cisco Systems, Inc. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_T_pvar_readreset 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -. -.SH NAME -\fBMPI_T_pvar_readreset\fP \- Atomically read and reset the value of a performance variable -. -.SH SYNTAX -.ft R -. -.SH C Syntax -.nf -#include -int MPI_T_pvar_readreset(MPI_T_pvar_session \fIsession\fP, MPI_T_pvar_handle \fIhandle\fP, const void *\fIbuf\fP) - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -session -Performance experiment session. -.TP 1i -handle -Performance variable handle. -.TP 1i -buf -Initial address of storage location for variable value. 
- -.SH DESCRIPTION -.ft R -MPI_T_pvar_readreset atomically queries and resets the value of a performance variable -bound to the handle specified by \fIhandle\fP in the session specified by \fIsession\fP. -The result is stored in the buffer pointed to by \fIbuf\fP. This function can only be -used with performance variables that are atomic and not readonly. The caller must ensure -that the buffer pointed to by \fIbuf\fP is large enough to hold the entire value of the -performance variable. - -.SH ERRORS -.ft R -MPI_T_pvar_readreset() will fail if: -.TP 1i -[MPI_T_ERR_NOT_INITIALIZED] -The MPI Tools interface not initialized -.TP 1i -[MPI_T_ERR_INVALID_HANDLE] -The handle is invalid or not associated with the session -.TP 1i -[MPI_T_ERR_INVALID_SESSION] -Session argument is not a valid session -.TP 1i -[MPI_T_ERR_PVAR_NO_ATOMIC] -Variable cannot be read and written atomically -.TP 1i -[MPI_T_ERR_PVAR_NO_WRITE] -Variable cannot be reset - -.SH SEE ALSO -.ft R -.nf -MPI_T_pvar_handle_alloc -MPI_T_pvar_get_info -MPI_T_pvar_session_create -MPI_T_pvar_read -MPI_T_pvar_reset - diff --git a/ompi/mpi/man/man3/MPI_T_pvar_reset.3in b/ompi/mpi/man/man3/MPI_T_pvar_reset.3in deleted file mode 100644 index df145f6a3f3..00000000000 --- a/ompi/mpi/man/man3/MPI_T_pvar_reset.3in +++ /dev/null @@ -1,60 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2010 Cisco Systems, Inc. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_T_pvar_reset 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -. -.SH NAME -\fBMPI_T_pvar_reset\fP \- Reset the value of a performance variable -. -.SH SYNTAX -.ft R -. -.SH C Syntax -.nf -#include -int MPI_T_pvar_reset(MPI_T_pvar_session \fIsession\fP, MPI_T_pvar_handle \fIhandle\fP) - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -session -Performance experiment session. 
-.TP 1i -handle -Performance variable handle or MPI_T_PVAR_ALL_HANDLES. -. -. -.SH DESCRIPTION -.ft R -MPI_T_pvar_reset sets the performance variable specified by the handle in \fIhandle\fP -to its initial value. The special value MPI_T_PVAR_ALL_HANDLES can be passed in \fIhandle\fP -to reset all read-write handles in the session specified in \fIsession\fP. -. -. -.SH ERRORS -.ft R -MPI_T_pvar_reset() will fail if: -.TP 1i -[MPI_T_ERR_NOT_INITIALIZED] -The MPI Tools interface not initialized -.TP 1i -[MPI_T_ERR_INVALID_HANDLE] -The handle is invalid -.TP 1i -[MPI_T_ERR_INVALID_SESSION] -Session argument is not a valid session -.TP 1i -[MPI_T_ERR_PVAR_NO_WRITE] -Variable cannot be reset - -.SH SEE ALSO -.ft R -.nf -MPI_T_pvar_handle_alloc -MPI_T_pvar_get_info -MPI_T_pvar_session_create -MPI_T_pvar_write diff --git a/ompi/mpi/man/man3/MPI_T_pvar_session_create.3in b/ompi/mpi/man/man3/MPI_T_pvar_session_create.3in deleted file mode 100644 index dd46817f314..00000000000 --- a/ompi/mpi/man/man3/MPI_T_pvar_session_create.3in +++ /dev/null @@ -1,51 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2010 Cisco Systems, Inc. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_T_pvar_session_create 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -. -.SH NAME -\fBMPI_T_pvar_session_create\fP, \fBMPI_T_pvar_session_free\fP \- Create/free performance variable session -. -.SH SYNTAX -.ft R -. -.SH C Syntax -.nf -#include -int MPI_T_pvar_session_create(MPI_T_pvar_session *\fIsession\fP) - -int MPI_T_pvar_session_free(MPI_T_pvar_session *\fIsession\fP) - -.fi -.SH DESCRIPTION -.ft R -MPI_T_pvar_session_create creates a session for accessing performance variables. The -new session is returned in the \fIsession\fP parameter. 
- -MPI_T_pvar_session_free releases a session allocated by MPI_T_pvar_session_create and sets -the \fIsession\fP parameter to MPI_T_PVAR_SESSION_NULL. - -.SH ERRORS -.ft R -MPI_T_pvar_session_create() will fail if: -.TP 1i -[MPI_T_ERR_NOT_INITIALIZED] -The MPI Tools interface not initialized -.TP 1i -[MPI_T_ERR_MEMORY] -Out of memory -.TP 1i -[MPI_T_ERR_OUT_OF_SESSIONS] -No more sessions available -.TP 1i -MPI_T_pvar_session_free() will fail if: -.TP 1i -[MPI_T_ERR_NOT_INITIALIZED] -The MPI Tools interface not initialized -.TP 1i -[MPI_T_ERR_INVALID_SESSION] -The session parameter is not a valid session - diff --git a/ompi/mpi/man/man3/MPI_T_pvar_session_free.3in b/ompi/mpi/man/man3/MPI_T_pvar_session_free.3in deleted file mode 100644 index 4a42a63f6d8..00000000000 --- a/ompi/mpi/man/man3/MPI_T_pvar_session_free.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_T_pvar_session_create.3 diff --git a/ompi/mpi/man/man3/MPI_T_pvar_start.3in b/ompi/mpi/man/man3/MPI_T_pvar_start.3in deleted file mode 100644 index 2b1c9830d9b..00000000000 --- a/ompi/mpi/man/man3/MPI_T_pvar_start.3in +++ /dev/null @@ -1,63 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2010 Cisco Systems, Inc. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_T_pvar_start 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -. -.SH NAME -\fBMPI_T_pvar_start\fP, \fBMPI_T_pvar_stop\fP \- Start/stop a performance variable -. -.SH SYNTAX -.ft R -. -.SH C Syntax -.nf -#include -int MPI_T_pvar_start(MPI_T_pvar_session \fIsession\fP, MPI_T_pvar_handle \fIhandle\fP) - -int MPI_T_pvar_stop(MPI_T_pvar_session \fIsession\fP, MPI_T_pvar_handle \fIhandle\fP) - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -session -Performance experiment session. -.TP 1i -handle -Performance variable handle. 
- -.SH DESCRIPTION -.ft R -MPI_T_pvar_start starts the performance variable with the handle specified in \fIhandle\fP. -The special value MPI_T_PVAR_ALL_HANDLES can be passed in \fIhandle\fP to start all -non-continuous handles in the session specified in \fIsession\fP. - -MPI_T_pvar_stop stops the performance variable with the handle specified in \fIhandle\fP. -The special value MPI_T_PVAR_ALL_HANDLES can be passed in \fIhandle\fP to stop all -non-continuous handles in the session specified in \fIsession\fP. - -Continuous performance variables can neither be started nor stopped. - -.SH ERRORS -.ft R -MPI_T_pvar_start() and MPI_T_pvar_stop() will fail if: -.TP 1i -[MPI_T_ERR_NOT_INITIALIZED] -The MPI Tools interface not initialized -.TP 1i -[MPI_T_ERR_INVALID_SESSION] -Session parameter is not a valid session -.TP 1i -[MPI_T_ERR_INVALID_HANDLE] -Invalid handle or handle not associated with the session -.TP 1i -[MPI_T_ERR_PVAR_NO_STARTSTOP] -The variable cannot be started or stopped - -.SH SEE ALSO -.ft R -.nf -MPI_T_pvar_get_info diff --git a/ompi/mpi/man/man3/MPI_T_pvar_stop.3in b/ompi/mpi/man/man3/MPI_T_pvar_stop.3in deleted file mode 100644 index 3907001bdae..00000000000 --- a/ompi/mpi/man/man3/MPI_T_pvar_stop.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_T_pvar_start.3 diff --git a/ompi/mpi/man/man3/MPI_T_pvar_write.3in b/ompi/mpi/man/man3/MPI_T_pvar_write.3in deleted file mode 100644 index 944a93c8e2f..00000000000 --- a/ompi/mpi/man/man3/MPI_T_pvar_write.3in +++ /dev/null @@ -1,62 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2010 Cisco Systems, Inc. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_T_pvar_write 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -. -.SH NAME -\fBMPI_T_pvar_write\fP \- Write the value of a performance variable -. -.SH SYNTAX -.ft R -. 
-.SH C Syntax -.nf -#include -int MPI_T_pvar_write(MPI_T_pvar_session \fIsession\fP, MPI_T_pvar_handle \fIhandle\fP, const void *\fIbuf\fP) - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -session -Performance experiment session. -.TP 1i -handle -Performance variable handle. -.TP 1i -buf -Initial address of storage location for variable value. - -.SH DESCRIPTION -.ft R -MPI_T_pvar_write attempts to set the value of the performance variable identified by -the handle specified in \fIhandle\fP in the session specified in \fIsession\fP. The -value to be written is specified in \fIbuf\fP. The caller must ensure that the buffer -specified in \fIbuf\fP is large enough to hold the entire value of the performance -variable. - -.SH ERRORS -.ft R -MPI_T_pvar_write() will fail if: -.TP 1i -[MPI_T_ERR_NOT_INITIALIZED] -The MPI Tools interface not initialized -.TP 1i -[MPI_T_ERR_INVALID_HANDLE] -The handle is invalid or not associated with the session -.TP 1i -[MPI_T_ERR_INVALID_SESSION] -Session argument is not a valid session -.TP 1i -[MPI_T_ERR_PVAR_NO_WRITE] -Variable cannot be written - -.SH SEE ALSO -.ft R -.nf -MPI_T_pvar_handle_alloc -MPI_T_pvar_get_info -MPI_T_pvar_session_create diff --git a/ompi/mpi/man/man3/MPI_Test.3in b/ompi/mpi/man/man3/MPI_Test.3in deleted file mode 100644 index 36026cb9503..00000000000 --- a/ompi/mpi/man/man3/MPI_Test.3in +++ /dev/null @@ -1,116 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright 2007-2008 Cisco Systems, Inc. All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Test 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Test\fP \- Tests for the completion of a specific send or receive. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Test(MPI_Request *\fIrequest\fP, int\fI *flag\fP, MPI_Status\fI *status\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_TEST(\fIREQUEST, FLAG, STATUS, IERROR\fP) - LOGICAL \fIFLAG\fP - INTEGER \fIREQUEST, STATUS(MPI_STATUS_SIZE), IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Test(\fIrequest\fP, \fIflag\fP, \fIstatus\fP, \fIierror\fP) - TYPE(MPI_Request), INTENT(INOUT) :: \fIrequest\fP - LOGICAL, INTENT(OUT) :: \fIflag\fP - TYPE(MPI_Status) :: \fIstatus\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -request -Communication request (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -flag -True if operation completed (logical). -.TP 1i -status -Status object (status). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -A call to MPI_Test returns flag = true if the operation identified by request is complete. In such a case, the status object is set to contain information on the completed operation; if the communication object was created by a nonblocking send or receive, then it is deallocated and the request handle is set to MPI_REQUEST_NULL. The call returns flag = false, otherwise. In this case, the value of the status object is undefined. MPI_Test is a local operation. -.sp -The return status object for a receive operation carries information that can be accessed as described in Section 3.2.5 of the MPI-1 Standard, "Return Status." The status object for a send operation carries information that can be accessed by a call to MPI_Test_cancelled (see Section 3.8 of the MPI-1 Standard, "Probe and Cancel"). -.sp -If your application does not need to examine the \fIstatus\fP field, you can save resources by using the predefined constant MPI_STATUS_IGNORE as a special value for the \fIstatus\fP argument. -.sp -One is allowed to call MPI_Test with a null or inactive \fIrequest\fP argument. In such a case the operation returns with \fIflag\fP = true and empty \fIstatus\fP. 
-.sp -The functions MPI_Wait and MPI_Test can be used to complete both sends and -receives. - -.SH NOTES -The use of the nonblocking MPI_Test call allows the user to schedule alternative activities within a single thread of execution. An event-driven thread scheduler can be emulated with periodic calls to MPI_Test. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -MPI_Comm_set_errhandler, MPI_File_set_errhandler, or -MPI_Win_set_errhandler (depending on the type of MPI handle that -generated the request); the predefined error handler MPI_ERRORS_RETURN -may be used to cause error values to be returned. Note that MPI does -not guarantee that an MPI program can continue past an error. -.sp -Note that per MPI-1 section 3.2.5, MPI errors on requests passed -to MPI_TEST do not set the status.MPI_ERROR field in the returned -status. The error code is passed to the back-end error handler -and may be passed back to the caller through the return value of -MPI_TEST if the back-end error handler returns it. The -pre-defined MPI error handler MPI_ERRORS_RETURN exhibits this -behavior, for example. - -.SH SEE ALSO -.ft R -.sp -MPI_Comm_set_errhandler -.br -MPI_File_set_errhandler -.br -MPI_Testall -.br -MPI_Testany -.br -MPI_Testsome -.br -MPI_Wait -.br -MPI_Waitall -.br -MPI_Waitany -.br -MPI_Waitsome -.br -MPI_Win_set_errhandler -.br - diff --git a/ompi/mpi/man/man3/MPI_Test_cancelled.3in b/ompi/mpi/man/man3/MPI_Test_cancelled.3in deleted file mode 100644 index 59db6376395..00000000000 --- a/ompi/mpi/man/man3/MPI_Test_cancelled.3in +++ /dev/null @@ -1,69 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. 
-.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Test_cancelled 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Test_cancelled\fP \- Tests whether a request was canceled. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Test_cancelled(const MPI_Status *\fIstatus\fP, int \fI*flag\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_TEST_CANCELLED(\fISTATUS, FLAG, IERROR\fP) - LOGICAL \fIFLAG\fP - INTEGER \fISTATUS(MPI_STATUS_SIZE), IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Test_cancelled(\fIstatus\fP, \fIflag\fP, \fIierror\fP) - TYPE(MPI_Status), INTENT(IN) :: \fIstatus\fP - LOGICAL, INTENT(OUT) :: \fIflag\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -status -Status object (status). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -flag -True if operation was cancelled (logical). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -Returns \fIflag\fP = true if the communication associated with the status object -was canceled successfully. In such a case, all other fields of status (such as \fIcount\fP or \fItag\fP) are undefined. Otherwise, returns \fIflag\fP = false. If a receive operation might be canceled, one should call MPI_Test_cancelled first, to check whether the operation was canceled, before checking on the other fields of the return status. - -.SH NOTES -Cancel can be an expensive operation that should be used only exceptionally. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. 
By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - - - diff --git a/ompi/mpi/man/man3/MPI_Testall.3in b/ompi/mpi/man/man3/MPI_Testall.3in deleted file mode 100644 index 04b75c4b2f2..00000000000 --- a/ompi/mpi/man/man3/MPI_Testall.3in +++ /dev/null @@ -1,123 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2011 Cisco Systems, Inc. All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Testall 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Testall\fP \- Tests for the completion of all previously initiated communications in a list. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Testall(int \fIcount\fP, MPI_Request\fI array_of_requests[]\fP, - int\fI *flag\fP, MPI_Status\fI array_of_statuses[]\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_TESTALL(\fICOUNT, ARRAY_OF_REQUESTS, FLAG, ARRAY_OF_STATUSES, - IERROR\fP) - LOGICAL \fIFLAG\fP - INTEGER \fICOUNT, ARRAY_OF_REQUESTS(*)\fP - INTEGER \fIARRAY_OF_STATUSES(MPI_STATUS_SIZE,*), IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Testall(\fIcount\fP, \fIarray_of_requests\fP, \fIflag\fP, \fIarray_of_statuses\fP, \fIierror\fP) - INTEGER, INTENT(IN) :: \fIcount\fP - TYPE(MPI_Request), INTENT(INOUT) :: \fIarray_of_requests(count)\fP - LOGICAL, INTENT(OUT) :: \fIflag\fP - TYPE(MPI_Status) :: \fIarray_of_statuses(*)\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -count -Lists length (integer). -.TP 1i -array_of_requests -Array of requests (array of handles). 
- -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -flag -True if previously initiated communications are complete (logical). -.TP 1i -array_of_statuses -Array of status objects (array of status). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -Returns \fIflag\fP = true if all communications associated with active handles in the array have completed (this includes the case where no handle in the list is active). In this case, each status entry that corresponds to an active handle request is set to the status of the corresponding communication; if the request was allocated by a nonblocking communication call then it is deallocated, and the handle is set to MPI_REQUEST_NULL. Each status entry that corresponds to a null or inactive handle is set to empty. -.sp -Otherwise, \fIflag\fP = false is returned, no request is modified and the values of the status entries are undefined. This is a local operation. -.sp -If your application does not need to examine the \fIarray_of_statuses\fP field, you can save resources by using the predefined constant MPI_STATUSES_IGNORE as a special value for the \fIarray_of_statuses\fP argument. -.sp -Errors that occurred during the execution of MPI_Testall are handled in the same manner as errors in MPI_Waitall. - -.SH NOTE -.ft R -\fIflag\fP is true only if all requests have completed. Otherwise, \fIflag\fP is false, -and neither \fIarray_of_requests\fP nor \fIarray_of_statuses\fP is modified. - -.SH ERRORS -For each invocation of MPI_Testall, if one or more requests generate -an MPI error, only the \fIfirst\fP MPI request that caused an -error will be passed to its corresponding error handler. No other -error handlers will be invoked (even if multiple requests generated -errors). However, \fIall\fP requests that generate an error -will have a relevant error code set in the corresponding -status.MPI_ERROR field (unless MPI_STATUSES_IGNORE was used). 
-.sp -The default error handler aborts the MPI job, except for I/O function -errors. The error handler may be changed with MPI_Comm_set_errhandler, -MPI_File_set_errhandler, or MPI_Win_set_errhandler (depending on the -type of MPI handle that generated the MPI request); the predefined -error handler MPI_ERRORS_RETURN may be used to cause error values to -be returned. Note that MPI does not guarantee that an MPI program can -continue past an error. -.sp -If the invoked error handler allows MPI_Testall to return to the -caller, the value MPI_ERR_IN_STATUS will be returned in the C and -Fortran bindings. - -.SH SEE ALSO -.ft R -.sp -MPI_Comm_set_errhandler -.br -MPI_File_set_errhandler -.br -MPI_Test -.br -MPI_Testany -.br -MPI_Testsome -.br -MPI_Wait -.br -MPI_Waitall -.br -MPI_Waitany -.br -MPI_Waitsome -.br -MPI_Win_set_errhandler -.br - diff --git a/ompi/mpi/man/man3/MPI_Testany.3in b/ompi/mpi/man/man3/MPI_Testany.3in deleted file mode 100644 index 169fd546a0d..00000000000 --- a/ompi/mpi/man/man3/MPI_Testany.3in +++ /dev/null @@ -1,121 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Testany 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Testany\fP \- Tests for completion of any one previously initiated communication in a list. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Testany(int \fIcount\fP, MPI_Request\fI array_of_requests[]\fP, - int\fI *index\fP, int\fI *flag\fP, MPI_Status\fI *status\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_TESTANY(\fICOUNT, ARRAY_OF_REQUESTS, INDEX, FLAG, STATUS, IERROR\fP) - LOGICAL \fIFLAG\fP - INTEGER \fICOUNT, ARRAY_OF_REQUESTS(*), INDEX\fP - INTEGER \fISTATUS(MPI_STATUS_SIZE), IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Testany(\fIcount\fP, \fIarray_of_requests\fP, \fIindex\fP, \fIflag\fP, \fIstatus\fP, \fIierror\fP) - INTEGER, INTENT(IN) :: \fIcount\fP - TYPE(MPI_Request), INTENT(INOUT) :: \fIarray_of_requests(count)\fP - INTEGER, INTENT(OUT) :: \fIindex\fP - LOGICAL, INTENT(OUT) :: \fIflag\fP - TYPE(MPI_Status) :: \fIstatus\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -count -List length (integer). -.TP 1i -array_of_requests -Array of requests (array of handles). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -index -Index of operation that completed, or MPI_UNDEFINED if none completed -(integer). -.TP 1i -flag -True if one of the operations is complete (logical). -.TP 1i -status -Status object (status). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Testany tests for completion of either one or none of the operations associated with active handles. In the former case, it returns \fIflag\fP = true, returns in \fIindex\fP the index of this request in the array, and returns in \fIstatus\fP the status of that operation; if the request was allocated by a nonblocking communication call then the request is deallocated and the handle is set to MPI_REQUEST_NULL. (The array is indexed from 0 in C, and from 1 in Fortran.) In the latter case (no operation completed), it returns \fIflag\fP = false, returns a value of MPI_UNDEFINED in \fIindex\fP, and \fIstatus\fP is undefined. -.sp -The array may contain null or inactive handles. If the array contains no active handles then the call returns immediately with \fIflag\fP = true, \fIindex\fP = MPI_UNDEFINED, and an empty \fIstatus\fP. 
-.sp -If the array of requests contains active handles then the execution of -MPI_Testany(count, array_of_requests, index, status) has the same effect as the execution of MPI_Test(&\fIarray_of_requests[i\fP], \fIflag\fP, \fIstatus\fP), for \fIi\fP=0,1,...,count-1, in some arbitrary order, until one call returns \fIflag\fP = true, or all fail. In the former case, \fIindex\fP is set to the last value of \fIi\fP, and in the latter case, it is set to MPI_UNDEFINED. MPI_Testany with an array containing one active entry is equivalent to MPI_Test. -.sp -If your application does not need to examine the \fIstatus\fP field, you can save resources by using the predefined constant MPI_STATUS_IGNORE as a special value for the \fIstatus\fP argument. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -MPI_Comm_set_errhandler, MPI_File_set_errhandler, or -MPI_Win_set_errhandler (depending on the type of MPI handle that -generated the request); the predefined error handler MPI_ERRORS_RETURN -may be used to cause error values to be returned. Note that MPI does -not guarantee that an MPI program can continue past an error. -.sp -Note that per MPI-1 section 3.2.5, MPI errors on requests passed -to MPI_TESTANY do not set the status.MPI_ERROR field in the returned -status. The error code is passed to the back-end error handler and -may be passed back to the caller through the return value of -MPI_TESTANY if the back-end error handler returns it. The pre-defined -MPI error handler MPI_ERRORS_RETURN exhibits this behavior, for -example. 
- -.SH SEE ALSO -.ft R -.sp -MPI_Comm_set_errhandler -.br -MPI_File_set_errhandler -.br -MPI_Test -.br -MPI_Testall -.br -MPI_Testsome -.br -MPI_Wait -.br -MPI_Waitall -.br -MPI_Waitany -.br -MPI_Waitsome -.br -MPI_Win_set_errhandler -.br - diff --git a/ompi/mpi/man/man3/MPI_Testsome.3in b/ompi/mpi/man/man3/MPI_Testsome.3in deleted file mode 100644 index 4deb07dd712..00000000000 --- a/ompi/mpi/man/man3/MPI_Testsome.3in +++ /dev/null @@ -1,143 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2011-2015 Cisco Systems, Inc. All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Testsome 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Testsome\fP \- Tests for completion of one or more previously initiated communications in a list. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Testsome(int \fIincount\fP, MPI_Request \fIarray_of_requests[]\fP, - int\fI *outcount\fP, int\fI array_of_indices[]\fP, - MPI_Status\fI array_of_statuses[]\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_TESTSOME(\fIINCOUNT, ARRAY_OF_REQUESTS, OUTCOUNT, - ARRAY_OF_INDICES, ARRAY_OF_STATUSES, IERROR\fP) - INTEGER \fIINCOUNT, ARRAY_OF_REQUESTS(*)\fP - INTEGER \fIOUTCOUNT, ARRAY_OF_INDICES(*)\fP - INTEGER \fIARRAY_OF_STATUSES(MPI_STATUS_SIZE,*), IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Testsome(\fIincount\fP, \fIarray_of_requests\fP, \fIoutcount\fP, \fIarray_of_indices\fP, - \fIarray_of_statuses\fP, \fIierror\fP) - INTEGER, INTENT(IN) :: \fIincount\fP - TYPE(MPI_Request), INTENT(INOUT) :: \fIarray_of_requests(incount)\fP - INTEGER, INTENT(OUT) :: \fIoutcount\fP, \fIarray_of_indices(*)\fP - TYPE(MPI_Status) :: \fIarray_of_statuses(*)\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -incount -Length of array_of_requests (integer). -.TP 1i -array_of_requests -Array of requests (array of handles). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -outcount -Number of completed requests (integer). -.TP 1i -array_of_indices -Array of indices of operations that completed (array of integers). -.TP 1i -array_of_statuses -Array of status objects for operations that completed (array of status). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -Behaves like MPI_Waitsome, except that it returns immediately. -.sp -Returns in outcount the number of requests from the list -array_of_requests that have completed. Returns in the first outcount -locations of the array array_of_indices the indices of these -operations (index within the array array_of_requests; the array is -indexed from 0 in C and from 1 in Fortran). Returns in the first -outcount locations of the array array_of_status the status for these -completed operations. If a request that completed was allocated by a -nonblocking communication call, then it is deallocated, and the -associated handle is set to MPI_REQUEST_NULL. -.sp -If no operation has completed it returns outcount = 0. 
If there is no -active handle in the list, it returns outcount = MPI_UNDEFINED. -.sp -MPI_Testsome is a local operation, which returns immediately, whereas MPI_Waitsome blocks until a communication completes, if it was passed a list that contains at least one active handle. Both calls fulfill a fairness requirement: If a request for a receive repeatedly appears in a list of requests passed to MPI_Waitsome or MPI_Testsome, and a matching send has been posted, then the receive will eventually succeed unless the send is satisfied by another receive; send requests also fulfill this fairness requirement. -.sp -Errors that occur during the execution of MPI_Testsome are handled as for -MPI_Waitsome. -.sp -If your application does not need to examine the \fIarray_of_statuses\fP field, you can save resources by using the predefined constant MPI_STATUSES_IGNORE as a special value for the \fIarray_of_statuses\fP argument. - -.SH NOTES -The use of MPI_Testsome is likely to be more -efficient than the use of MPI_Testany. The former returns information on all completed communications; with the latter, a new call is required for each communication that completes. -.sp -A server with multiple clients can use MPI_Waitsome so as not to starve any client. Clients send messages to the server with service requests. The server calls MPI_Waitsome with one receive request for each client, then handles all receives that have completed. If a call to MPI_Waitany is used instead, then one client could starve while requests from another client always sneak in first. - -.SH ERRORS -For each invocation of MPI_Testsome, if one or more requests generate -an MPI error, only the \fIfirst\fP MPI request that caused an -error will be passed to its corresponding error handler. No other -error handlers will be invoked (even if multiple requests generated -errors). 
However, \fIall\fP requests that generate an error -will have a relevant error code set in the corresponding -status.MPI_ERROR field (unless MPI_STATUSES_IGNORE was used). -.sp -The default error handler aborts the MPI job, except for I/O function -errors. The error handler may be changed with MPI_Comm_set_errhandler, -MPI_File_set_errhandler, or MPI_Win_set_errhandler (depending on the -type of MPI handle that generated the MPI request); the predefined -error handler MPI_ERRORS_RETURN may be used to cause error values to -be returned. Note that MPI does not guarantee that an MPI program can -continue past an error. -.sp -If the invoked error handler allows MPI_Testsome to return to the -caller, the value MPI_ERR_IN_STATUS will be returned in the C and -Fortran bindings. - -.SH SEE ALSO -.ft R -.sp -MPI_Comm_set_errhandler -.br -MPI_File_set_errhandler -.br -MPI_Test -.br -MPI_Testall -.br -MPI_Testany -.br -MPI_Wait -.br -MPI_Waitall -.br -MPI_Waitany -.br -MPI_Waitsome -.br -MPI_Win_set_errhandler -.br - diff --git a/ompi/mpi/man/man3/MPI_Topo_test.3in b/ompi/mpi/man/man3/MPI_Topo_test.3in deleted file mode 100644 index d53552a3833..00000000000 --- a/ompi/mpi/man/man3/MPI_Topo_test.3in +++ /dev/null @@ -1,77 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Topo_test 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Topo_test\fP \- Determines the type of topology (if any) associated with a communicator. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Topo_test(MPI_Comm \fIcomm\fP, int\fI *top_type\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_TOPO_TEST(\fICOMM, TOP_TYPE, IERROR\fP) - INTEGER \fICOMM, TOP_TYPE, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Topo_test(\fIcomm\fP, \fItop_type\fP, \fIierror\fP) - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - INTEGER, INTENT(OUT) :: \fItop_type\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -comm -Communicator (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -top_type -Topology type of communicator comm (choice). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -The function MPI_Topo_test returns the type of topology that is assigned to a communicator. -.sp -The output value \fItop_type\fP is one of the following: -.sp -.nf - MPI_GRAPH graph topology - MPI_CART Cartesian topology - MPI_DIST_GRAPH distributed graph topology - MPI_UNDEFINED no topology - -.fi -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. 
- -.SH SEE ALSO -MPI_Graph_create -.br -MPI_Cart_create - diff --git a/ompi/mpi/man/man3/MPI_Type_c2f.3in b/ompi/mpi/man/man3/MPI_Type_c2f.3in deleted file mode 100644 index a13fce697dd..00000000000 --- a/ompi/mpi/man/man3/MPI_Type_c2f.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Comm_f2c.3 diff --git a/ompi/mpi/man/man3/MPI_Type_commit.3in b/ompi/mpi/man/man3/MPI_Type_commit.3in deleted file mode 100644 index 2e373d995bb..00000000000 --- a/ompi/mpi/man/man3/MPI_Type_commit.3in +++ /dev/null @@ -1,68 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Type_commit 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Type_commit\fP \- Commits a data type. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Type_commit(MPI_Datatype *\fIdatatype\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_TYPE_COMMIT(\fIDATATYPE, IERROR\fP) - INTEGER \fIDATATYPE, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Type_commit(\fIdatatype\fP, \fIierror\fP) - TYPE(MPI_Datatype), INTENT(INOUT) :: \fIdatatype\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -datatype -Data type (handle). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -The commit operation commits the data type. A data type is the formal description of a communication buffer, not the content of that buffer. After a data type has been committed, it can be repeatedly reused to communicate the changing content of a buffer or, indeed, the content of different buffers, with different starting addresses. -.sp -\fBExample:\fP The following Fortran code fragment gives examples of using MPI_Type_commit. 
-.sp -.nf - INTEGER type1, type2 - CALL MPI_TYPE_CONTIGUOUS(5, MPI_REAL, type1, ierr) - ! new type object created - CALL MPI_TYPE_COMMIT(type1, ierr) - ! now type1 can be used for communication -.fi -.sp -If the data type specified in \fIdatatype\fP is already committed, it is equivalent to a no-op. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_Type_contiguous.3in b/ompi/mpi/man/man3/MPI_Type_contiguous.3in deleted file mode 100644 index 3f0eec18efb..00000000000 --- a/ompi/mpi/man/man3/MPI_Type_contiguous.3in +++ /dev/null @@ -1,94 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Type_contiguous 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Type_contiguous\fP \- Creates a contiguous datatype. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Type_contiguous(int \fIcount\fP, MPI_Datatype\fI oldtype\fP, - MPI_Datatype\fI *newtype\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_TYPE_CONTIGUOUS(\fICOUNT, OLDTYPE, NEWTYPE, IERROR\fP) - INTEGER \fICOUNT, OLDTYPE, NEWTYPE, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Type_contiguous(\fIcount\fP, \fIoldtype\fP, \fInewtype\fP, \fIierror\fP) - INTEGER, INTENT(IN) :: \fIcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIoldtype\fP - TYPE(MPI_Datatype), INTENT(OUT) :: \fInewtype\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -count -Replication count (nonnegative integer). -.TP 1i -oldtype -Old datatype (handle). -.sp -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -newtype -New datatype (handle). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -The simplest datatype constructor is MPI_Type_contiguous, which allows replication of a datatype into contiguous locations. -.sp -\fInewtype\fP is the datatype obtained by concatenating \fIcount\fP copies of \fIoldtype\fP. Concatenation is defined using the extent of \fIoldtype\fP as the size of the concatenated copies. -.sp -\fBExample:\fR Let oldtype have type map {(double, 0), (char, 8)}, with extent 16, and let count = 3. The type map of the datatype returned by newtype is -.sp -.nf - {(double, 0), (char, 8), (double, 16), (char, 24), - (double, 32), (char, 40)}; -.fi -.sp -i.e., alternating double and char elements, with displacements 0, 8, 16, 24, 32, 40. -.sp -In general, assume that the type map of oldtype is -.sp -.nf - {(type(0), disp(0)),...,(type(n-1), disp(n-1))}, -.fi -.sp -with extent ex. Then newtype has a type map with count times n entries defined by: -.sp -.nf - {(type(0), disp(0)), ...,(type(n-1), disp(n-1)), - (type(0), disp(0) + ex), ...,(type(n-1), - disp(n-1) + ex), ...,(type(0), disp(0) + ex * (count - 1)), - ...,(type(n-1), disp(n-1) + ex * (count - 1))}. -.fi -.sp -For more information about derived datatypes, see Section 3.12 of the MPI-1 Standard. 
- -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_Type_create_darray.3in b/ompi/mpi/man/man3/MPI_Type_create_darray.3in deleted file mode 100644 index 4cbbea129c7..00000000000 --- a/ompi/mpi/man/man3/MPI_Type_create_darray.3in +++ /dev/null @@ -1,150 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Type_create_darray 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Type_create_darray\fP \- Creates a distributed array datatype; - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Type_create_darray(int \fIsize\fP, int \fIrank\fP, int \fIndims\fP, - const int \fIarray_of_gsizes\fP[], const int \fIarray_of_distribs\fP[], - const int \fIarray_of_dargs\fP[], const int \fIarray_of_psizes\fP[], - int \fIorder\fP, MPI_Datatype \fIoldtype\fP, MPI_Datatype \fI*newtype\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_TYPE_CREATE_DARRAY(\fISIZE, RANK, NDIMS, ARRAY_OF_GSIZES, - ARRAY_OF_DISTRIBS, ARRAY_OF_DARGS, ARRAY_OF_PSIZES, ORDER, - OLDTYPE, NEWTYPE, IERROR\fP) - - INTEGER \fISIZE, RANK, NDIMS, ARRAY_OF_GSIZES(*), ARRAY_OF_DISTRIBS(*), - ARRAY_OF_DARGS(*), ARRAY_OF_PSIZES(*), ORDER, OLDTYPE, - NEWTYPE, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Type_create_darray(\fIsize\fP, \fIrank\fP, \fIndims\fP, \fIarray_of_gsizes\fP, - \fIarray_of_distribs\fP, \fIarray_of_dargs\fP, \fIarray_of_psizes\fP, \fIorder,\fP - \fIoldtype\fP, \fInewtype\fP, \fIierror\fP) - INTEGER, INTENT(IN) :: \fIsize\fP, \fIrank\fP, \fIndims\fP, \fIarray_of_gsizes(ndims),\fP - \fIarray_of_distribs(ndims)\fP, \fIarray_of_dargs(ndims),\fP - \fIarray_of_psizes(ndims)\fP, \fIorder\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIoldtype\fP - TYPE(MPI_Datatype), INTENT(OUT) :: \fInewtype\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -size -Size of process group (positive integer). -.TP 1i -rank -Rank in process group (nonnegative integer). -.TP 1i -ndims -Number of array dimensions as well as process grid dimensions (positive integer). -.sp -.TP 1i -array_of_gsizes -Number of elements of type \fIoldtype\fP in each dimension of global array (array of positive integers). -.sp -.TP 1i -array_of_distribs -Distribution of array in each dimension (array of state). -.TP 1i -array_of_dargs -Distribution argument in each dimension (array of positive integers). -.sp -.TP 1i -array_of_psizes -Size of process grid in each dimension (array of positive integers). -.sp -.TP 1i -order -Array storage order flag (state). -.TP 1i -oldtype -Old data type (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -newtype -New data type (handle). -.TP 1i -IERROR -Fortran only: Error status (integer). 
- -.SH DESCRIPTION -.ft R - -MPI_Type_create_darray can be used to generate the data types corresponding to the distribution of an ndims-dimensional array of \fIoldtype\fP elements onto an \fIndims\fP-dimensional grid of logical processes. Unused dimensions of \fIarray_of_psizes\fP should be set to 1. For a call to MPI_Type_create_darray to be correct, the equation -.sp -.nf - \fIndims\fP-1 - pi \fIarray_of_psizes[i]\fP = \fIsize\fP - \fIi\fP=0 - -.fi -.sp -must be satisfied. The ordering of processes in the process grid is assumed to be row-major, as in the case of virtual Cartesian process topologies in MPI-1. -.sp -Each dimension of the array can be distributed in one of three ways: -.sp -.nf -- MPI_DISTRIBUTE_BLOCK - Block distribution -- MPI_DISTRIBUTE_CYCLIC - Cyclic distribution -- MPI_DISTRIBUTE_NONE - Dimension not distributed. -.fi -.sp -The constant MPI_DISTRIBUTE_DFLT_DARG specifies a default distribution argument. The distribution argument for a dimension that is not distributed is ignored. For any dimension \fIi\fP in which the distribution is MPI_DISTRIBUTE_BLOCK, it is erroneous to specify \fIarray_of_dargs[i]\fP \fI*\fP \fIarray_of_psizes[i]\fP < \fIarray_of_gsizes[i]\fP. -.sp -For example, the HPF layout ARRAY(CYCLIC(15)) corresponds to MPI_DISTRIBUTE_CYCLIC with a distribution argument of 15, and the HPF layout ARRAY(BLOCK) corresponds to MPI_DISTRIBUTE_BLOCK with a distribution argument of MPI_DISTRIBUTE_DFLT_DARG. -.sp -The \fIorder\fP argument is used as in MPI_TYPE_CREATE_SUBARRAY to specify the storage order. Therefore, arrays described by this type constructor may be stored in Fortran (column-major) or C (row-major) order. Valid values for order are MPI_ORDER_FORTRAN and MPI_ORDER_C. -.sp -This routine creates a new MPI data type with a typemap defined in terms of a function called "cyclic()" (see below). 
-.sp -Without loss of generality, it suffices to define the typemap for the MPI_DISTRIBUTE_CYCLIC case where MPI_DISTRIBUTE_DFLT_DARG is not used. -.sp -MPI_DISTRIBUTE_BLOCK and MPI_DISTRIBUTE_NONE can be reduced to the MPI_DISTRIBUTE_CYCLIC case for dimension \fIi\fP as follows. -.sp -MPI_DISTRIBUTE_BLOCK with \fIarray_of_dargs[i]\fP equal to MPI_DISTRIBUTE_DFLT_DARG is equivalent to MPI_DISTRIBUTE_CYCLIC with \fIarray_of_dargs[i]\fP set to -.sp -.nf - (\fIarray_of_gsizes[i]\fP + \fIarray_of_psizes[i]\fP - 1)/\fIarray_of_psizes[i]\fP -.fi -.sp -If \fIarray_of_dargs[i]\fP is not MPI_DISTRIBUTE_DFLT_DARG, then MPI_DISTRIBUTE_BLOCK and DISTRIBUTE_CYCLIC are equivalent. -.sp -MPI_DISTRIBUTE_NONE is equivalent to MPI_DISTRIBUTE_CYCLIC with \fIarray_of_dargs[i]\fP set to \fIarray_of_gsizes[i]\fP. -.sp -Finally, MPI_DISTRIBUTE_CYCLIC with \fIarray_of_dargs[i]\fP equal to MPI_DISTRIBUTE_DFLT_DARG is equivalent to MPI_DISTRIBUTE_CYCLIC with \fIarray_of_dargs[i]\fP set to 1. -.sp - -.SH NOTES -.ft R -For both Fortran and C arrays, the ordering of processes in the process grid is assumed to be row-major. This is consistent with the ordering used in virtual Cartesian process topologies in MPI-1. To create such virtual process topologies, or to find the coordinates of a process in the process grid, etc., users may use the corresponding functions provided in MPI-1. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. 
- - diff --git a/ompi/mpi/man/man3/MPI_Type_create_f90_complex.3in b/ompi/mpi/man/man3/MPI_Type_create_f90_complex.3in deleted file mode 100644 index e74b5cda97f..00000000000 --- a/ompi/mpi/man/man3/MPI_Type_create_f90_complex.3in +++ /dev/null @@ -1,140 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Type_create_f90_complex 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" - -.SH NAME -.nf -\fBMPI_Type_create_f90_complex\fP \- Returns a bounded MPI complex datatype - -.fi -.SH SYNTAX -.ft R - -.SH C Syntax -.nf -#include -int MPI_Type_create_f90_complex(int \fIp\fP, int \fIr\fP, - MPI_Datatype *\fInewtype\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_TYPE_CREATE_F90_COMPLEX (\fIP, R, NEWTYPE, IERROR\fP) - INTEGER \fIP, R, NEWTYPE, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Type_create_f90_complex(\fIp\fP, \fIr\fP, \fInewtype\fP, \fIierror\fP) - INTEGER, INTENT(IN) :: \fIp\fP, \fIr\fP - TYPE(MPI_Datatype), INTENT(OUT) :: \fInewtype\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -p -Precision, in decimal digits (integer). -.TP 1i -r -Decimal exponent range (integer). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -newtype -New data type (handle). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -This function provides a way to declare KIND-parameterized COMPLEX MPI -datatypes. The arguments are interpreted in a similar fashion to the -F90 function SELECTED_REAL_KIND. The parameters \fIp\fP and \fIr\fP -must be scalar integers. The argument \fIp\fP represents the required -level of numerical precision, in decimal digits. 
The \fIr\fP parameter -indicates the range of exponents desired: the returned datatype will -have at least one exponent between \+\fIr\fP and \-\fIr\fP (inclusive). -.sp -Either \fIp\fP or \fIr\fP, but not both, may be omitted from calls to -SELECTED_REAL_KIND. Similarly, either argument to -MPI_Type_create_f90_complex may be set to MPI_UNDEFINED. - -.SH NOTES -.ft R -It is erroneous to supply values for \fIp\fP and \fIr\fP not supported by -the compiler. -.sp -The Fortran function SELECTED_REAL_KIND maps a large number of -(\fIp,r\fP) pairs to a much smaller number of KIND parameters -supported by the compiler. KIND parameters are not specified by the -language and are not portable. From the point of view of the language, -variables of the same base type and KIND parameter are equivalent, -even if their KIND parameters were generated by different (\fIp,r\fP) -arguments to SELECTED_REAL_KIND. However, to help facilitate -interoperability in a heterogeneous environment, equivalency is more -strictly defined for datatypes returned by -MPI_Type_create_f90_complex. Two MPI datatypes, each generated by this -function, will match if and only if they have identical values for -both \fIp\fP and \fIr\fP. -.sp -The interaction between the datatypes returned by this function and -the external32 data representation \- used by MPI_Pack_external, -MPI_Unpack_external, and many MPI_File functions \- is subtle. The -external32 representation of returned datatypes is as follows. -.sp -.nf - if (\fIp\fP > 33) and/or (\fIr\fP > 4931): - external32 size = n/a (undefined) - else if (\fIp\fP > 15) and/or (\fIr\fP > 307): - external32 size = 32 - else if (\fIp\fP > 6) and/or (\fIr\fP > 37): - external32 size = 16 - else: - external32 size = 8 -.fi -.sp -If the external32 representation of a datatype is undefined, so are -the results of using that datatype in operations that require the -external32 format. 
Care should be taken not to use incompatible -datatypes indirectly, e.g., as part of another datatype or through a -duplicated datatype, in these functions. -.sp -If a variable is declared specifying a nondefault KIND value that was -not obtained with SELECTED_REAL_KIND (i.e., \fIp\fP and/or \fIr\fP are -unknown), the only way to obtain a matching MPI datatype is to use the -functions MPI_Sizeof and MPI_Type_match_size. - -.SH ERRORS -.ft R -Almost all MPI routines return an error value; C routines as -the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. -.sp -See the MPI man page for a full list of MPI error codes. - -.SH SEE ALSO -.ft R -.nf -MPI_Pack_external -MPI_Sizeof -MPI_Type_match_size -MPI_Unpack_external -SELECTED_REAL_KIND - diff --git a/ompi/mpi/man/man3/MPI_Type_create_f90_integer.3in b/ompi/mpi/man/man3/MPI_Type_create_f90_integer.3in deleted file mode 100644 index b0cacf9c261..00000000000 --- a/ompi/mpi/man/man3/MPI_Type_create_f90_integer.3in +++ /dev/null @@ -1,133 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines -.\" Copyright (c) 2020 Google, LLC. All rights reserved. 
-.\" $COPYRIGHT$ -.TH MPI_Type_create_f90_integer 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" - -.SH NAME -.nf -\fBMPI_Type_create_f90_integer\fP \- Returns a bounded MPI integer datatype - -.fi -.SH SYNTAX -.ft R - -.SH C Syntax -.nf -#include -int MPI_Type_create_f90_integer(int \fIr\fP, MPI_Datatype *\fInewtype\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_TYPE_CREATE_F90_INTEGER (\fIR, NEWTYPE, IERROR\fP) - INTEGER \fIR, NEWTYPE, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Type_create_f90_integer(\fIr\fP, \fInewtype\fP, \fIierror\fP) - INTEGER, INTENT(IN) :: \fIr\fP - TYPE(MPI_Datatype), INTENT(OUT) :: \fInewtype\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -r -Precision, in decimal digits (integer). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -newtype -New data type (handle). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -This function provides a way to declare KIND-parameterized INTEGER MPI -datatypes. The argument is interpreted in a similar fashion to the F90 -function SELECTED_INT_KIND: \fIr\fP must be a scalar integer, and -represents the desired level of numerical precision, in decimal -digits. - -.SH NOTES -.ft R -It is erroneous to supply a value for \fIr\fP not supported by the -compiler. -.sp -The Fortran function SELECTED_INT_KIND maps a large number of \fIr\fP -values to a much smaller number of KIND parameters supported by the -compiler. KIND parameters are not specified by the language and are -not portable. From the point of view of the language, variables of the -same base type and KIND parameter are equivalent, even if their KIND -parameters were generated by different \fIr\fP arguments to -SELECTED_INT_KIND. However, to help facilitate interoperability in a -heterogeneous environment, equivalency is more strictly defined for -datatypes returned by MPI_Type_create_f90_integer. 
Two MPI datatypes, -each generated by this function, will match if and only if they have -identical values for \fIr\fP. -.sp -The interaction between the datatypes returned by this function and -the external32 data representation \- used by MPI_Pack_external, -MPI_Unpack_external and many MPI_File functions \- is subtle. The -external32 representation of returned datatypes is as follows. -.sp -.nf - if (\fIr\fP > 38): - external32 size = n/a (undefined) - else if (\fIr\fP > 18): - external32 size = 16 - else if (\fIr\fP > 9): - external32 size = 8 - else if (\fIr\fP > 4): - external32 size = 4 - else if (\fIr\fP > 2): - external32 size = 2 - else: - external32 size = 1 -.fi -.sp -If the external32 representation of a datatype is undefined, so are -the results of using that datatype in operations that require the -external32 format. Care should be taken not to use incompatible -datatypes indirectly, e.g., as part of another datatype or through a -duplicated datatype, in these functions. -.sp -If a variable is declared specifying a nondefault KIND value that was -not obtained with SELECTED_INT_KIND (i.e., \fIr\fP is unknown), the -only way to obtain a matching MPI datatype is to use the functions -MPI_Sizeof and MPI_Type_match_size. - -.SH ERRORS -.ft R -Almost all MPI routines return an error value; C routines as -the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. -.sp -See the MPI man page for a full list of MPI error codes. 
- -.SH SEE ALSO -.ft R -.nf -MPI_Pack_external -MPI_Sizeof -MPI_Type_match_size -MPI_Unpack_external -SELECTED_INT_KIND - diff --git a/ompi/mpi/man/man3/MPI_Type_create_f90_real.3in b/ompi/mpi/man/man3/MPI_Type_create_f90_real.3in deleted file mode 100644 index b074399f186..00000000000 --- a/ompi/mpi/man/man3/MPI_Type_create_f90_real.3in +++ /dev/null @@ -1,139 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Type_create_f90_real 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" - -.SH NAME -.nf -\fBMPI_Type_create_f90_real\fP \- Returns a bounded MPI real datatype - -.fi -.SH SYNTAX -.ft R - -.SH C Syntax -.nf -#include -int MPI_Type_create_f90_real(int \fIp\fP, int \fIr\fP, MPI_Datatype *\fInewtype\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_TYPE_CREATE_F90_REAL (\fIP, R, NEWTYPE, IERROR\fP) - INTEGER \fIP, R, NEWTYPE, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Type_create_f90_real(\fIp\fP, \fIr\fP, \fInewtype\fP, \fIierror\fP) - INTEGER, INTENT(IN) :: \fIp\fP, \fIr\fP - TYPE(MPI_Datatype), INTENT(OUT) :: \fInewtype\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -p -Precision, in decimal digits (integer). -.TP 1i -r -Decimal exponent range (integer). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -newtype -New data type (handle). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -This function provides a way to declare KIND-parameterized REAL MPI -datatypes. The arguments are interpreted in a similar fashion to the -F90 function SELECTED_REAL_KIND. The parameters \fIp\fP and \fIr\fP -must be scalar integers. The argument \fIp\fP represents the required -level of numerical precision, in decimal digits. 
The \fIr\fP parameter -indicates the range of exponents desired: the returned datatype will -have at least one exponent between \+\fIr\fP and \-\fIr\fP (inclusive). -.sp -Either \fIp\fP or \fIr\fP, but not both, may be omitted from calls to -SELECTED_REAL_KIND. Similarly, either argument to -MPI_Type_create_f90_real may be set to MPI_UNDEFINED. - -.SH NOTES -.ft R -It is erroneous to supply values for \fIp\fP and \fIr\fP not supported by -the compiler. -.sp -The Fortran function SELECTED_REAL_KIND maps a large number of -(\fIp,r\fP) pairs to a much smaller number of KIND parameters -supported by the compiler. KIND parameters are not specified by the -language and are not portable. From the point of view of the language, -variables of the same base type and KIND parameter are equivalent, -even if their KIND parameters were generated by different (\fIp,r\fP) -arguments to SELECTED_REAL_KIND. However, to help facilitate -interoperability in a heterogeneous environment, equivalency is more -strictly defined for datatypes returned by -MPI_Type_create_f90_real. Two MPI datatypes, each generated by this -function, will match if and only if they have identical values for -both \fIp\fP and \fIr\fP. -.sp -The interaction between the datatypes returned by this function and -the external32 data representation \- used by MPI_Pack_external, -MPI_Unpack_external and many MPI_File functions \- is subtle. The -external32 representation of returned datatypes is as follows. -.sp -.nf - if (\fIp\fP > 33) and/or (\fIr\fP > 4931): - external32 size = n/a (undefined) - else if (\fIp\fP > 15) and/or (\fIr\fP > 307): - external32 size = 16 - else if (\fIp\fP > 6) and/or (\fIr\fP > 37): - external32 size = 8 - else: - external32 size = 4 -.fi -.sp -If the external32 representation of a datatype is undefined, so are -the results of using that datatype in operations that require the -external32 format. 
Care should be taken not to use incompatible -datatypes indirectly, e.g., as part of another datatype or through a -duplicated datatype, in these functions. -.sp -If a variable is declared specifying a nondefault KIND value that was -not obtained with SELECTED_REAL_KIND (i.e., \fIp\fP and/or \fIr\fP are -unknown), the only way to obtain a matching MPI datatype is to use the -functions MPI_Sizeof and MPI_Type_match_size. - -.SH ERRORS -.ft R -Almost all MPI routines return an error value; C routines as -the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. -.sp -See the MPI man page for a full list of MPI error codes. 
- -.SH SEE ALSO -.ft R -.nf -MPI_Pack_external -MPI_Sizeof -MPI_Type_match_size -MPI_Unpack_external -SELECTED_REAL_KIND - diff --git a/ompi/mpi/man/man3/MPI_Type_create_hindexed.3in b/ompi/mpi/man/man3/MPI_Type_create_hindexed.3in deleted file mode 100644 index 079131d99d6..00000000000 --- a/ompi/mpi/man/man3/MPI_Type_create_hindexed.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Type_indexed.3 diff --git a/ompi/mpi/man/man3/MPI_Type_create_hindexed_block.3in b/ompi/mpi/man/man3/MPI_Type_create_hindexed_block.3in deleted file mode 100644 index eb17e26c190..00000000000 --- a/ompi/mpi/man/man3/MPI_Type_create_hindexed_block.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Type_create_indexed_block.3 diff --git a/ompi/mpi/man/man3/MPI_Type_create_hvector.3in b/ompi/mpi/man/man3/MPI_Type_create_hvector.3in deleted file mode 100644 index 25a0e3bbae5..00000000000 --- a/ompi/mpi/man/man3/MPI_Type_create_hvector.3in +++ /dev/null @@ -1,96 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Type_create_hvector 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Type_create_hvector\fP \- Creates a vector (strided) data type with offset in bytes. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Type_create_hvector(int \fIcount\fP, int \fIblocklength\fP, - MPI_Aint \fIstride\fP, MPI_Datatype \fIoldtype\fP, MPI_Datatype *\fInewtype\fP) - -.fi -.SH Fortran Syntax (see FORTRAN 77 NOTES) -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_TYPE_CREATE_HVECTOR(\fICOUNT, BLOCKLENGTH, STRIDE, OLDTYPE, - NEWTYPE, IERROR\fP) - - INTEGER \fICOUNT, BLOCKLENGTH, OLDTYPE, NEWTYPE, IERROR\fP - INTEGER(KIND=MPI_ADDRESS_KIND) \fISTRIDE\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Type_create_hvector(\fIcount\fP, \fIblocklength\fP, \fIstride\fP, \fIoldtype\fP, \fInewtype\fP, - \fIierror\fP) - INTEGER, INTENT(IN) :: \fIcount\fP, \fIblocklength\fP - INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: \fIstride\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIoldtype\fP - TYPE(MPI_Datatype), INTENT(OUT) :: \fInewtype\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -count -Number of blocks (nonnegative integer). -.TP 1i -blocklength -Number of elements in each block (nonnegative integer). -.TP 1i -stride -Number of bytes between start of each block (integer). -.TP 1i -oldtype -Old data type (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -newtype -New data type (handle). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -MPI_Type_create_hvector creates a vector (strided) data type with offset in bytes. -.PP -NOTE \- This routine replaces MPI_Type_hvector, which is deprecated. See the man page MPI_Type_hvector(3) for information about that routine. - -.SH FORTRAN 77 NOTES -.ft R -The MPI standard prescribes portable Fortran syntax for -the \fISTRIDE\fP argument only for Fortran 90. FORTRAN 77 -users may use the non-portable syntax -.sp -.nf - INTEGER*MPI_ADDRESS_KIND \fISTRIDE\fP -.fi -.sp -where MPI_ADDRESS_KIND is a constant defined in mpif.h -and gives the length of the declared integer in bytes. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. 
The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -MPI_Type_hvector -.br -MPI_Type_vector -.br diff --git a/ompi/mpi/man/man3/MPI_Type_create_indexed_block.3in b/ompi/mpi/man/man3/MPI_Type_create_indexed_block.3in deleted file mode 100644 index ad4a92b0473..00000000000 --- a/ompi/mpi/man/man3/MPI_Type_create_indexed_block.3in +++ /dev/null @@ -1,99 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" Copyright (c) 2020 FUJITSU LIMITED. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Type_create_indexed_block 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Type_create_indexed_block, MPI_Type_create_hindexed_block\fP \- Creates an indexed data type with the same block length for all blocks. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Type_create_indexed_block(int \fIcount\fP, int \fIblocklength\fP, const int \fIarray_of_displacements\fP[], MPI_Datatype \fIoldtype\fP, MPI_Datatype *\fInewtype\fP) - -int MPI_Type_create_hindexed_block(int \fIcount\fP, int \fIblocklength\fP, const MPI_Aint \fIarray_of_displacements\fP[], MPI_Datatype \fIoldtype\fP, MPI_Datatype *\fInewtype\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_TYPE_CREATE_INDEXED_BLOCK(\fICOUNT, BLOCKLENGTH, - ARRAY_OF_DISPLACEMENTS, OLDTYPE, NEWTYPE, IERROR\fP) - INTEGER \fICOUNT, BLOCKLENGTH, ARRAY_OF_DISPLACEMENTS(*), - OLDTYPE, NEWTYPE, IERROR \fP - -MPI_TYPE_CREATE_HINDEXED_BLOCK(\fICOUNT, BLOCKLENGTH, - ARRAY_OF_DISPLACEMENTS, OLDTYPE, NEWTYPE, IERROR\fP) - INTEGER \fICOUNT, BLOCKLENGTH, OLDTYPE, NEWTYPE\fP - INTEGER(KIND=MPI_ADDRESS_KIND) \fIARRAY_OF_DISPLACEMENTS(*)\fP - INTEGER \fIIERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Type_create_indexed_block(\fIcount\fP, \fIblocklength\fP, \fIarray_of_displacements\fP, - \fIoldtype\fP, \fInewtype\fP, \fIierror\fP) - INTEGER, INTENT(IN) :: \fIcount\fP, \fIblocklength,\fP - \fIarray_of_displacements(count)\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIoldtype\fP - TYPE(MPI_Datatype), INTENT(OUT) :: \fInewtype\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -MPI_Type_create_hindexed_block(\fIcount\fP, \fIblocklength\fP, \fIarray_of_displacements\fP, - \fIoldtype\fP, \fInewtype\fP, \fIierror\fP) - INTEGER, INTENT(IN) :: \fIcount\fP, \fIblocklength\fP - INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: - \fIarray_of_displacements(count)\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIoldtype\fP - TYPE(MPI_Datatype), INTENT(OUT) :: \fInewtype\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -count -Length of array of displacements (integer). -.TP 1i -blocklength -Size of block (integer). -.TP 1i -array_of_displacements -Array of displacements (array of integers). In units of the extent of \fIoldtype\fP for MPI_Type_create_indexed_block and bytes for MPI_Type_create_hindexed_block. -.TP 1i -oldtype -Old data type (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -newtype -New data type (handle). -.TP 1i -IERROR -Fortran only: Error status (integer). 
- -.SH DESCRIPTION -.ft R -MPI_Type_create_indexed_block and MPI_Type_create_hindexed_block create an indexed data type with the same block length for all blocks. The only difference between the two functions is MPI_Type_create_indexed_block takes an array of displacements in units of the extent of \fIoldtype\fP while MPI_Type_create_hindexed_block takes displacements in bytes. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.sp -MPI_Type_indexed -.br - diff --git a/ompi/mpi/man/man3/MPI_Type_create_keyval.3in b/ompi/mpi/man/man3/MPI_Type_create_keyval.3in deleted file mode 100644 index e678ed95c62..00000000000 --- a/ompi/mpi/man/man3/MPI_Type_create_keyval.3in +++ /dev/null @@ -1,127 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Type_create_keyval 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Type_create_keyval\fP \- Generates a new attribute key for caching on data types. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Type_create_keyval(MPI_Type_copy_attr_function *\fItype_copy_attr_fn\fP, - MPI_Type_delete_attr_function *\fItype_delete_attr_fn\fP, - int *\fItype_keyval\fP, void *\fIextra_state\fP) - -.fi -.SH Fortran Syntax (see FORTRAN 77 NOTES) -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_TYPE_CREATE_KEYVAL(\fITYPE_COPY_ATTR_FN, TYPE_DELETE_ATTR_FN, - TYPE_KEYVAL, EXTRA_STATE, IERROR\fP) - EXTERNAL \fITYPE_COPY_ATTR_FN, TYPE_DELETE_ATTR_FN\fP - INTEGER \fITYPE_KEYVAL, IERROR \fP - INTEGER(KIND=MPI_ADDRESS_KIND) \fIEXTRA_STATE\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Type_create_keyval(\fItype_copy_attr_fn\fP, \fItype_delete_attr_fn\fP, \fItype_keyval\fP, - \fIextra_state\fP, \fIierror\fP) - PROCEDURE(MPI_Type_copy_attr_function) :: \fItype_copy_attr_fn\fP - PROCEDURE(MPI_Type_delete_attr_function) :: \fItype_delete_attr_fn\fP - INTEGER, INTENT(OUT) :: \fItype_keyval\fP - INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: \fIextra_state\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -type_copy_attr_fn -Copy callback function for \fItype_keyval\fP (function). -.TP 1i -type_delete_attr_fn -Delete callback function for \fItype_keyval\fP (function). -.TP 1i -extra_state -Extra state for callback functions. - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -type_keyval -Key value for future access (integer). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Type_create_keyval generates a new attribute key for caching on data types. This routine partially replaces MPI_Keyval_create. -.sp -The argument \fItype_copy_attr_fn\fP may be specified as MPI_TYPE_NULL_COPY_FN or MPI_TYPE_DUP_FN from C or Fortran. MPI_TYPE_NULL_COPY_FN is a function that does nothing other than returning \fIflag\fP = 0 and MPI_SUCCESS. MPI_TYPE_DUP_FN is a simple-minded copy function that sets \fIflag\fP = 1, returns the value of \fIattribute_val_in\fP in \fIattribute_val_out\fP, and returns MPI_SUCCESS. -.sp -The argument \fItype_delete_attr_fn\fP may be specified as MPI_TYPE_NULL_DELETE_FN from C or Fortran. MPI_TYPE_NULL_DELETE_FN is a function that does nothing beyond returning MPI_SUCCESS. 
-The C callback functions are: -.sp -.nf -typedef int MPI_Type_copy_attr_function(MPI_Datatype \fIoldtype\fP, - int \fItype_keyval\fP, void *\fIextra_state\fP, void *\fIattribute_val_in\fP, - void *\fIattribute_val_out\fP, int *\fIflag\fP); -.fi -and -.nf -typedef int MPI_Type_delete_attr_function(MPI_Datatype \fItype\fP, int \fItype_keyval\fP, - void *\fIattribute_val\fP, void *\fIextra_state\fP); -.fi -.sp -The Fortran callback functions are: -.sp -.nf -SUBROUTINE TYPE_COPY_ATTR_FN(\fIOLDTYPE, TYPE_KEYVAL, EXTRA_STATE, - ATTRIBUTE_VAL_IN, ATTRIBUTE_VAL_OUT, FLAG, IERROR\fP) - INTEGER \fIOLDTYPE, TYPE KEYVAL, IERROR\fP - INTEGER(KIND=MPI_ADDRESS_KIND) \fIEXTRA_STATE, - ATTRIBUTE_VAL_IN, ATTRIBUTE_VAL_OUT\fP - LOGICAL \fIFLAG\fP -.fi -and -.nf -SUBROUTINE TYPE_DELETE_ATTR_FN(\fITYPE, TYPE_KEYVAL, ATTRIBUTE_VAL, EXTRA_STATE, - IERROR\fP) - INTEGER \fITYPE, TYPE_KEYVAL, IERROR\fP - INTEGER(KIND=MPI_ADDRESS_KIND) \fIATTRIBUTE VAL, EXTRA_STATE\fP -.fi -.sp - -.SH FORTRAN 77 NOTES -.ft R -The MPI standard prescribes portable Fortran syntax for -the \fIEXTRA_STATE\fP argument only for Fortran 90. FORTRAN 77 -users may use the non-portable syntax -.sp -.nf - INTEGER*MPI_ADDRESS_KIND \fIEXTRA_STATE\fP -.fi -.sp -where MPI_ADDRESS_KIND is a constant defined in mpif.h -and gives the length of the declared integer in bytes. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. 
- -.SH SEE ALSO -.ft R -.sp -MPI_Type_free_keyval - diff --git a/ompi/mpi/man/man3/MPI_Type_create_resized.3in b/ompi/mpi/man/man3/MPI_Type_create_resized.3in deleted file mode 100644 index 392b677e043..00000000000 --- a/ompi/mpi/man/man3/MPI_Type_create_resized.3in +++ /dev/null @@ -1,92 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Type_create_resized 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Type_create_resized\fP \- Returns a new data type with new extent and upper and lower bounds. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Type_create_resized(MPI_Datatype \fIoldtype\fP, MPI_Aint\fI lb\fP, - MPI_Aint \fIextent\fP, MPI_Datatype *\fInewtype\fP) - -.fi -.SH Fortran Syntax (see FORTRAN 77 NOTES) -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_TYPE_CREATE_RESIZED(\fIOLDTYPE, LB, EXTENT, NEWTYPE, IERROR\fP) - INTEGER \fIOLDTYPE, NEWTYPE, IERROR\fP - INTEGER(KIND=MPI_ADDRESS_KIND) \fILB, EXTENT\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Type_create_resized(\fIoldtype\fP, \fIlb\fP, \fIextent\fP, \fInewtype\fP, \fIierror\fP) - INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: \fIlb\fP, \fIextent\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIoldtype\fP - TYPE(MPI_Datatype), INTENT(OUT) :: \fInewtype\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -oldtype -Input data type (handle). -.TP 1i -lb -New lower bound of data type (integer). -.TP 1i -extent -New extent of data type (integer). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -newtype -Output data type (handle). -.TP 1i -IERROR -Fortran only: Error status (integer). 
- -.SH DESCRIPTION -.ft R -MPI_Type_create_resized returns in \fInewtype\fP a handle to a new data type that is identical to \fIoldtype\fP, except that the lower bound of this new data type is set to be \fIlb\fP, and its upper bound is set to be \fIlb\fP + \fIextent\fP. Any previous \fIlb\fP and \fIub\fP markers are erased, and a new pair of lower bound and upper bound markers are put in the positions indicated by the \fIlb\fP and \fIextent\fP arguments. This affects the behavior of the data type when used in communication operations, with \fIcount\fP > 1, and when used in the construction of new derived data types. - -.SH FORTRAN 77 NOTES -.ft R -The MPI standard prescribes portable Fortran syntax for -the \fILB\fP and \fIEXTENT\fP arguments only for Fortran 90. FORTRAN 77 -users may use the non-portable syntax -.sp -.nf - INTEGER*MPI_ADDRESS_KIND \fILB\fP -or - INTEGER*MPI_ADDRESS_KIND \fIEXTENT\fP -.fi -.sp -where MPI_ADDRESS_KIND is a constant defined in mpif.h -and gives the length of the declared integer in bytes. - -.SH NOTE -.ft R -Use of MPI_Type_create_resized is strongly recommended over the old MPI-1 functions MPI_Type_extent and MPI_Type_lb. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. 
- -.SH SEE ALSO - -MPI_Type_get_extent - diff --git a/ompi/mpi/man/man3/MPI_Type_create_struct.3in b/ompi/mpi/man/man3/MPI_Type_create_struct.3in deleted file mode 100644 index 2e6e0d72d61..00000000000 --- a/ompi/mpi/man/man3/MPI_Type_create_struct.3in +++ /dev/null @@ -1,100 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Type_create_struct 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Type_create_struct\fP \- Creates a structured data type. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Type_create_struct(int \fIcount\fP, int \fIarray_of_blocklengths\fP[], - const MPI_Aint \fIarray_of_displacements\fP[], const MPI_Datatype \fIarray_of_types\fP[], - MPI_Datatype *\fInewtype\fP) - -.fi -.SH Fortran Syntax (see FORTRAN 77 NOTES) -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_TYPE_CREATE_STRUCT(\fICOUNT, ARRAY_OF_BLOCKLENGTHS, - ARRAY_OF_DISPLACEMENTS, ARRAY_OF_TYPES, NEWTYPE, IERROR\fP) - INTEGER \fICOUNT, ARRAY_OF_BLOCKLENGTHS(*), ARRAY_OF_TYPES(*),\fP - INTEGER \fINEWTYPE, IERROR \fP - INTEGER(KIND=MPI_ADDRESS_KIND) \fIARRAY_OF_DISPLACEMENTS(*)\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Type_create_struct(\fIcount\fP, \fIarray_of_blocklengths\fP, - \fIarray_of_displacements\fP, \fIarray_of_types\fP, \fInewtype\fP, \fIierror\fP) - INTEGER, INTENT(IN) :: \fIcount\fP, \fIarray_of_blocklengths(count)\fP - INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: - \fIarray_of_displacements(count)\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIarray_of_types(count)\fP - TYPE(MPI_Datatype), INTENT(OUT) :: \fInewtype\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -count -Number of blocks (integer) -- also number of entries in arrays \fIarray_of_types\fP, \fIarray_of_displacements\fP, and \fIarray_of_blocklengths\fP. -.TP 1i -array_of_blocklengths -Number of elements in each block (array of integers). -.TP 1i -array_of_displacements -Byte displacement of each block (array of integers). -.TP 1i -array_of_types -Type of elements in each block (array of handles to data-type objects). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -newtype -New data type (handle). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -MPI_Type_create_struct creates a structured data type. This routine replaces MPI_Type_struct, which is now deprecated. -.PP -NOTE \- This routine replaces MPI_Type_struct, which is deprecated. See the man page MPI_Type_struct(3) for information about that routine. - -.SH FORTRAN 77 NOTES -.ft R -The MPI standard prescribes portable Fortran syntax for -the \fIARRAY_OF_DISPLACEMENTS\fP(*) argument only for Fortran 90. 
FORTRAN 77 -users may use the non-portable syntax -.sp -.nf - INTEGER*MPI_ADDRESS_KIND \fIARRAY_OF_DISPLACEMENTS\fP(*) -.fi -.sp -where MPI_ADDRESS_KIND is a constant defined in mpif.h -and gives the length of the declared integer in bytes. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.sp -MPI_Type_struct -.br -MPI_Type_create_hindexed - diff --git a/ompi/mpi/man/man3/MPI_Type_create_subarray.3in b/ompi/mpi/man/man3/MPI_Type_create_subarray.3in deleted file mode 100644 index a41bf5e9547..00000000000 --- a/ompi/mpi/man/man3/MPI_Type_create_subarray.3in +++ /dev/null @@ -1,137 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Type_create_subarray 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Type_create_subarray\fP \- Creates a data type describing an \fIn\fP-dimensional subarray of an \fIn\fP-dimensional array. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Type_create_subarray(int \fIndims\fP, const int \fIarray_of_sizes[]\fP, const int \fIarray_of_subsizes[]\fP, const int \fIarray_of_starts[]\fP, int \fIorder\fP, MPI_Datatype \fIoldtype\fP, MPI_Datatype \fI*newtype\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_TYPE_CREATE_SUBARRAY(\fINDIMS, ARRAY_OF_SIZES, ARRAY_OF_SUBSIZES, - ARRAY_OF_STARTS, ORDER, OLDTYPE, NEWTYPE, IERROR\fP) - - INTEGER \fINDIMS, ARRAY_OF_SIZES(*), ARRAY_OF_SUBSIZES(*), - ARRAY_OF_STARTS(*), ORDER, OLDTYPE, NEWTYPE, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Type_create_subarray(\fIndims\fP, \fIarray_of_sizes\fP, \fIarray_of_subsizes\fP, - \fIarray_of_starts\fP, \fIorder\fP, \fIoldtype\fP, \fInewtype\fP, \fIierror\fP) - INTEGER, INTENT(IN) :: \fIndims\fP, \fIarray_of_sizes(ndims),\fP - \fIarray_of_subsizes(ndims)\fP, \fIarray_of_starts(ndims)\fP, \fIorder\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIoldtype\fP - TYPE(MPI_Datatype), INTENT(OUT) :: \fInewtype\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -ndims -Number of array dimensions (positive integer). -.TP 1i -array_of_sizes -Number of elements of type \fIoldtype\fP in each dimension of the full array (array of positive integers). -.TP 1i -array_of_subsizes -Number of elements of type \fIoldtype\fP in each dimension of the subarray (array of positive integers). -.TP 1i -array_of_starts -Starting coordinates of the subarray in each dimension (array of nonnegative integers). -.TP 1i -order -Array storage order flag (state). -.TP 1i -oldtype -Array element data type (handle). - - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -newtype -New data type (handle). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -The subarray type constructor creates an MPI data type describing an \fIn\fP-dimensional subarray of an \fIn\fP-dimensional array. The subarray may be situated anywhere within the full array, and may be of any nonzero size up to the size of the larger array as long as it is confined within this array. This type constructor facilitates creating file types to access arrays distributed in blocks among processes to a single file that contains the global array. 
-.sp -This type constructor can handle arrays with an arbitrary number of dimensions and works for both C- and Fortran-ordered matrices (that is, row-major or column-major). Note that a C program may use Fortran order and a Fortran program may use C order. -.sp -The \fIndims\fP parameter specifies the number of dimensions in the full data array and gives the number of elements in \fIarray_of_sizes\fP, \fIarray_of_subsizes\fP, and \fIarray_of_starts\fP. -.sp -The number of elements of type \fIoldtype\fP in each dimension of the \fIn\fP-dimensional array and the requested subarray are specified by \fIarray_of_sizes\fP and \fIarray_of_subsizes\fP, respectively. For any dimension \fIi\fP, it is erroneous to specify \fIarray_of_subsizes[i]\fP < 1 or \fIarray_of_subsizes[i]\fP > \fIarray of sizes[i]\fP. -.sp -The \fIarray_of_starts\fP contains the starting coordinates of each dimension of the subarray. Arrays are assumed to be indexed starting from zero. For any dimension \fIi\fP, it is erroneous to specify -.sp -.nf -\fIarray_of_starts[i]\fP < 0 -.fi -.sp -or -.sp -.nf -\fIarray_of_starts[i]\fP > (\fIarray_of_sizes[i]\fP - \fIarray_of_subsizes[i]\fP). -.fi -.sp -The \fIorder\fP argument specifies the storage order for the subarray as well as the full array. 
It must be set to one of the following: -.sp -- MPI_ORDER_C: The ordering used by C arrays, (that is, row-major order) -.sp -- MPI_ORDER_FORTRAN: The ordering used by Fortran arrays, (that is, column-major order) -.sp -A \fIndims\fP-dimensional subarray (\fInewtype\fP) with no extra padding can be defined by the function Subarray() as follows: -.sp -.nf - newtype = Subarray(ndims, {size , size ,\..., size }, - 0 1 ndims-1 - {subsize , subsize , \..., subsize }, - 0 1 ndims-1 - {start , start , \..., start }, \fIoldtype\fP) - 0 1 ndims-1 -.fi -.sp -Let the typemap of \fIoldtype\fP have the form: -.sp -.nf - {(type , disp ), (type , disp ), \..., (type , disp )} - 0 0 1 1 n-1 n-1 -.fi -.sp -where type\fIi\fP is a predefined MPI data type, and let \fIex\fP be the extent of \fIoldtype\fP. -.sp -The Subarray() function is defined recursively in three equations on page 72 of the MPI-2 standard. -.sp -For an example use of MPI_Type_create_subarray in the context of I/O, see Section 9.9.2 of the MPI-2 standard. - - -.SH NOTES -.ft R -In a Fortran program with arrays indexed starting from 1, if the starting coordinate of a particular dimension of the subarray is \fIn\fP, then the entry in array of starts for that dimension is \fIn\fP-1. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. 
- - diff --git a/ompi/mpi/man/man3/MPI_Type_delete_attr.3in b/ompi/mpi/man/man3/MPI_Type_delete_attr.3in deleted file mode 100644 index b9f57103500..00000000000 --- a/ompi/mpi/man/man3/MPI_Type_delete_attr.3in +++ /dev/null @@ -1,72 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2010-2014 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Type_delete_attr 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Type_delete_attr\fP \- Deletes a datatype-caching attribute value associated with a key. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Type_delete_attr(MPI_Datatype \fItype\fP, int \fItype_keyval\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_TYPE_DELETE_ATTR(\fITYPE, TYPE_KEYVAL, IERROR\fP) - INTEGER \fITYPE, TYPE_KEYVAL, IERROR \fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Type_delete_attr(\fIdatatype\fP, \fItype_keyval\fP, \fIierror\fP) - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - INTEGER, INTENT(IN) :: \fItype_keyval\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -type -Data type from which the attribute is deleted (handle).n - -.SH INPUT PARAMETER -.ft R -.TP 1i -type_keyval -Key value (integer). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Type_delete_attr deletes a datatype-caching attribute value associated with a key. This routines partially replaces MPI_Attr_delete, which is now deprecated. - - -.SH NOTES -Note that it is not defined by the MPI standard what happens if the -delete_fn callback invokes other MPI functions. 
In Open MPI, it is -not valid for delete_fn callbacks (or any of their children) to add or -delete attributes on the same object on which the delete_fn callback -is being invoked. - - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_Type_dup.3in b/ompi/mpi/man/man3/MPI_Type_dup.3in deleted file mode 100644 index 631120cdb30..00000000000 --- a/ompi/mpi/man/man3/MPI_Type_dup.3in +++ /dev/null @@ -1,75 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2010-2014 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Type_dup 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Type_dup\fP \- Duplicates a data type with associated key values. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Type_dup(MPI_Datatype \fItype\fP, MPI_Datatype *\fInewtype\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_TYPE_DUP(\fITYPE, NEWTYPE, IERROR\fP) - INTEGER \fITYPE, NEWTYPE, IERROR \fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Type_dup(\fIoldtype\fP, \fInewtype\fP, \fIierror\fP) - TYPE(MPI_Datatype), INTENT(IN) :: \fIoldtype\fP - TYPE(MPI_Datatype), INTENT(OUT) :: \fInewtype\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -type -Data type (handle). 
- -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -newtype -Copy of \fItype\fP (handle). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Type_dup is a type constructor that duplicates the existing type with associated key values. For each key value, the respective copy callback function determines the attribute value associated with this key in the new communicator. One particular action that a copy callback may take is to delete the attribute from the new data type. Returns in \fInewtype\fP a new data type with exactly the same properties as \fItype\fP, as well as any copied cached information. The new data type has identical upper bound and lower bound and yields the same net result when fully decoded with the functions described in Section 8.6 of the MPI-2 standard. \fInewtype\fP has the same committed state as the old \fItype\fP. - - -.SH NOTES -Note that it is not defined by the MPI standard what happens if the -attribute copy callback invokes other MPI functions. In Open MPI, it -is not valid for attribute copy callbacks (or any of their children) -to add or delete attributes on the same object on which the attribute -copy callback is being invoked. - - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. 
- -.SH SEE ALSO -.ft R -.sp -MPI_Type_create_keyval -.br - diff --git a/ompi/mpi/man/man3/MPI_Type_extent.3in b/ompi/mpi/man/man3/MPI_Type_extent.3in deleted file mode 100644 index 9cb673b3e6b..00000000000 --- a/ompi/mpi/man/man3/MPI_Type_extent.3in +++ /dev/null @@ -1,92 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Type_extent 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Type_extent\fP \- Returns the extent of a data type, the difference between the upper and lower bounds of the data type -- use of this routine is deprecated. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Type_extent(MPI_Datatype \fIdatatype\fP, MPI_Aint\fI *extent\fP) - -.fi -.SH Fortran Syntax -.nf -INCLUDE 'mpif.h' -MPI_TYPE_EXTENT(\fIDATATYPE, EXTENT, IERROR\fP) - INTEGER \fIDATATYPE, EXTENT, IERROR\fP - - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -datatype -Datatype (handle). -.sp -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -extent -Datatype extent (integer). -.sp -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -Note that use of this routine is \fIdeprecated\fP as of MPI-2. Please use MPI_Type_get_extent instead. -.sp -MPI_Type_extent returns the extent of a data type, the difference between the upper and lower bounds of the data type. 
-.sp -In general, if -.sp -.nf - Typemap = {(type(0), disp(0)), ..., (type(n-1), disp(n-1))} -.fi -.sp -then the lower bound of Typemap is defined to be -.sp -.nf - ( min(j) disp(j) if no entry has - lb(Typemap)=( basic type lb - (min(j) {disp(j) such that type(j) = lb} otherwise - -.fi -.sp -Similarly, the upper bound of Typemap is defined to be -.sp -.nf - (max(j) disp(j) + sizeof(type(j)) + e if no entry has - ub(Typemap)=( basic type ub - (max(j) {disp(j) such that type(j) = ub} otherwise -.fi -.sp -Then -.sp -.nf - extent(Typemap) = ub(Typemap) - lb(Typemap) -.fi -.sp -If type(i) requires alignment to a byte address that is a multiple of k(i), then e is the least nonnegative increment needed to round extent(Typemap) to the next multiple of max(i) k(i). - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.sp -MPI_Type_get_extent -.br - diff --git a/ompi/mpi/man/man3/MPI_Type_f2c.3in b/ompi/mpi/man/man3/MPI_Type_f2c.3in deleted file mode 100644 index a13fce697dd..00000000000 --- a/ompi/mpi/man/man3/MPI_Type_f2c.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Comm_f2c.3 diff --git a/ompi/mpi/man/man3/MPI_Type_free.3in b/ompi/mpi/man/man3/MPI_Type_free.3in deleted file mode 100644 index dcd7781c105..00000000000 --- a/ompi/mpi/man/man3/MPI_Type_free.3in +++ /dev/null @@ -1,60 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. 
-.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Type_free 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Type_free\fP \- Frees a data type. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Type_free(MPI_Datatype *\fIdatatype\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_TYPE_FREE(\fIDATATYPE, IERROR\fP) - INTEGER \fIDATATYPE, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Type_free(\fIdatatype\fP, \fIierror\fP) - TYPE(MPI_Datatype), INTENT(INOUT) :: \fIdatatype\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -datatype -Datatype that is freed (handle). -.sp -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -Marks the datatype object associated with datatype for de-allocation and sets datatype to MPI_DATATYPE_NULL. Any communication that is currently using this datatype will complete normally. Derived datatypes that were defined from the freed datatype are not affected. -.sp -Freeing a datatype does not affect any other datatype that was built from the freed datatype. The system behaves as if input datatype arguments to derived datatype constructors are passed by value. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. 
- - - diff --git a/ompi/mpi/man/man3/MPI_Type_free_keyval.3in b/ompi/mpi/man/man3/MPI_Type_free_keyval.3in deleted file mode 100644 index e459573eb40..00000000000 --- a/ompi/mpi/man/man3/MPI_Type_free_keyval.3in +++ /dev/null @@ -1,61 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Type_free_keyval 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Type_free_keyval\fP \- Frees a previously created type key value. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Type_free_keyval(int *\fItype_keyval\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_TYPE_FREE_KEYVAL(\fITYPE_KEYVAL, IERROR\fP) - INTEGER \fITYPE_KEYVAL, IERROR \fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Type_free_keyval(\fItype_keyval\fP, \fIierror\fP) - INTEGER, INTENT(INOUT) :: \fItype_keyval\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -type_keyval -Key value to free (integer). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. 
- -.SH SEE ALSO -.ft R -.sp -MPI_Type_create_keyval -.br - diff --git a/ompi/mpi/man/man3/MPI_Type_get_attr.3in b/ompi/mpi/man/man3/MPI_Type_get_attr.3in deleted file mode 100644 index c27c0fa5b21..00000000000 --- a/ompi/mpi/man/man3/MPI_Type_get_attr.3in +++ /dev/null @@ -1,87 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Type_get_attr 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Type_get_attr\fP \- Returns the attribute associated with a data type. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Type_get_attr(MPI_Datatype \fItype\fP, int \fItype_keyval\fP, void *\fIattribute_val\fP, int *\fIflag\fP) - -.fi -.SH Fortran Syntax (see FORTRAN 77 NOTES) -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_TYPE_GET_ATTR(\fITYPE, TYPE_KEYVAL, ATTRIBUTE_VAL, FLAG, IERROR\fP) - INTEGER \fITYPE, TYPE_KEYVAL, IERROR \fP - INTEGER(KIND=MPI_ADDRESS_KIND) \fIATTRIBUTE_VAL\fP - LOGICAL \fIFLAG\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Type_get_attr(\fIdatatype\fP, \fItype_keyval\fP, \fIattribute_val\fP, \fIflag\fP, \fIierror\fP) - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - INTEGER, INTENT(IN) :: \fItype_keyval\fP - INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(OUT) :: \fIattribute_val\fP - LOGICAL, INTENT(OUT) :: \fIflag\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -type -Data type to which the attribute is attached (handle). -.TP 1i -type_keyval -Key value (integer). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -attribute_val -Attribute value, unless \fIflag\fP = false -.TP 1i -flag -"false" if no attribute is associated with the key (logical). -.TP 1i -IERROR -Fortran only: Error status (integer). 
- -.SH DESCRIPTION -For the given data type, MPI_Type_get_attr returns an attribute value that corresponds to the specified key value. - -.SH FORTRAN 77 NOTES -.ft R -The MPI standard prescribes portable Fortran syntax for -the \fIATTRIBUTE_VAL\fP argument only for Fortran 90. Sun FORTRAN 77 -users may use the non-portable syntax -.sp -.nf - INTEGER*MPI_ADDRESS_KIND \fIATTRIBUTE_VAL\fP -.fi -.sp -where MPI_ADDRESS_KIND is a constant defined in mpif.h -and gives the length of the declared integer in bytes. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.sp -MPI_Type_set_attr -.br diff --git a/ompi/mpi/man/man3/MPI_Type_get_contents.3in b/ompi/mpi/man/man3/MPI_Type_get_contents.3in deleted file mode 100644 index 3b4cb1c33e0..00000000000 --- a/ompi/mpi/man/man3/MPI_Type_get_contents.3in +++ /dev/null @@ -1,116 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Type_get_contents 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Type_get_contents\fP \- Returns information about arguments used in creation of a data type. 
- -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Type_get_contents(MPI_Datatype \fIdatatype\fP, int \fImax_integers\fP, - int \fImax_addresses\fP, int \fImax_datatypes\fP, int \fIarray_of_integers\fP[], MPI_Aint \fIarray_of_addresses\fP[], MPI_Datatype array_of_datatypes\fP[]) - -.fi -.SH Fortran Syntax (see FORTRAN 77 NOTES) -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_TYPE_GET_CONTENTS(\fIDATATYPE, MAX_INTEGERS, MAX_ADDRESSES, - MAX_DATATYPES, ARRAY_OF_INTEGERS, ARRAY_OF_ADDRESSES, - ARRAY_OF_DATATYPES, IERROR\fP) - INTEGER \fIDATATYPE, MAX_INTEGERS, MAX_ADDRESSES, MAX_DATATYPES\fP - INTEGER \fIARRAY_OF_INTEGERS(*), ARRAY_OF_DATATYPES(*), IERROR\fP - INTEGER(KIND=MPI_ADDRESS_KIND) \fIARRAY_OF_ADDRESSES\fP(*) - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Type_get_contents(\fIdatatype\fP, \fImax_integers\fP, \fImax_addresses\fP, \fImax_datatypes\fP, - \fIarray_of_integers\fP, \fIarray_of_addresses\fP, \fIarray_of_datatypes,\fP - \fIierror\fP) - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - INTEGER, INTENT(IN) :: \fImax_integers\fP, \fImax_addresses\fP, \fImax_datatypes\fP - INTEGER, INTENT(OUT) :: \fIarray_of_integers(max_integers)\fP - INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(OUT) :: - \fIarray_of_addresses(max_addresses)\fP - TYPE(MPI_Datatype), INTENT(OUT) :: \fIarray_of_datatypes(max_datatypes)\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -datatype -Data type to access (handle). -.TP 1i -max_integers -Number of elements in \fIarray_of_integers\fP (nonnegative integer). -.TP 1i -max_addresses -Number of elements in \fIarray_of_addresses\fP (nonnegative integer). -.TP 1i -max_datatypes -Number of elements in \fIarray_of_datatypes\fP (nonnegative integer). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -array_of_integers -Contains integer arguments used in constructing \fIdatatype\fP (array of integers). 
-.TP 1i -array_of_addresses -Contains address arguments used in constructing \fIdatatype\fP (array of integers). -.TP 1i -array_of_datatypes -Contains data-type arguments used in constructing \fIdatatype\fP (array of integers). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -For the given data type, MPI_Type_get_envelope returns information on the number and type of input arguments used in the call that created the data type. The number-of-arguments values returned can be used to provide sufficiently large arrays in the decoding routine MPI_Type_get_contents. This call and the meaning of the returned values is described below. The combiner reflects the MPI data type constructor call that was used in creating \fIdatatype\fP. - -The parameter \fIdatatype\fP must be a predefined unnamed or a derived data type. The call is erroneous if \fIdatatype\fP is a predefined named data type. -.sp -The values given for \fImax_integers\fP, \fImax_addresses\fP, and \fImax_datatypes\fP must be at least as large as the value returned in \fInum_integers\fP, \fInum_addresses\fP, and \fInum_datatypes\fP, respectively, in the call MPI_Type_get_envelope for the same \fIdatatype\fP argument. -.sp -The data types returned in \fIarray_of_datatypes\fP are handles to data-type objects that are equivalent to the data types used in the original construction call. If these were derived data types, then the returned data types are new data-type objects, and the user is responsible for freeing these datatypes with MPI_Type_free. If these were predefined data types, then the returned data type is equal to that (constant) predefined data type and cannot be freed. -.sp -The committed state of returned derived data types is undefined, that is, the data types may or may not be committed. Furthermore, the content of attributes of returned data types is undefined. 
-.sp -Note that MPI_Type_get_contents can be invoked with a data-type argument that was constructed using MPI_Type_create_f90_real, MPI_Type_create_f90_integer, or MPI_Type_create_f90_complex (an unnamed predefined data type). In such a case, an empty \fIarray_of_datatypes\fP is returned. -.sp -In the MPI-1 data-type constructor calls, the address arguments in Fortran are of type INTEGER. In the new MPI-2 calls, the address arguments are of type INTEGER(KIND=MPI_ADDRESS_KIND). The call MPI_Type_get_contents returns all addresses in an argument of type INTEGER(KIND=MPI_ADDRESS_KIND). This is true even if the old MPI-1 calls were used. Thus, the location of values returned can be thought of as being returned by the C bindings. It can also be determined by examining the new MPI-2 calls for data-type constructors for the deprecated MPI-1 calls that involve addresses. - -.SH FORTRAN 77 NOTES -.ft R -The MPI standard prescribes portable Fortran syntax for -the \fIARRAY_OF_ADDRESSES\fP argument only for Fortran 90. FORTRAN 77 -users may use the non-portable syntax -.sp -.nf - INTEGER*MPI_ADDRESS_KIND \fIARRAY_OF_ADDRESSES\fP(*) -.fi -.sp -where MPI_ADDRESS_KIND is a constant defined in mpif.h -and gives the length of the declared integer in bytes. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. 
- -.SH SEE ALSO -.ft r -MPI_Type_get_envelope -.br - diff --git a/ompi/mpi/man/man3/MPI_Type_get_envelope.3in b/ompi/mpi/man/man3/MPI_Type_get_envelope.3in deleted file mode 100644 index 309e8533ae9..00000000000 --- a/ompi/mpi/man/man3/MPI_Type_get_envelope.3in +++ /dev/null @@ -1,110 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2010-2022 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Type_get_envelope 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Type_get_envelope\fP \- Returns information about input arguments associated with a data type. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Type_get_envelope(MPI_Datatype \fIdatatype\fP, int *\fInum_integers\fP, - int *\fInum_addresses\fP, int *\fInum_datatypes\fP, int *\fIcombiner\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_TYPE_GET_ENVELOPE(\fIDATATYPE, NUM_INTEGERS, NUM_ADDRESSES, - NUM_DATATYPES, COMBINER, IERROR\fP) - INTEGER \fIDATATYPE, NUM_INTEGERS, NUM_ADDRESSES\fP - INTEGER \fINUM_DATATYPES, COMBINER, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Type_get_envelope(\fIdatatype\fP, \fInum_integers\fP, \fInum_addresses\fP, \fInum_datatypes\fP, - \fIcombiner\fP, \fIierror\fP) - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - INTEGER, INTENT(OUT) :: \fInum_integers\fP, \fInum_addresses\fP, \fInum_datatypes,\fP - \fIcombiner\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -datatype -Data type to access (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -num_integers -Number of input integers used in the call constructing \fIcombiner\fP (nonnegative integer). -.TP 1i -num_addresses -Number of input addresses used in the call constructing \fIcombiner\fP (nonnegative integer). 
-.TP 1i -num_datatypes -Number of input data types used in the call constructing \fIcombiner\fP (nonnegative integer). -.TP 1i -combiner -Combiner (state). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -For the given data type, MPI_Type_get_envelope returns information on the number and type of input arguments used in the call that created the data type. The number-of-arguments values returned can be used to provide sufficiently large arrays in the decoding routine MPI_Type_get_contents. This call and the meaning of the returned values is described below. The combiner reflects the MPI data type constructor call that was used in creating \fIdatatype\fP. - -.SH NOTES -.ft R -These are the values that can be returned in \fIcombiner\fP and their associated calls: -.sp -.nf -Values Associated Calls - -MPI_COMBINER_NAMED a named predefined data type -MPI_COMBINER_DUP MPI_Type_dup -MPI_COMBINER_CONTIGUOUS MPI_Type_contiguous -MPI_COMBINER_VECTOR MPI_Type_vector -MPI_COMBINER_HVECTOR MPI_Type_hvector - and MPI_Type_create_hvector -MPI_COMBINER_INDEXED MPI_Type_indexed -MPI_COMBINER_HINDEXED MPI_Type_hindexed - and MPI_Type_create_hindexed -MPI_COMBINER_INDEXED_BLOCK MPI_Type_create_indexed_block -MPI_COMBINER_STRUCT MPI_Type_struct - and MPI_Type_create_struct -MPI_COMBINER_SUBARRAY MPI_Type_create_subarray -MPI_COMBINER_DARRAY MPI_Type_create_darray -MPI_COMBINER_F90_REAL MPI_Type_create_f90_real -MPI_COMBINER_F90_COMPLEX MPI_Type_create_f90_complex -MPI_COMBINER_F90_INTEGER MPI_Type_create_f90_integer -MPI_COMBINER_RESIZED MPI_Type_create_resized -.fi -.sp -If \fIcombiner\fP is MPI_COMBINER_NAMED, then \fIdatatype\fP is a named predefined data type. -.sp -The actual arguments used in the creation call for a data type can be obtained from the call MPI_Type_get_contents. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. 
-.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft r -MPI_Type_get_contents -.br - diff --git a/ompi/mpi/man/man3/MPI_Type_get_extent.3in b/ompi/mpi/man/man3/MPI_Type_get_extent.3in deleted file mode 100644 index e7c1368faca..00000000000 --- a/ompi/mpi/man/man3/MPI_Type_get_extent.3in +++ /dev/null @@ -1,104 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Type_get_extent 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Type_get_extent\fP, \fBMPI_Type_get_extent_x\fP \- Returns the lower bound and extent of a data type. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Type_get_extent(MPI_Datatype \fIdatatype\fP, MPI_Aint\fI *lb\fP, - MPI_Aint *\fIextent\fP) -int MPI_Type_get_extent_x(MPI_Datatype \fIdatatype\fP, MPI_Count\fI *lb\fP, - MPI_Count *\fIextent\fP) - -.fi -.SH Fortran Syntax (see FORTRAN 77 NOTES) -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_TYPE_GET_EXTENT(\fIDATATYPE, LB, EXTENT, IERROR\fP) - INTEGER \fIDATATYPE, IERROR\fP - INTEGER(KIND=MPI_ADDRESS_KIND) \fILB, EXTENT\fP -MPI_TYPE_GET_EXTENT_X(\fIDATATYPE, LB, EXTENT, IERROR\fP) - INTEGER \fIDATATYPE, IERROR\fP - INTEGER(KIND=MPI_COUNT_KIND) \fILB, EXTENT\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Type_get_extent(\fIdatatype\fP, \fIlb\fP, \fIextent\fP, \fIierror\fP) - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(OUT) :: \fIlb\fP, \fIextent\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP -MPI_Type_get_extent_x(\fIdatatype\fP, \fIlb\fP, \fIextent\fP, \fIierror\fP) - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - INTEGER(KIND = MPI_COUNT_KIND), INTENT(OUT) :: \fIlb\fP, \fIextent\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -datatype -Data type (handle). -.sp -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -lb -Lower bound of data type (integer). -.TP 1i -extent -Data type extent (integer). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Type_get_extent returns the lower bound and the extent of \fIdatatype\fP. For either function, if either the \fIlb\fP or \fIextent\fP parameter cannot express the value to be returned (e.g., if the parameter is too small to hold the output value), it is set to MPI_UNDEFINED. - -.SH NOTE -.ft R -Use of MPI_Type_get_extent is strongly recommended over the old MPI-1 functions MPI_Type_extent and MPI_Type_lb. - -.SH FORTRAN 77 NOTES -.ft R -The MPI standard prescribes portable Fortran syntax for -the \fILB\fP and \fIEXTENT\fP arguments only for Fortran 90. 
FORTRAN 77 -users may use the non-portable syntax -.sp -MPI_Type_get_extent: -.sp -.nf - INTEGER*MPI_ADDRESS_KIND \fILB\fP -or - INTEGER*MPI_ADDRESS_KIND \fIEXTENT\fP -.fi -.sp -MPI_Type_get_extent_x: -.sp -.nf - INTEGER*MPI_COUNT_KIND \fILB\fP -or - INTEGER*MPI_COUNT_KIND \fIEXTENT\fP -.fi -.sp -where MPI_ADDRESS_KIND and MPI_COUNT_KIND are constants defined in mpif.h -and give the length of the declared integer in bytes. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. diff --git a/ompi/mpi/man/man3/MPI_Type_get_extent_x.3in b/ompi/mpi/man/man3/MPI_Type_get_extent_x.3in deleted file mode 100644 index 84dc57aa67c..00000000000 --- a/ompi/mpi/man/man3/MPI_Type_get_extent_x.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Type_get_extent.3 diff --git a/ompi/mpi/man/man3/MPI_Type_get_name.3in b/ompi/mpi/man/man3/MPI_Type_get_name.3in deleted file mode 100644 index d636fa56469..00000000000 --- a/ompi/mpi/man/man3/MPI_Type_get_name.3in +++ /dev/null @@ -1,72 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Type_get_name 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Type_get_name\fP \- Gets the name of a data type. 
- -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Type_get_name(MPI_Datatype \fItype\fP, char *\fItype_name\fP, - int *\fIresultlen\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_TYPE_GET_NAME(\fITYPE, TYPE_NAME, RESULTLEN, IERROR\fP) - INTEGER \fITYPE, RESULTLEN, IERROR \fP - CHARACTER*(*) \fITYPE_NAME\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Type_get_name(\fIdatatype\fP, \fItype_name\fP, \fIresultlen\fP, \fIierror\fP) - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - CHARACTER(LEN=MPI_MAX_OBJECT_NAME), INTENT(OUT) :: \fItype_name\fP - INTEGER, INTENT(OUT) :: \fIresultlen\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -type -Data type whose name is to be returned (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -type_name -The name previously stored on the data type, or an empty string if not such name exists (string). -.TP 1i -resultlen -Length of returned name (integer). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Type_get_name returns the printable identifier associated with an MPI data type. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. 
- -.SH SEE ALSO -.ft R -.sp -MPI_Type_set_name -.br - diff --git a/ompi/mpi/man/man3/MPI_Type_get_true_extent.3in b/ompi/mpi/man/man3/MPI_Type_get_true_extent.3in deleted file mode 100644 index a133cdac12b..00000000000 --- a/ompi/mpi/man/man3/MPI_Type_get_true_extent.3in +++ /dev/null @@ -1,103 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Type_get_true_extent 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Type_get_true_extent\fP, \fBMPI_Type_get_true_extent_x\fP \- Returns the true lower bound and extent of a data type's corresponding typemap, ignoring MPI_UB and MPI_LB markers. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Type_get_true_extent(MPI_Datatype \fIdatatype\fP, - MPI_Aint *\fItrue_lb\fP, MPI_Aint *\fItrue_extent\fP) -int MPI_Type_get_true_extent_x(MPI_Datatype \fIdatatype\fP, - MPI_Count *\fItrue_lb\fP, MPI_Count *\fItrue_extent\fP) - -.fi -.SH Fortran Syntax (see FORTRAN 77 NOTES) -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_TYPE_GET_TRUE_EXTENT(\fIDATATYPE, TRUE_LB, TRUE_EXTENT, IERROR\fP) - INTEGER \fIDATATYPE, IERROR\fP - INTEGER(KIND=MPI_ADDRESS_KIND) \fITRUE_LB, TRUE_EXTENT\fP -MPI_TYPE_GET_TRUE_EXTENT_X(\fIDATATYPE, TRUE_LB, TRUE_EXTENT, IERROR\fP) - INTEGER \fIDATATYPE, IERROR\fP - INTEGER(KIND=MPI_COUNT_KIND) \fITRUE_LB, TRUE_EXTENT\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Type_get_true_extent(\fIdatatype\fP, \fItrue_lb\fP, \fItrue_extent\fP, \fIierror\fP) - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(OUT) :: \fItrue_lb\fP, \fItrue_extent\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP -MPI_Type_get_true_extent_x(\fIdatatype\fP, \fItrue_lb\fP, \fItrue_extent\fP, \fIierror\fP) - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - INTEGER(KIND = MPI_COUNT_KIND), INTENT(OUT) :: \fItrue_lb\fP, \fItrue_extent\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -datatype -Data type for which information is wanted (handle). -.sp -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -true_lb -True lower bound of data type (integer). -.TP 1i -true_extent -True size of data type (integer). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -The \fItrue_lb\fP parameter returns the offset of the lowest unit of store that is addressed by the data type, that is, the lower bound of the corresponding typemap, ignoring MPI_LB markers. The \fItrue_extent\fP parameter returns the true size of the data type, that is, the extent of the corresponding typemap, ignoring MPI_LB and MPI_UB markers, and performing no rounding for alignment. For both functions, if either the \fItrue_lb\fP or \fItrue_extent\fP parameter cannot express the value to be returned (e.g., if the parameter is too small to hold the output value), it is set to MPI_UNDEFINED. 
-.sp -The \fItrue_extent\fP is the minimum number of bytes of memory necessary to hold a data type, uncompressed. -.sp -See § 4.1.8 of the MPI-3 standard for more detailed definitions of these parameters in relation to the typemap. - -.SH FORTRAN 77 NOTES -.ft R -The MPI standard prescribes portable Fortran syntax for -the \fITRUE_LB\fP and \fITRUE_EXTENT\fP arguments only for Fortran 90. FORTRAN 77 users may use the non-portable syntax -.sp -MPI_Type_get_true_extent: -.sp -.nf - INTEGER*MPI_ADDRESS_KIND \fITRUE_LB\fP -or - INTEGER*MPI_ADDRESS_KIND \fITRUE_EXTENT\fP -.fi -.sp -MPI_Type_get_true_extent_x: -.sp -.nf - INTEGER*MPI_COUNT_KIND \fITRUE_LB\fP -or - INTEGER*MPI_COUNT_KIND \fITRUE_EXTENT\fP -.fi -.sp -where MPI_ADDRESS_KIND and MPI_COUNT_KIND are constants defined in mpif.h and give the length of the declared integer in bytes. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_Type_get_true_extent_x.3in b/ompi/mpi/man/man3/MPI_Type_get_true_extent_x.3in deleted file mode 100644 index c7e538643c9..00000000000 --- a/ompi/mpi/man/man3/MPI_Type_get_true_extent_x.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Type_get_true_extent.3 diff --git a/ompi/mpi/man/man3/MPI_Type_hindexed.3in b/ompi/mpi/man/man3/MPI_Type_hindexed.3in deleted file mode 100644 index 34336a02704..00000000000 --- a/ompi/mpi/man/man3/MPI_Type_hindexed.3in +++ /dev/null @@ -1,99 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010-2014 Cisco Systems, Inc. 
All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Type_hindexed 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Type_hindexed\fP \- Creates an indexed datatype with offsets in bytes -- use of this routine is deprecated. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Type_hindexed(int \fIcount\fP, int\fI *array_of_blocklengths\fP, - MPI_Aint\fI *array_of_displacements\fP, MPI_Datatype\fI oldtype\fP, - MPI_Datatype\fI *newtype\fP) - -.fi -.SH Fortran Syntax -.nf -INCLUDE 'mpif.h' -MPI_TYPE_HINDEXED(\fICOUNT, ARRAY_OF_BLOCKLENGTHS, - ARRAY_OF_DISPLACEMENTS, OLDTYPE, NEWTYPE, IERROR\fP) - INTEGER \fICOUNT, ARRAY_OF_BLOCKLENGTHS(*)\fP - INTEGER \fIARRAY_OF_DISPLACEMENTS(*), OLDTYPE, NEWTYPE\fP - INTEGER \fIIERROR\fP - - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -count -Number of blocks -- also number of entries in array_of_displacements and -array_of_blocklengths (integer). -.TP 1i -array_of_blocklengths -Number of elements in each block (array of nonnegative integers). -.TP 1i -array_of_displacements -Byte displacement of each block (C: array of -.IR MPI_Aint , -Fortran: array of integer). -.TP 1i -oldtype -Old datatype (handle). -.sp -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -newtype -New datatype (handle). -.sp -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -Note that use of this routine is \fIdeprecated\fP as of MPI-2. Use MPI_Type_create_hindexed instead. -.sp -The function is identical to MPI_Type_indexed, except that block displacements in array_of_displacements are specified in bytes, rather than in multiples of the oldtype extent. -.sp -Assume that oldtype has type map -.sp -.nf - {(type(0), disp(0)), ..., (type(n-1), disp(n-1))}, -.fi -.sp -with extent ex. 
Let B be the array_of_blocklength argument and D be the -array_of_displacements argument. The newly created datatype has -.nf -n x S^count-1 - (i=0) B[i] entries: - - {(type(0), disp(0) + D[0]),...,(type(n-1), disp(n-1) + D[0]),..., - (type(0), disp(0) + (D[0] + B[0]-1)* ex),..., - type(n-1), disp(n-1) + (D[0]+ B[0]-1)* ex),..., - (type(0), disp(0) + D[count-1]),...,(type(n-1), disp(n-1) + D[count-1]),..., - (type(0), disp(0) + D[count-1] + (B[count-1] -1)* ex),..., - (type(n-1), disp(n-1) + D[count-1] + (B[count-1] -1)* ex)} -.fi - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -MPI_Type_create_hindexed -.br -MPI_Type_indexed -.br - diff --git a/ompi/mpi/man/man3/MPI_Type_hvector.3in b/ompi/mpi/man/man3/MPI_Type_hvector.3in deleted file mode 100644 index c869b5cc916..00000000000 --- a/ompi/mpi/man/man3/MPI_Type_hvector.3in +++ /dev/null @@ -1,97 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Type_hvector 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Type_hvector\fP \- Creates a vector (strided) datatype with offset in bytes -- use of this routine is deprecated. 
- -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Type_hvector(int \fIcount\fP, int\fI blocklength\fP, MPI_Aint\fI stride\fP, - MPI_Datatype\fI oldtype\fP, MPI_Datatype\fI *newtype\fP) - -.fi -.SH Fortran Syntax -.nf -INCLUDE 'mpif.h' -MPI_TYPE_HVECTOR(\fICOUNT, BLOCKLENGTH, STRIDE, OLDTYPE, NEWTYPE, - IERROR\fP) - INTEGER \fICOUNT, BLOCKLENGTH, STRIDE, OLDTYPE\fP - INTEGER \fINEWTYPE, IERROR\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -count -Number of blocks (nonnegative integer). -.TP 1i -blocklength -Number of elements in each block (nonnegative integer). -.TP 1i -stride -Number of bytes between start of each block (integer). -.TP 1i -oldtype -Old datatype (handle). -.sp -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -newtype -New datatype (handle). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -Note that use of this routine is \fIdeprecated\fP as of MPI-2. Use MPI_Type_create_hvector instead. -.sp -The function MPI_Type_hvector is identical to MPI_Type_vector, except that -stride is given in bytes, rather than in elements. The use for both types -of vector constructors is illustrated in the examples in Section 3.12.7 of the MPI-1 Standard. -.sp -Assume that oldtype has type map -.sp -.nf - {(type(0), disp(0)), ..., (type(n-1), disp(n-1))} -.fi -.sp -with extent ex. Let bl be the blocklength. 
The newly created datatype has a type map with count * bl * n entries: -.sp -.nf - {(type(0), disp(0)), ..., (type(n-1), disp(n-1)), - (type(0), disp(0) + ex), ..., (type(n-1), disp(n-1) + ex), - ..., (type(0), disp(0) + (bl -1) * ex),...,(type(n-1), - disp(n-1) + (bl -1) * ex), (type(0), disp(0) + stride), - ...,(type(n-1), disp(n-1) + stride), ..., (type(0), - disp(0) + stride + (bl - 1) * ex), ..., (type(n-1), - disp(n-1) + stride + (bl -1) * ex), ..., (type(0), - disp(0) + stride * (count -1)), ...,(type(n-1), - disp(n-1) + stride * (count -1)), ..., (type(0), - disp(0) + stride * (count -1) + (bl -1) * ex), ..., - (type(n-1), disp(n-1) + stride * (count -1) + (bl -1) * ex)} - -.fi -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.sp -MPI_Type_create_hvector -.br -MPI_Type_vector -.br - diff --git a/ompi/mpi/man/man3/MPI_Type_indexed.3in b/ompi/mpi/man/man3/MPI_Type_indexed.3in deleted file mode 100644 index 281f22ad1bc..00000000000 --- a/ompi/mpi/man/man3/MPI_Type_indexed.3in +++ /dev/null @@ -1,159 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010-2014 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. 
-.\" $COPYRIGHT$ -.TH MPI_Type_indexed 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Type_indexed, MPI_Type_create_hindexed\fP \- Creates an indexed datatype. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Type_indexed(int \fIcount\fP, const int\fI array_of_blocklengths[]\fP, - const int\fI array_of_displacements[]\fP, MPI_Datatype\fI oldtype\fP, - MPI_Datatype\fI *newtype\fP) - -int MPI_Type_create_hindexed(int \fIcount\fP, - const int\fI array_of_blocklengths[]\fP, - const MPI_Aint\fI array_of_displacements[]\fP, MPI_Datatype\fI oldtype\fP, - MPI_Datatype\fI *newtype\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_TYPE_INDEXED(\fICOUNT, ARRAY_OF_BLOCKLENGTHS, - ARRAY_OF_DISPLACEMENTS, OLDTYPE, NEWTYPE, IERROR\fP) - INTEGER \fICOUNT, ARRAY_OF_BLOCKLENGTHS(*)\fP - INTEGER \fIARRAY_OF_DISPLACEMENTS(*), OLDTYPE, NEWTYPE\fP - INTEGER \fIIERROR\fP - -MPI_TYPE_CREATE_HINDEXED(\fICOUNT, ARRAY_OF_BLOCKLENGTHS, - ARRAY_OF_DISPLACEMENTS, OLDTYPE, NEWTYPE, IERROR\fP) - INTEGER \fICOUNT, ARRAY_OF_BLOCKLENGTHS(*)\fP - INTEGER \fIOLDTYPE, NEWTYPE\fP - INTEGER(KIND=MPI_ADDRESS_KIND) \fIARRAY_OF_DISPLACEMENTS(*)\fP - INTEGER \fIIERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Type_indexed(\fIcount\fP, \fIarray_of_blocklengths\fP, \fIarray_of_displacements\fP, - \fIoldtype\fP, \fInewtype\fP, \fIierror\fP) - INTEGER, INTENT(IN) :: \fIcount\fP, \fIarray_of_blocklengths(count),\fP - \fIarray_of_displacements(count)\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIoldtype\fP - TYPE(MPI_Datatype), INTENT(OUT) :: \fInewtype\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -MPI_Type_create_hindexed(\fIcount\fP, \fIarray_of_blocklengths\fP, - \fIarray_of_displacements\fP, \fIoldtype\fP, \fInewtype\fP, \fIierror\fP) - INTEGER, INTENT(IN) :: \fIcount\fP, \fIarray_of_blocklengths(count)\fP - INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: - \fIarray_of_displacements(count)\fP - TYPE(MPI_Datatype), 
INTENT(IN) :: \fIoldtype\fP - TYPE(MPI_Datatype), INTENT(OUT) :: \fInewtype\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -count -Number of blocks -- also number of entries in array_of_displacements and -array_of_blocklengths (nonnegative integer). -.TP 1i -array_of_blocklengths -Number of elements per block (array of nonnegative integers). -.TP 1i -array_of_displacements -Displacement for each block, in multiples of oldtype extent for MPI_Type_indexed and bytes for MPI_Type_create_hindexed (array of -integer for -.BR MPI_TYPE_INDEXED , -array of -.I MPI_Aint -for -.BR MPI_TYPE_CREATE_HINDEXED ). -.TP 1i -oldtype -Old datatype (handle). -.sp -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -newtype -New datatype (handle). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -The function MPI_Type_indexed allows replication of an old datatype into a sequence of blocks (each block is a concatenation of the old datatype), where each block can contain a different number of copies and have a different displacement. All block displacements are multiples of the old data type's extent. -.sp - -\fBExample:\fP Let oldtype have type map {(double, 0), (char, 8)}, with extent 16. Let B = (3, 1) and let D = (4, 0). A call to MPI_Type_indexed(2, B, D, oldtype, newtype) returns a datatype with type map -.sp -.nf - {(double, 64), (char, 72), (double, 80), (char, 88), - (double, 96), (char, 104), - (double, 0), (char, 8)} -.fi -.sp -That is, three copies of the old type starting at displacement 4 x 16 = 64, and one copy starting at displacement 0. -.sp -In general, assume that oldtype has type map -.sp -.nf - {(type(0), disp(0)), ..., (type(n-1), disp(n-1))}, -.fi -.sp -with extent ex. Let B be the array_of_blocklength argument and D be the -array_of_displacements argument. 
The newly created datatype has -.br -.nf -n x S ^count-1 - i = 0 B[i] entries: - - {(type(0), disp(0) + D[0]* ex), ..., - (type(n-1), disp(n-1) + D[0]* ex), ..., - (type(0), disp(0) + (D[0] + B[0]-1)* ex), ..., - (type(n-1), disp(n-1) + (D[0]+ B[0]-1)* ex), ..., - (type(0), disp(0) + D[count-1]* ex), ..., - (type(n-1), disp(n-1) + D[count-1]* ex), ..., - (type(0), disp(0) + (D[count-1] + B[count-1] -1)* ex), ..., - (type(n-1), disp(n-1) + (D[count-1] + B[count-1] -1)* ex)} -.fi -.sp -A call to MPI_Type_vector(count, blocklength, stride, oldtype, newtype) is equivalent to a call to MPI_Type_indexed(count, B, D, oldtype, newtype) where -.sp -.nf - D[j] = j * stride, j = 0,..., count-1 - -and - - B[j] = blocklength, j = 0, .., count-1 -.fi - -The function MPI_Type_create_hindexed is identical to MPI_Type_indexed, except that block displacements in \fIarray_of_displacements\fP are specified in bytes, rather than in multiples of the \fIoldtype\fP extent. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.sp -MPI_Type_hindexed -.br - diff --git a/ompi/mpi/man/man3/MPI_Type_lb.3in b/ompi/mpi/man/man3/MPI_Type_lb.3in deleted file mode 100644 index fc21aba60fc..00000000000 --- a/ompi/mpi/man/man3/MPI_Type_lb.3in +++ /dev/null @@ -1,91 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. 
All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Type_lb 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Type_lb\fP \- Returns the lower bound of a data type -- use of this routine is deprecated. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Type_lb(MPI_Datatype \fIdatatype\fP, MPI_Aint\fI *displacement\fP) - -.fi -.SH Fortran Syntax -.nf -INCLUDE 'mpif.h' -MPI_TYPE_LB(\fIDATATYPE, DISPLACEMENT, IERROR\fP) - INTEGER \fIDATATYPE, DISPLACEMENT, IERROR\fP - - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -datatype -Datatype (handle). -.sp -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -displacement -Displacement of lower bound from origin, in bytes (integer). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -Note that use of this routine is \fIdeprecated\fP as of MPI-2. Please use MPI_Type_get_extent instead. -.sp -MPI_Type_lb returns the lower bound of a data type. This may differ from zero if the type was constructed using MPI_LB. -.sp -The "pseudo-datatypes," MPI_LB and MPI_UB, can be used, respectively, to mark the lower bound (or the upper bound) of a datatype. These pseudo-datatypes occupy no space (extent (MPI_LB) = extent (MPI_UB) =0. They do not affect the size or count of a datatype, and do not affect the context of a message created with this datatype. However, they do affect the definition of the extent of a datatype and, therefore, affect the outcome of a replication of this datatype by a datatype constructor. 
-.sp -In general, if -.sp -.nf - Typemap = {(type0, disp0), ..., (type(n-1), disp(n-1)} -.fi -.sp -then the lower bound of Typemap is defined to be -.nf - - (min(j) disp(j) if no entry has - lb(Typemap) = ( basic type lb - (min(j) {disp(j) such that type(j) = lb} otherwise - -.fi -Similarly, the upper bound of Typemap is defined to be -.nf - - (max(j) disp(j) + sizeof((type(j)) + e if no entry has - ub(Typemap) = ( basic type ub - (max(j) {disp(j) such that type(j) = ub} otherwise - -Then - - extent(Typemap) = ub(Typemap) - lb(Typemap) -.fi -.sp -If type(i) requires alignment to a byte address that is a multiple of k(i), -then e is the least nonnegative increment needed to round extent(Typemap) to the next multiple of max(i) k(i). - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.sp -MPI_Type_get_extent -.br - diff --git a/ompi/mpi/man/man3/MPI_Type_match_size.3in b/ompi/mpi/man/man3/MPI_Type_match_size.3in deleted file mode 100644 index 2834ae8b83c..00000000000 --- a/ompi/mpi/man/man3/MPI_Type_match_size.3in +++ /dev/null @@ -1,96 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. 
-.\" $COPYRIGHT$ -.TH MPI_Type_match_size 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME - -\fBMPI_Type_match_size\fP \- Returns an MPI datatype of a given type and size - -.SH SYNTAX -.ft R - -.SH C Syntax -.nf -#include -int MPI_Type_match_size(int \fItypeclass\fP, int \fIsize\fP, - MPI_Datatype *\fItype\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_TYPE_MATCH_SIZE(\fITYPECLASS, SIZE, TYPE, IERROR\fP) - INTEGER \fITYPECLASS, SIZE, TYPE, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Type_match_size(\fItypeclass\fP, \fIsize\fP, \fIdatatype\fP, \fIierror\fP) - INTEGER, INTENT(IN) :: \fItypeclass\fP, \fIsize\fP - TYPE(MPI_Datatype), INTENT(OUT) :: \fIdatatype\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -typeclass -Generic type specifier (integer). -.ft R -.TP 1i -size -Size, in bytes, of representation (integer). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -type -Datatype with correct type and size (handle). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -The function returns an MPI datatype matching a local variable of type -(\fItypeclass\fP, \fIsize\fP). The returned type is a reference -(handle) to a predefined named datatype, not a duplicate. This type -cannot be freed. -.sp -The value of \fItypeclass\fR may be set to one of MPI_TYPECLASS_REAL, -MPI_TYPECLASS_INTEGER, or MPI_TYPECLASS_COMPLEX, corresponding to the -desired datatype. -.sp -MPI_type_match_size can be used to obtain a size-specific type that -matches a Fortran numeric intrinsic type: first call MPI_Sizeof to -compute the variable size, then call MPI_Type_match_size to find a -suitable datatype. In C use the sizeof builtin instead of MPI_Sizeof. -.sp -It is erroneous to specify a size not supported by the compiler. 
- -.SH ERRORS -.ft R -Almost all MPI routines return an error value; C routines as -the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. -.sp -See the MPI man page for a full list of MPI error codes. - -.SH SEE ALSO -.ft R -.nf -MPI_Sizeof -MPI_Type_get_extent - diff --git a/ompi/mpi/man/man3/MPI_Type_set_attr.3in b/ompi/mpi/man/man3/MPI_Type_set_attr.3in deleted file mode 100644 index bd2911ba3ea..00000000000 --- a/ompi/mpi/man/man3/MPI_Type_set_attr.3in +++ /dev/null @@ -1,87 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Type_set_attr 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Type_set_attr\fP \- Sets a key value/attribute pair to a data type. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Type_set_attr(MPI_Datatype \fItype\fP, int \fItype_keyval\fP, - void *\fIattribute_val\fP) - -.fi -.SH Fortran Syntax (see FORTRAN 77 NOTES) -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_TYPE_SET_ATTR(\fITYPE, TYPE_KEYVAL, ATTRIBUTE_VAL, IERROR\fP) - INTEGER \fITYPE, TYPE_KEYVAL, IERROR\fP - INTEGER(KIND=MPI_ADDRESS_KIND) \fIATTRIBUTE_VAL\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Type_set_attr(\fIdatatype\fP, \fItype_keyval\fP, \fIattribute_val\fP, \fIierror\fP) - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - INTEGER, INTENT(IN) :: \fItype_keyval\fP - INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: \fIattribute_val\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -type -Data type to which attribute will be attached (handle). - -.SH INPUT PARAMETERS -.ft R -.TP 1i -type_keyval -Key value (integer). -.TP 1i -attribute_val -Attribute value. - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -For the given data type, MPI_Type_set_attr sets the key value to the value of the specified attribute. - -.SH FORTRAN 77 NOTES -.ft R -The MPI standard prescribes portable Fortran syntax for -the \fIATTRIBUTE_VAL\fP argument only for Fortran 90. FORTRAN 77 -users may use the non-portable syntax -.sp -.nf - INTEGER*MPI_ADDRESS_KIND \fIATTRIBUTE_VAL\fP -.fi -.sp -where MPI_ADDRESS_KIND is a constant defined in mpif.h -and gives the length of the declared integer in bytes. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. 
- -.SH SEE ALSO -.ft R -.sp -MPI_Type_get_attr -.br - diff --git a/ompi/mpi/man/man3/MPI_Type_set_name.3in b/ompi/mpi/man/man3/MPI_Type_set_name.3in deleted file mode 100644 index 9265458ca97..00000000000 --- a/ompi/mpi/man/man3/MPI_Type_set_name.3in +++ /dev/null @@ -1,72 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Type_set_name 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Type_set_name\fP \- Sets the name of a data type. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Type_set_name(MPI_Datatype \fItype\fP, const char *\fItype_name\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_TYPE_SET_NAME(\fITYPE, TYPE_NAME, IERROR\fP) - INTEGER \fITYPE, IERROR\fP - CHARACTER*(*) \fITYPE_NAME\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Type_set_name(\fIdatatype\fP, \fItype_name\fP, \fIierror\fP) - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - CHARACTER(LEN=*), INTENT(IN) :: \fItype_name\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -type -Data type for which the identifier is to be set (handle). - -.SH INPUT PARAMETER -.ft R -.TP 1i -type_name -The character string remembered as the name (string). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - - -.SH DESCRIPTION -.ft R -MPI_Type_set_name associates a printable identifier with an MPI data type. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. 
By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.sp -MPI_Type_get_name -.br - diff --git a/ompi/mpi/man/man3/MPI_Type_size.3in b/ompi/mpi/man/man3/MPI_Type_size.3in deleted file mode 100644 index cc2c78c3761..00000000000 --- a/ompi/mpi/man/man3/MPI_Type_size.3in +++ /dev/null @@ -1,83 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Type_size 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Type_size\fP, \fBMPI_Type_size_x\fP \- Returns the number of bytes occupied by entries in a data type. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Type_size(MPI_Datatype \fIdatatype\fP, int\fI *size\fP) -int MPI_Type_size_x(MPI_Datatype \fIdatatype\fP, MPI_Count\fI *size\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_TYPE_SIZE(\fIDATATYPE, SIZE, IERROR\fP) - INTEGER \fIDATATYPE, SIZE, IERROR\fP -MPI_TYPE_SIZE_X(\fIDATATYPE, SIZE, IERROR\fP) - INTEGER \fIDATATYPE\fP - INTEGER(KIND=MPI_COUNT_KIND) \fISIZE\fP - INTEGER \fIIERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Type_size(\fIdatatype\fP, \fIsize\fP, \fIierror\fP) - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - INTEGER, INTENT(OUT) :: \fIsize\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP -MPI_Type_size_x(\fIdatatype\fP, \fIsize\fP, \fIierror\fP) - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - INTEGER(KIND=MPI_COUNT_KIND), INTENT(OUT) :: \fIsize\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -datatype -Datatype (handle). -.sp - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -size -Datatype size (integer). -.sp -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Type_size returns the total size, in bytes, of the entries in the type signature associated with datatype; i.e., the total size of the data in a message that would be created with this datatype. Entries that occur multiple times in the datatype are counted with their multiplicity. For either function, if the \fIsize\fP parameter cannot express the value to be returned (e.g., if the parameter is too small to hold the output value), it is set to MPI_UNDEFINED. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. 
- -.SH FORTRAN 77 NOTES -.ft R -The MPI standard prescribes portable Fortran syntax for the \fISIZE\fP argument of MPI_Type_size_x only for Fortran 90. FORTRAN 77 users may use the non-portable syntax -.sp -.nf - INTEGER*MPI_COUNT_KIND \fISIZE\fP -.fi -.sp -where MPI_COUNT_KIND is a constant defined in mpif.h and gives the length of the declared integer in bytes. - diff --git a/ompi/mpi/man/man3/MPI_Type_size_x.3in b/ompi/mpi/man/man3/MPI_Type_size_x.3in deleted file mode 100644 index 4e33322d255..00000000000 --- a/ompi/mpi/man/man3/MPI_Type_size_x.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Type_size.3 diff --git a/ompi/mpi/man/man3/MPI_Type_struct.3in b/ompi/mpi/man/man3/MPI_Type_struct.3in deleted file mode 100644 index de6b602dd8e..00000000000 --- a/ompi/mpi/man/man3/MPI_Type_struct.3in +++ /dev/null @@ -1,117 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Type_struct 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Type_struct\fP \- Creates a \fIstruct\fP data type -- use of this routine is deprecated. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Type_struct(int \fIcount\fP, int\fI *array_of_blocklengths\fP, - MPI_Aint\fI *array_of_displacements\fP, MPI_Datatype\fI *array_of_types\fP, - MPI_Datatype\fI *newtype\fP) - -.fi -.SH Fortran Syntax -.nf -INCLUDE 'mpif.h' -MPI_TYPE_STRUCT(\fICOUNT, ARRAY_OF_BLOCKLENGTHS, - ARRAY_OF_DISPLACEMENTS, ARRAY_OF_TYPES, - NEWTYPE, IERROR\fP) - INTEGER \fICOUNT, ARRAY_OF_BLOCKLENGTHS(*)\fP - INTEGER \fIARRAY_OF_DISPLACEMENTS(*)\fP - INTEGER \fIARRAY_OF_TYPES(*), NEWTYPE, IERROR\fP - - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -count -Number of blocks (integer) also number of entries in arrays -array_of_types, array_of_displacements, and array_of_blocklengths. 
-.TP 1i -array_of_blocklengths -Number of elements in each block (array). -.TP 1i -array_of_displacements -Byte displacement of each block (array). -.TP 1i -array_of_types -Type of elements in each block (array of handles to datatype objects). -.sp - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -newtype -New datatype (handle). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -Note that use of this routine is \fIdeprecated\fP as of MPI-2. Use MPI_Type_create_struct instead. -.sp -MPI_Type_struct is the most general type constructor. It further generalizes MPI_Type_hindexed in that it allows each block to consist of replications of different datatypes. -.sp -\fBExample:\fP Let type1 have type map -.nf - - {(double, 0), (char, 8)} - -.fi -with extent 16. Let B = (2, 1, 3), D = (0, 16, 26), and T = (MPI_FLOAT, type1, MPI_CHAR). Then a call to MPI_Type_struct(3, B, D, T, newtype) returns a datatype with type map -.nf - - {(float, 0), (float,4), (double, 16), (char, 24), - (char, 26), (char, 27), (char, 28)} - -.fi -That is, two copies of MPI_FLOAT starting at 0, followed by one copy of type1 starting at 16, followed by three copies of MPI_CHAR, starting at 26. (We assume that a float occupies 4 bytes.) -.sp -For more information, see section 3.12.1 of the MPI-1.1 Standard. - -.SH NOTES -If an upperbound is set explicitly by using the MPI datatype MPI_UB, the corresponding index must be positive. -.sp -The MPI-1 Standard originally made vague statements about padding and alignment; this was intended to allow the simple definition of structures that could be sent with a count greater than one. For example, -.nf - struct {int a; char b;} foo; -.fi -may have -.nf - sizeof(foo) = sizeof(int) + sizeof(char); -.fi -defining the extent of a datatype as including an epsilon, which would have allowed an implementation to make the extent an MPI datatype for this structure equal to 2*sizeof(int). 
However, since different systems might define different paddings, a clarification to the standard made epsilon zero. Thus, if you define a structure datatype and wish to send or receive multiple items, you should explicitly include an MPI_UB entry as the last member of the structure. For example, the following code can be used for the structure foo: -.nf - - blen[0] = 1; indices[0] = 0; oldtypes[0] = MPI_INT; - blen[1] = 1; indices[1] = &foo.b - &foo; oldtypes[1] = MPI_CHAR; - blen[2] = 1; indices[2] = sizeof(foo); oldtypes[2] = MPI_UB; - MPI_Type_struct( 3, blen, indices, oldtypes, &newtype ); - -.fi - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.sp -MPI_Type_create_struct -.br -MPI_Type_create_hindexed -.br - diff --git a/ompi/mpi/man/man3/MPI_Type_ub.3in b/ompi/mpi/man/man3/MPI_Type_ub.3in deleted file mode 100644 index aad0d6b227a..00000000000 --- a/ompi/mpi/man/man3/MPI_Type_ub.3in +++ /dev/null @@ -1,94 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Type_ub 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Type_ub\fP \- Returns the upper bound of a datatype -- use of this routine is deprecated. 
- -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Type_ub(MPI_Datatype \fIdatatype\fP, MPI_Aint\fI *displacement\fP) - -.fi -.SH Fortran Syntax -.nf -INCLUDE 'mpif.h' -MPI_TYPE_UB(\fIDATATYPE, DISPLACEMENT, IERROR\fP) - INTEGER \fIDATATYPE, DISPLACEMENT, IERROR\fP - - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -datatype -Datatype (handle). -.sp - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -displacement -Displacement of upper bound from origin, in bytes (integer). -.sp -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -Note that use of this routine is \fIdeprecated\fP as of MPI-2. Please use MPI_Type_get_extent instead. -.sp -MPI_Type_ub returns the upper bound of a data type. This will differ from zero if the type was constructed using MPI_UB. The upper bound will take into account any alignment considerations. -.sp -The "pseudo-datatypes," MPI_LB and MPI_UB, can be used, respectively, to mark the upper bound (or the lower bound) of a datatype. These pseudo-datatypes occupy no space (extent (MPI_LB) = extent (MPI_UB) =0. They do not affect the size or count of a datatype, and do not affect the context of a message created with this datatype. However, they do affect the definition of the extent of a datatype and, therefore, affect the outcome of a replication of this datatype by a datatype constructor. 
-.sp -In general, if -.nf - - Typemap = {(type(0), disp(0)), ..., (type(n-1), disp(n-1))} - -.fi -then the lower bound of Typemap is defined to be -.nf - - (min(j) disp(j) if no entry has - lb(Typemap) = ( basic type lb - (min(j) {disp(j) such that type(j) = lb} otherwise - -.fi -Similarly, the upper bound of Typemap is defined to be -.nf - - (max(j) disp(j) + sizeof(type(j) = lb} if no entry has - ub(Typemap) = ( basic type ub - (max(j) {disp(j) such that type(j) = ub} otherwise - -.fi -Then -.nf - - extent(Typemap) = ub(Typemap) - lb(Typemap) - -.fi -If type(i) requires alignment to a byte address that is a multiple of k(i), then e is the least nonnegative increment needed to round extent(Typemap) to the next multiple of max(i) k(i). - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.sp -MPI_Type_get_extent -.br - diff --git a/ompi/mpi/man/man3/MPI_Type_vector.3in b/ompi/mpi/man/man3/MPI_Type_vector.3in deleted file mode 100644 index 85d8533833f..00000000000 --- a/ompi/mpi/man/man3/MPI_Type_vector.3in +++ /dev/null @@ -1,126 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Type_vector 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Type_vector\fP \- Creates a vector (strided) datatype. 
- -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Type_vector(int \fIcount\fP, int\fI blocklength\fP, int\fI stride\fP, - MPI_Datatype\fI oldtype\fP, MPI_Datatype\fI *newtype\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_TYPE_VECTOR(\fICOUNT, BLOCKLENGTH, STRIDE, OLDTYPE, NEWTYPE, - IERROR\fP) - INTEGER \fICOUNT, BLOCKLENGTH, STRIDE, OLDTYPE\fP - INTEGER \fINEWTYPE, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Type_vector(\fIcount\fP, \fIblocklength\fP, \fIstride\fP, \fIoldtype\fP, \fInewtype\fP, \fIierror\fP) - INTEGER, INTENT(IN) :: \fIcount\fP, \fIblocklength\fP, \fIstride\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIoldtype\fP - TYPE(MPI_Datatype), INTENT(OUT) :: \fInewtype\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -count -Number of blocks (nonnegative integer). -.TP 1i -blocklength -Number of elements in each block (nonnegative integer). -.TP 1i -stride -Number of elements between start of each block (integer). -.TP 1i -oldtype -Old datatype (handle). -.sp - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -newtype -New datatype (handle). -.sp -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -The function MPI_Type_vector is a general constructor that allows replication of a datatype into locations that consist of equally spaced blocks. Each block is obtained by concatenating the same number of copies of the old datatype. The spacing between blocks is a multiple of the extent of the old datatype. -.sp -\fBExample 1:\fP Assume, again, that oldtype has type map {(double, 0), (char, 8)}, with extent 16. 
A call to MPI_Type_vector(2, 3, 4, oldtype, newtype) will create the datatype with type map -.nf - {(double, 0), (char, 8), (double, 16), (char, 24), - (double, 32), (char, 40), - (double, 64), (char, 72), - (double, 80), (char, 88), (double, 96), (char, 104)} -.fi -.sp -That is, two blocks with three copies each of the old type, with a stride of 4 elements (4 x 16 bytes) between the blocks. -.sp -\fBExample 2:\fP A call to MPI_Type_vector(3, 1, -2, oldtype, newtype) will create the datatype -.nf - - {(double, 0), (char, 8), (double, -32), (char, -24), - (double, -64), (char, -56)} - -.fi -In general, assume that oldtype has type map -.nf - - {(type(0), disp(0)), ..., (type(n-1), disp(n-1))}, - -.fi -with extent ex. Let bl be the blocklength. The newly created datatype has a type map with count x bl x n entries: -.nf - - {(type(0), disp(0)), ..., (type(n-1), disp(n-1)), - (type(0), disp(0) + ex), ..., (type(n-1), disp(n-1) + ex), ..., - (type(0), disp(0) + (bl -1) * ex),..., - (type(n-1), disp(n-1) + (bl -1)* ex), - (type(0), disp(0) + stride * ex),..., (type(n-1), - disp(n-1) + stride * ex), ..., - (type(0), disp(0) + (stride + bl - 1) * ex), ..., - (type(n-1), disp(n-1) + (stride + bl -1) * ex), ..., - (type(0), disp(0) + stride * (count -1) * ex), ..., - (type(n-1), disp(n-1) + stride * (count -1) * ex), ..., - (type(0), disp(0) + (stride * (count -1) + bl -1) * ex), ..., - (type(n-1), disp(n-1) + (stride * (count -1) + bl -1) * ex)} - -.fi -A call to MPI_Type_contiguous(count, oldtype, newtype) is equivalent to a call to MPI_Type_vector(count, 1, 1, oldtype, newtype), or to a call to MPI_Type_vector(1, count, n, oldtype, newtype), n arbitrary. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. 
The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.sp -MPI_Type_create_hvector -.br -MPI_Type_hvector -.br - diff --git a/ompi/mpi/man/man3/MPI_Unpack.3in b/ompi/mpi/man/man3/MPI_Unpack.3in deleted file mode 100644 index 33ca9e85fb9..00000000000 --- a/ompi/mpi/man/man3/MPI_Unpack.3in +++ /dev/null @@ -1,111 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Unpack 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Unpack\fP \- Unpacks a datatype into contiguous memory. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Unpack(const void *\fIinbuf\fP, int\fI insize\fP, int\fI *position\fP, - void\fI *outbuf\fP, int\fI outcount\fP, MPI_Datatype\fI datatype\fP, - MPI_Comm\fI comm\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_UNPACK(\fIINBUF, INSIZE, POSITION, OUTBUF, OUTCOUNT, - DATATYPE, COMM, IERROR\fP) - \fIINBUF(*), OUTBUF(*)\fP - INTEGER \fIINSIZE, POSITION, OUTCOUNT, DATATYPE, - COMM, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Unpack(\fIinbuf\fP, \fIinsize\fP, \fIposition\fP, \fIoutbuf\fP, \fIoutcount\fP, \fIdatatype\fP, \fIcomm\fP, - \fIierror\fP) - TYPE(*), DIMENSION(..), INTENT(IN) :: \fIinbuf\fP - TYPE(*), DIMENSION(..) 
:: \fIoutbuf\fP - INTEGER, INTENT(IN) :: \fIinsize\fP, \fIoutcount\fP - INTEGER, INTENT(INOUT) :: \fIposition\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -inbuf -Input buffer start (choice). -.TP 1i -insize -Size of input buffer, in bytes (integer). -.TP 1i -outcount -Number of items to be unpacked (integer). -.TP 1i -datatype -Datatype of each output data item (handle). -.TP 1i -comm -Communicator for packed message (handle). -.sp -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -position -Current position in bytes (integer). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -outbuf -Output buffer start (choice). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -Unpacks a message into the receive buffer specified by outbuf, outcount, datatype from the buffer space specified by inbuf and insize. The output buffer can be any communication buffer allowed in MPI_Recv. The input buffer is a contiguous storage area containing insize bytes, starting at address inbuf. The input value of position is the first location in the input buffer occupied by the packed message. \fIposition\fP is incremented by the size of the packed message, so that the output value of position is the first location in the input buffer after the locations occupied by the message that was unpacked. \fIcomm\fP is the communicator used to receive the packed message. - -.SH NOTES -Note the difference between MPI_Recv and MPI_Unpack: In MPI_Recv, the \fIcount\fP argument specifies the maximum number of items that can be received. The actual number of items received is determined by the length of the incoming message. In MPI_Unpack, the count argument specifies the actual number of items that are to be unpacked; the "size" of the corresponding message is the increment in position. 
The reason for this change is that the "incoming message size" is not predetermined since the user decides how much to unpack; nor is it easy to determine the "message size" from the number of items to be unpacked. -.sp -To understand the behavior of pack and unpack, it is convenient to think of the data part of a message as being the sequence obtained by concatenating the successive values sent in that message. The pack operation stores this sequence in the buffer space, as if sending the message to that buffer. The unpack operation retrieves this sequence from buffer space, as if receiving a message from that buffer. (It is helpful to think of internal Fortran files or sscanf in C for a similar function.) -.sp -Several messages can be successively packed into one packing unit. This is effected by several successive related calls to MPI_Pack, where the first call provides position = 0, and each successive call inputs the value of position that was output by the previous call, and the same values for outbuf, outcount, and comm. This packing unit now contains the equivalent information that would have been stored in a message by one send call with a send buffer that is the "concatenation" of the individual send buffers. -.sp -A packing unit can be sent using type MPI_Packed. Any point-to-point or collective communication function can be used to move the sequence of bytes that forms the packing unit from one process to another. This packing unit can now be received using any receive operation, with any datatype: The type-matching rules are relaxed for messages sent with type MPI_Packed. -.sp -A message sent with any type (including MPI_Packed) can be received using the type MPI_Packed. Such a message can then be unpacked by calls to MPI_Unpack. -.sp -A packing unit (or a message created by a regular, "typed" send) can be unpacked into several successive messages. 
This is effected by several successive related calls to MPI_Unpack, where the first call provides position = 0, and each successive call inputs the value of position that was output by the previous call, and the same values for inbuf, insize, and comm. -.sp -The concatenation of two packing units is not necessarily a packing unit; nor is a substring of a packing unit necessarily a packing unit. Thus, one cannot concatenate two packing units and then unpack the result as one packing unit; nor can one unpack a substring of a packing unit as a separate packing unit. Each packing unit that was created by a related sequence of pack calls or by a regular send must be unpacked as a unit, by a sequence of related unpack calls. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -MPI_Pack -.br -MPI_Pack_size - diff --git a/ompi/mpi/man/man3/MPI_Unpack_external.3in b/ompi/mpi/man/man3/MPI_Unpack_external.3in deleted file mode 100644 index 2697ea3e415..00000000000 --- a/ompi/mpi/man/man3/MPI_Unpack_external.3in +++ /dev/null @@ -1,182 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. 
-.\" $COPYRIGHT$ -.TH MPI_Unpack_external 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" - -.SH NAME -\fBMPI_Unpack_external\fP \- Reads data from a portable format - -.SH SYNTAX -.ft R - -.SH C Syntax -.nf -#include -int MPI_Unpack_external(const char \fIdatarep\fP[], const void *\fIinbuf\fP, - MPI_Aint \fIinsize\fP, MPI_Aint *\fIposition\fP, - void *\fIoutbuf\fP, int \fIoutcount\fP, - MPI_Datatype \fIdatatype\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_UNPACK_EXTERNAL(\fIDATAREP, INBUF, INSIZE, POSITION, - OUTBUF, OUTCOUNT, DATATYPE, IERROR\fP) - - INTEGER \fIOUTCOUNT, DATATYPE, IERROR\fP - INTEGER(KIND=MPI_ADDRESS_KIND) \fIINSIZE, POSITION\fP - CHARACTER*(*) \fIDATAREP\fP - \fIINBUF(*), OUTBUF(*)\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Unpack_external(\fIdatarep\fP, \fIinbuf\fP, \fIinsize\fP, \fIposition\fP, \fIoutbuf\fP, \fIoutcount\fP, - \fIdatatype\fP, \fIierror\fP) - CHARACTER(LEN=*), INTENT(IN) :: \fIdatarep\fP - TYPE(*), DIMENSION(..), INTENT(IN) :: \fIinbuf\fP - TYPE(*), DIMENSION(..) :: \fIoutbuf\fP - INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: \fIinsize\fP - INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(INOUT) :: \fIposition\fP - INTEGER, INTENT(IN) :: \fIoutcount\fP - TYPE(MPI_Datatype), INTENT(IN) :: \fIdatatype\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -datarep -Data Representation (string). -.ft R -.TP 1i -inbuf -Input buffer start (choice). -.TP 1i -insize -Size of input buffer, in bytes (integer). -.TP 1i -outcount -Number of items to be unpacked (integer). -.TP 1i -datatype -Datatype of each output data item (handle). - -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -position -Current position in buffer, in bytes (integer). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -outbuf -Output buffer start (choice). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). 
- -.SH DESCRIPTION -.ft R -MPI_Unpack_external unpacks data from the external32 format, a -universal data representation defined by the MPI Forum. This format is -useful for exchanging data between MPI implementations, or when -writing data to a file. -.sp -The input buffer is a contiguous storage area pointed to by -\fIinbuf\fP containing \fIinsize\fP bytes. The output buffer can be -any communication buffer allowed in MPI_Recv, and is specified by -\fIoutbuf\fP, \fIoutcount\fP, and \fIdatatype\fP. -.sp -The input value of \fIposition\fP is the first position in \fIinbuf\fP -to be read for unpacking (measured in bytes, not elements, relative to -the start of the buffer). When the function returns, \fIposition\fP is -incremented by the size of the packed message, so that it points to -the first location in \fIinbuf\fP following the message that was -unpacked. This way it may be used as input to a subsequent call to -MPI_Unpack_external. - -.SH NOTES -.ft R -Note the difference between MPI_Recv and MPI_Unpack_external: In -MPI_Recv, the \fIcount\fP argument specifies the maximum number of -items that can be received. In MPI_Unpack_external, the \fIoutcount\fP -argument specifies the actual number of items that are to be -unpacked. With a regular receive operation, the incoming message size -determines the number of components that will be received. With -MPI_Unpack_external, it is up to the user to specify how many -components to unpack, since the user may wish to unpack the received -message multiple times into various buffers. -.sp -To understand the behavior of pack and unpack, it is convenient to -think of the data part of a message as being the sequence obtained by -concatenating the successive values sent in that message. The pack -operation stores this sequence in the buffer space, as if sending the -message to that buffer. The unpack operation retrieves this sequence -from buffer space, as if receiving a message from that buffer. 
(It is -helpful to think of internal Fortran files or sscanf in C for a -similar function.) -.sp -Several messages can be successively packed into one packing -unit. This is effected by several successive related calls to -MPI_Pack_external, where the first call provides \fIposition\fP=0, -and each successive call inputs the value of \fIposition\fP that was -output by the previous call, along with the same values for -\fIoutbuf\fP and \fIoutcount\fP. This packing unit now contains the -equivalent information that would have been stored in a message by one -send call with a send buffer that is the "concatenation" of the -individual send buffers. -.sp -A packing unit can be sent using type MPI_BYTE. Any point-to-point -or collective communication function can be used to move the sequence -of bytes that forms the packing unit from one process to another. This -packing unit can now be received using any receive operation, with any -datatype: The type-matching rules are relaxed for messages sent with -type MPI_BYTE. -.sp -A packing unit can be unpacked into several successive messages. This -is effected by several successive related calls to -MPI_Unpack_external, where the first call provides \fIposition\fP=0, -and each successive call inputs the value of position that was output -by the previous call, and the same values for \fIinbuf\fP and -\fIinsize\fP. -.sp -The concatenation of two packing units is not necessarily a packing -unit; nor is a substring of a packing unit necessarily a packing -unit. Thus, one cannot concatenate two packing units and then unpack -the result as one packing unit; nor can one unpack a substring of a -packing unit as a separate packing unit. Each packing unit that was -created by a related sequence of pack calls must be unpacked as a unit -by a sequence of related unpack calls. - -.SH ERRORS -.ft R -Almost all MPI routines return an error value; C routines as -the value of the function and Fortran routines in the last argument. 
-.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. -.sp -See the MPI man page for a full list of MPI error codes. - -.SH SEE ALSO -.ft R -.nf -MPI_Pack_external -MPI_Pack_external_size -MPI_Recv -sscanf(3C) - diff --git a/ompi/mpi/man/man3/MPI_Unpublish_name.3in b/ompi/mpi/man/man3/MPI_Unpublish_name.3in deleted file mode 100644 index ca70aba504c..00000000000 --- a/ompi/mpi/man/man3/MPI_Unpublish_name.3in +++ /dev/null @@ -1,131 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2007-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Unpublish_name 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" - -.SH NAME -.nf -\fBMPI_Unpublish_name\fP \- Unpublishes a service name - -.fi -.SH SYNTAX -.ft R - -.SH C Syntax -.nf -#include -int MPI_Unpublish_name(const char *\fIservice_name\fP, MPI_Info \fIinfo\fP, - const char *\fIport_name\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_UNPUBLISH_NAME(\fISERVICE_NAME, INFO, PORT_NAME, IERROR\fP) - CHARACTER*(*) \fISERVICE_NAME, PORT_NAME\fP - INTEGER \fIINFO, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Unpublish_name(\fIservice_name\fP, \fIinfo\fP, \fIport_name\fP, \fIierror\fP) - CHARACTER(LEN=*), INTENT(IN) :: \fIservice_name\fP, \fIport_name\fP - TYPE(MPI_Info), INTENT(IN) :: \fIinfo\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1.4i -service_name -A service name (string). -.TP 1.4i -info -Options to the name service functions (handle). -.ft R -.TP 1.4i -port_name -A port name (string). - -.SH OUTPUT PARAMETER -.TP 1.4i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -This routine removes the pair (\fIservice_name, port_name\fP) so that -applications may no longer retrieve \fIport_name\fP by calling -MPI_Lookup_name. It is an error to unpublish a \fIservice_name\fP -that was not published via MPI_Publish_name. Both the \fIservice_name\fP -and \fIport_name\fP arguments to MPI_Unpublish_name must be identical -to the arguments to the previous call to MPI_Publish_name. - -.SH INFO ARGUMENTS -The following keys for \fIinfo\fP are recognized: -.sp -.sp -.nf -Key Type Description ---- ---- ----------- - -ompi_global_scope bool If set to true, unpublish the name from - the global scope. Unpublish from the local - scope otherwise. See the NAME SCOPE - section for more details. - -.fi - -.sp -\fIbool\fP info keys are actually strings but are evaluated as -follows: if the string value is a number, it is converted to an -integer and cast to a boolean (meaning that zero integers are false -and non-zero values are true). If the string value is -(case-insensitive) "yes" or "true", the boolean is true. If the -string value is (case-insensitive) "no" or "false", the boolean is -false. All other string values are unrecognized, and therefore false. 
-.PP -If no info key is provided, the function will first check to see if a -global server has been specified and is available. If so, then the -unpublish function will default to global scope first, followed by local. Otherwise, -the data will default to unpublish with local scope. - -.SH NAME SCOPE -Open MPI supports two name scopes: \fIglobal\fP and \fIlocal\fP. Local scope -values are placed in a data store located on the mpirun of the calling -process' job, while global scope values reside on a central server. Calls -to MPI_Unpublish_name must correctly specify the scope to be used in -finding the value to be removed. The function will return an error if -the specified service name is not found on the indicated location. -.sp -For a more detailed description of scoping rules, please see the MPI_Publish_name -man page. - -.SH ERRORS -.ft R -Almost all MPI routines return an error value; C routines as -the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN -may be used to cause error values to be returned. Note that MPI does not -guarantee that an MPI program can continue past an error. -.sp -See the MPI man page for a full list of MPI error codes. - -.SH SEE ALSO -.ft R -.nf -MPI_Publish_name -MPI_Lookup_name -MPI_Open_port - - diff --git a/ompi/mpi/man/man3/MPI_Wait.3in b/ompi/mpi/man/man3/MPI_Wait.3in deleted file mode 100644 index 0d1434e00ff..00000000000 --- a/ompi/mpi/man/man3/MPI_Wait.3in +++ /dev/null @@ -1,122 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2007-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. 
-.\" $COPYRIGHT$ -.TH MPI_Wait 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Wait\fP \- Waits for an MPI send or receive to complete. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Wait(MPI_Request *\fIrequest\fP, MPI_Status\fI *status\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_WAIT(\fIREQUEST, STATUS, IERROR\fP) - INTEGER \fIREQUEST, STATUS(MPI_STATUS_SIZE), IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Wait(\fIrequest\fP, \fIstatus\fP, \fIierror\fP) - TYPE(MPI_Request), INTENT(INOUT) :: \fIrequest\fP - TYPE(MPI_Status) :: \fIstatus\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -request -Request (handle). -.sp -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -status -Status object (status). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -A call to MPI_Wait returns when the operation identified by request is complete. If the communication object associated with this request was created by a nonblocking send or receive call, then the object is deallocated by the call to MPI_Wait and the request handle is set to MPI_REQUEST_NULL. -.sp -The call returns, in status, information on the completed operation. The content of the status object for a receive operation can be accessed as described in Section 3.2.5 of the MPI-1 Standard, "Return Status." The status object for a send operation may be queried by a call to MPI_Test_cancelled (see Section 3.8 of the MPI-1 Standard, "Probe and Cancel"). -.sp -If your application does not need to examine the \fIstatus\fP field, you can save resources by using the predefined constant MPI_STATUS_IGNORE as a special value for the \fIstatus\fP argument. -.sp -One is allowed to call MPI_Wait with a null or inactive request argument. In this case the operation returns immediately with empty status. 
- -.SH NOTES -Successful return of MPI_Wait after an MPI_Ibsend implies that the user send buffer can be reused i.e., data has been sent out or copied into a buffer attached with MPI_Buffer_attach. Note that, at this point, we can no longer cancel the send (for more information, see Section 3.8 of the MPI-1 Standard, "Probe and Cancel"). If a matching receive is never posted, then the buffer cannot be freed. This runs somewhat counter to the stated goal of MPI_Cancel (always being able to free program space that was committed to the communication subsystem). -.sp -Example: Simple usage of nonblocking operations and MPI_Wait. -.sp -.nf - CALL MPI_COMM_RANK(comm, rank, ierr) - IF(rank.EQ.0) THEN - CALL MPI_ISEND(a(1), 10, MPI_REAL, 1, tag, comm, request, ierr) - **** do some computation **** - CALL MPI_WAIT(request, status, ierr) - ELSE - CALL MPI_IRECV(a(1), 15, MPI_REAL, 0, tag, comm, request, ierr) - **** do some computation **** - CALL MPI_WAIT(request, status, ierr) - END IF - -.fi -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -MPI_Comm_set_errhandler, MPI_File_set_errhandler, or -MPI_Win_set_errhandler (depending on the type of MPI handle that -generated the request); the predefined error handler MPI_ERRORS_RETURN -may be used to cause error values to be returned. Note that MPI does -not guarantee that an MPI program can continue past an error. -.sp -Note that per MPI-1 section 3.2.5, MPI errors on requests passed -to MPI_WAIT do not set the status.MPI_ERROR field in the returned -status. The error code is passed to the back-end error handler -and may be passed back to the caller through the return value of -MPI_WAIT if the back-end error handler returns it. 
The -pre-defined MPI error handler MPI_ERRORS_RETURN exhibits this -behavior, for example. - -.SH SEE ALSO -.ft R -.sp -MPI_Comm_set_errhandler -.br -MPI_File_set_errhandler -.br -MPI_Test -.br -MPI_Testall -.br -MPI_Testany -.br -MPI_Testsome -.br -MPI_Waitall -.br -MPI_Waitany -.br -MPI_Waitsome -.br -MPI_Win_set_errhandler -.br - diff --git a/ompi/mpi/man/man3/MPI_Waitall.3in b/ompi/mpi/man/man3/MPI_Waitall.3in deleted file mode 100644 index bb0bcd35eb5..00000000000 --- a/ompi/mpi/man/man3/MPI_Waitall.3in +++ /dev/null @@ -1,112 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2011 Cisco Systems, Inc. All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Waitall 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Waitall\fP \- Waits for all given communications to complete. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Waitall(int \fIcount\fP, MPI_Request\fI array_of_requests[]\fP, - MPI_Status \fI*array_of_statuses\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_WAITALL(\fICOUNT, ARRAY_OF_REQUESTS, ARRAY_OF_STATUSES, IERROR\fP) - INTEGER \fICOUNT, ARRAY_OF_REQUESTS(*)\fP - INTEGER \fIARRAY_OF_STATUSES(MPI_STATUS_SIZE,*), IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Waitall(\fIcount\fP, \fIarray_of_requests\fP, \fIarray_of_statuses\fP, \fIierror\fP) - INTEGER, INTENT(IN) :: \fIcount\fP - TYPE(MPI_Request), INTENT(INOUT) :: \fIarray_of_requests(count)\fP - TYPE(MPI_Status) :: \fIarray_of_statuses(*)\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -count -Lists length (integer). -.TP 1i -array_of_requests -Array of requests (array of handles). -.sp -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -array_of_statuses -Array of status objects (array of status). 
-.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -Blocks until all communication operations associated with active handles in the list complete, and returns the status of all these operations (this includes the case where no handle in the list is active). Both arrays have the same number of valid entries. The ith entry in array_of_statuses is set to the return status of the ith operation. Requests that were created by nonblocking communication operations are deallocated, and the corresponding handles in the array are set to MPI_REQUEST_NULL. The list may contain null or inactive handles. The call sets to empty the status of each such entry. -.sp -The error-free execution of MPI_Waitall(count, array_of_requests, array_of_statuses) has the same effect as the execution of MPI_Wait(&array_of_request[i], &array_of_statuses[i]), for i=0,...,count-1, in some arbitrary order. MPI_Waitall with an array of length 1 is equivalent to MPI_Wait. -.sp -When one or more of the communications completed by a call to MPI_Waitall fail, it is desirable to return specific information on each communication. The function MPI_Waitall will return in such case the error code MPI_ERR_IN_STATUS and will set the error field of each status to a specific error code. This code will be MPI_SUCCESS if the specific communication completed; it will be another specific error code if it failed; or it can be MPI_ERR_PENDING if it has neither failed nor completed. The function MPI_Waitall will return MPI_SUCCESS if no request had an error, or will return another error code if it failed for other reasons (such as invalid arguments). In such cases, it will not update the error fields of the statuses. -.sp -If your application does not need to examine the \fIarray_of_statuses\fP field, you can save resources by using the predefined constant MPI_STATUSES_IGNORE can be used as a special value for the \fIarray_of_statuses\fP argument. 
- -.SH ERRORS -For each invocation of MPI_Waitall, if one or more requests generate -an MPI error, only the \fIfirst\fP MPI request that caused an -error will be passed to its corresponding error handler. No other -error handlers will be invoked (even if multiple requests generated -errors). However, \fIall\fP requests that generate an error -will have a relevant error code set in the corresponding -status.MPI_ERROR field (unless MPI_STATUSES_IGNORE was used). -.sp -The default error handler aborts the MPI job, except for I/O function -errors. The error handler may be changed with MPI_Comm_set_errhandler, -MPI_File_set_errhandler, or MPI_Win_set_errhandler (depending on the -type of MPI handle that generated the MPI request); the predefined -error handler MPI_ERRORS_RETURN may be used to cause error values to -be returned. Note that MPI does not guarantee that an MPI program can -continue past an error. -.sp -If the invoked error handler allows MPI_Waitall to return to the -caller, the value MPI_ERR_IN_STATUS will be returned in the C and -Fortran bindings. - -.SH SEE ALSO -.ft R -.sp -MPI_Comm_set_errhandler -.br -MPI_File_set_errhandler -.br -MPI_Test -.br -MPI_Testall -.br -MPI_Testany -.br -MPI_Testsome -.br -MPI_Wait -.br -MPI_Waitany -.br -MPI_Waitsome -.br -MPI_Win_set_errhandler -.br - diff --git a/ompi/mpi/man/man3/MPI_Waitany.3in b/ompi/mpi/man/man3/MPI_Waitany.3in deleted file mode 100644 index f30f9988135..00000000000 --- a/ompi/mpi/man/man3/MPI_Waitany.3in +++ /dev/null @@ -1,142 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Waitany 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Waitany\fP \- Waits for any specified send or receive to complete. 
- -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Waitany(int \fIcount\fP, MPI_Request\fI array_of_requests[]\fP, - int \fI*index\fP, MPI_Status\fI *status\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_WAITANY(\fICOUNT, ARRAY_OF_REQUESTS, INDEX, STATUS, IERROR\fP) - INTEGER \fICOUNT, ARRAY_OF_REQUESTS(*), INDEX\fP - INTEGER \fISTATUS(MPI_STATUS_SIZE), IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Waitany(\fIcount\fP, \fIarray_of_requests\fP, \fIindex\fP, \fIstatus\fP, \fIierror\fP) - INTEGER, INTENT(IN) :: \fIcount\fP - TYPE(MPI_Request), INTENT(INOUT) :: \fIarray_of_requests(count)\fP - INTEGER, INTENT(OUT) :: \fIindex\fP - TYPE(MPI_Status) :: \fIstatus\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -count -List length (integer). -.TP 1i -array_of_requests -Array of requests (array of handles). -.sp - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -index -Index of handle for operation that completed (integer). In the range 0 to -count-1. In Fortran, the range is 1 to count. -.TP 1i -status -Status object (status). -.sp -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -A call to MPI_Waitany can be used to wait for the completion of one out of several requests. -.sp -The array_of_requests list may contain null or inactive handles. If the list contains no active handles (list has length zero or all entries are null or inactive), then the call returns immediately with index = MPI_UNDEFINED, and an empty status. -.sp -The execution of MPI_Waitany(count, array_of_requests, index, status) has the same effect as the execution of MPI_Wait(&array_of_requests[i], status), where i is the value returned by index (unless the value of index is MPI_UNDEFINED). MPI_Waitany with an array containing one active entry is equivalent to MPI_Wait. 
-.sp -If your application does not need to examine the \fIstatus\fP field, you can save resources by using the predefined constant MPI_STATUS_IGNORE as a special value for the \fIstatus\fP argument. -.sp -\fBExample:\fR Client-server code (starvation can occur). -.sp -.nf - CALL MPI_COMM_SIZE(comm, size, ierr) - CALL MPI_COMM_RANK(comm, rank, ierr) - IF(rank .GT 0) THEN ! client code - DO WHILE(.TRUE.) - CALL MPI_ISEND(a, n, MPI_REAL, 0, tag, comm, request, ierr) - CALL MPI_WAIT(request, status, ierr) - END DO - ELSE ! rank=0 -- server code - DO i=1, size-1 - CALL MPI_IRECV(a(1,i), n, MPI_REAL, i tag, - comm, request_list(i), ierr) - END DO - DO WHILE(.TRUE.) - CALL MPI_WAITANY(size-1, request_list, index, status, ierr) - CALL DO_SERVICE(a(1,index)) ! handle one message - CALL MPI_IRECV(a(1, index), n, MPI_REAL, index, tag, - comm, request_list(index), ierr) - END DO - END IF -.fi -.sp - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -MPI_Comm_set_errhandler, MPI_File_set_errhandler, or -MPI_Win_set_errhandler (depending on the type of MPI handle that -generated the request); the predefined error handler MPI_ERRORS_RETURN -may be used to cause error values to be returned. Note that MPI does -not guarantee that an MPI program can continue past an error. -.sp -Note that per MPI-1 section 3.2.5, MPI errors on requests passed -to MPI_WAITANY do not set the status.MPI_ERROR field in the returned -status. The error code is passed to the back-end error handler and -may be passed back to the caller through the return value of -MPI_WAITANY if the back-end error handler returns it. The pre-defined -MPI error handler MPI_ERRORS_RETURN exhibits this behavior, for -example. 
- -.SH SEE ALSO -.ft R -.sp -MPI_Comm_set_errhandler -.br -MPI_File_set_errhandler -.br -MPI_Test -.br -MPI_Testall -.br -MPI_Testany -.br -MPI_Testsome -.br -MPI_Wait -.br -MPI_Waitall -.br -MPI_Waitsome -.br -MPI_Win_set_errhandler -.br - diff --git a/ompi/mpi/man/man3/MPI_Waitsome.3in b/ompi/mpi/man/man3/MPI_Waitsome.3in deleted file mode 100644 index ca79b2b479c..00000000000 --- a/ompi/mpi/man/man3/MPI_Waitsome.3in +++ /dev/null @@ -1,155 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2011 Cisco Systems, Inc. All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Waitsome 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Waitsome\fP \- Waits for some given communications to complete. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Waitsome(int \fIincount\fP, MPI_Request \fIarray_of_requests[]\fP, - int\fI *outcount\fP, int\fI array_of_indices[]\fP, - MPI_Status \fIarray_of_statuses[]\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_WAITSOME(\fIINCOUNT, ARRAY_OF_REQUESTS, OUTCOUNT, - ARRAY_OF_INDICES, ARRAY_OF_STATUSES, IERROR\fP) - INTEGER \fIINCOUNT, ARRAY_OF_REQUESTS(*), OUTCOUNT\fP - INTEGER \fIARRAY_OF_INDICES(*)\fP - INTEGER \fIARRAY_OF_STATUSES(MPI_STATUS_SIZE*)\fP - INTEGER \fIIERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Waitsome(\fIincount\fP, \fIarray_of_requests\fP, \fIoutcount\fP, \fIarray_of_indices\fP, - \fIarray_of_statuses\fP, \fIierror\fP) - INTEGER, INTENT(IN) :: \fIincount\fP - TYPE(MPI_Request), INTENT(INOUT) :: \fIarray_of_requests(incount)\fP - INTEGER, INTENT(OUT) :: \fIoutcount\fP, \fIarray_of_indices(*)\fP - TYPE(MPI_Status) :: \fIarray_of_statuses(*)\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -incount -Length of array_of_requests (integer). 
-.TP 1i -array_of_requests -Array of requests (array of handles). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -outcount -Number of completed requests (integer). -.TP 1i -array_of_indices -Array of indices of operations that completed (array of integers). -.TP 1i -array_of_statuses -Array of status objects for operations that completed (array of status). -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -Waits until at least one of the operations associated with active handles in the list have completed. Returns in outcount the number of requests from the list array_of_requests that have completed. Returns in the first outcount locations of the array array_of_indices the indices of these operations (index within the array array_of_requests; the array is indexed from 0 in C and from 1 in Fortran). Returns in the first outcount locations of the array array_of_status the status for these completed operations. If a request that completed was allocated by a nonblocking communication call, then it is deallocated, and the associated handle is set to MPI_REQUEST_NULL. -.sp -If the list contains no active handles, then the call returns immediately with outcount = MPI_UNDEFINED. -.sp -When one or more of the communications completed by MPI_Waitsome fails, then it is desirable to return specific information on each communication. The arguments outcount, array_of_indices, and array_of_statuses will be adjusted to indicate completion of all communications that have succeeded or failed. The call will return the error code MPI_ERR_IN_STATUS and the error field of each status returned will be set to indicate success or to indicate the specific error that occurred. The call will return MPI_SUCCESS if no request resulted in an error, and will return another error code if it failed for other reasons (such as invalid arguments). In such cases, it will not update the error fields of the statuses. 
-.sp -If your application does not need to examine the \fIarray_of_statuses\fP field, you can save resources by using the predefined constant MPI_STATUSES_IGNORE can be used as a special value for the \fIarray_of_statuses\fP argument. -.sp -\fBExample:\fR Same code as the example in the MPI_Waitany man page, but using MPI_Waitsome. -.sp -.nf - CALL MPI_COMM_SIZE(comm, size, ierr) - CALL MPI_COMM_RANK(comm, rank, ierr) - IF(rank .GT. 0) THEN ! client code - DO WHILE(.TRUE.) - CALL MPI_ISEND(a, n, MPI_REAL, 0, tag, comm, request, ierr) - CALL MPI_WAIT(request, status, ierr) - END DO - ELSE ! rank=0 -- server code - DO i=1, size-1 - CALL MPI_IRECV(a(1,i), n, MPI_REAL, i, tag, - comm, requests(i), ierr) - END DO - DO WHILE(.TRUE.) - CALL MPI_WAITSOME(size, request_list, numdone, - indices, statuses, ierr) - DO i=1, numdone - CALL DO_SERVICE(a(1, indices(i))) - CALL MPI_IRECV(a(1, indices(i)), n, MPI_REAL, 0, tag, - comm, requests(indices(i)), ierr) - END DO - END DO - END IF -.fi -.sp -.SH NOTES -.ft R -The array of indices are in the range 0 to incount-1 for C and in the range 1 to incount for Fortran. - -.SH ERRORS -For each invocation of MPI_Waitsome, if one or more requests generate -an MPI error, only the \fIfirst\fP MPI request that caused an -error will be passed to its corresponding error handler. No other -error handlers will be invoked (even if multiple requests generated -errors). However, \fIall\fP requests that generate an error -will have a relevant error code set in the corresponding -status.MPI_ERROR field (unless MPI_STATUSES_IGNORE was used). -.sp -The default error handler aborts the MPI job, except for I/O function -errors. The error handler may be changed with MPI_Comm_set_errhandler, -MPI_File_set_errhandler, or MPI_Win_set_errhandler (depending on the -type of MPI handle that generated the MPI request); the predefined -error handler MPI_ERRORS_RETURN may be used to cause error values to -be returned. 
Note that MPI does not guarantee that an MPI program can -continue past an error. -.sp -If the invoked error handler allows MPI_Waitsome to return to the -caller, the value MPI_ERR_IN_STATUS will be returned in the C and -Fortran bindings. - -.SH SEE ALSO -.ft R -.sp -MPI_Comm_set_errhandler -.br -MPI_File_set_errhandler -.br -MPI_Test -.br -MPI_Testall -.br -MPI_Testany -.br -MPI_Testsome -.br -MPI_Wait -.br -MPI_Waitall -.br -MPI_Waitany -.br -MPI_Win_set_errhandler -.br - diff --git a/ompi/mpi/man/man3/MPI_Win_allocate.3in b/ompi/mpi/man/man3/MPI_Win_allocate.3in deleted file mode 100644 index 4be5e51e94c..00000000000 --- a/ompi/mpi/man/man3/MPI_Win_allocate.3in +++ /dev/null @@ -1,128 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2015 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2007-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" $COPYRIGHT$ -.TH MPI_Win_allocate 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Win_allocate\fP \- One-sided MPI call that allocates memory and -returns a window object for RMA operations. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Win_allocate (MPI_Aint \fIsize\fP, int \fIdisp_unit\fP, MPI_Info \fIinfo\fP, - MPI_Comm \fIcomm\fP, void *\fIbaseptr\fP, MPI_Win *\fIwin\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_WIN_ALLOCATE(\fISIZE, DISP_UNIT, INFO, COMM, BASEPTR, WIN, IERROR\fP) - INTEGER(KIND=MPI_ADDRESS_KIND) \fISIZE, BASEPTR\fP - INTEGER \fIDISP_UNIT, INFO, COMM, WIN, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Win_allocate(\fIsize\fP, \fIdisp_unit\fP, \fIinfo\fP, \fIcomm\fP, \fIbaseptr\fP, \fIwin\fP, \fIierror\fP) - USE, INTRINSIC :: ISO_C_BINDING, ONLY : C_PTR - INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: \fIsize\fP - INTEGER, INTENT(IN) :: \fIdisp_unit\fP - TYPE(MPI_Info), INTENT(IN) :: \fIinfo\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(C_PTR), INTENT(OUT) :: \fIbaseptr\fP - TYPE(MPI_Win), INTENT(OUT) :: \fIwin\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -size -Size of window in bytes (nonnegative integer). -.TP 1i -disp_unit -Local unit size for displacements, in bytes (positive integer). -.TP 1i -info -Info argument (handle). -.TP 1i -comm -Communicator (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -baseptr -Initial address of window. -.TP 1i -win -Window object returned by the call (handle). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -\fBMPI_Win_allocate\fP is a collective call executed by all processes -in the group of \fIcomm\fP. On each process, it allocates memory of at -least \fIsize\fP bytes, returns a pointer to it, and returns a window -object that can be used by all processes in \fIcomm\fP to perform RMA -operations. The returned memory consists of \fIsize\fP bytes local to -each process, starting at address \fIbaseptr\fP and is associated with -the window as if the user called \fBMPI_Win_create\fP on existing -memory. The \fIsize\fP argument may be different at each process and -\fIsize\fP = 0 is valid; however, a library might allocate and expose -more memory in order to create a fast, globally symmetric -allocation. 
The discussion of and rationales for \fBMPI_Alloc_mem\fP and -\fBMPI_Free_mem\fP in MPI-3.1 \[char167] 8.2 also apply to -\fBMPI_Win_allocate\fP; in particular, see the rationale in MPI-3.1 -\[char167] 8.2 for an explanation of the type used for \fIbaseptr\fP. -.sp -The displacement unit argument is provided to facilitate address -arithmetic in RMA operations: the target displacement argument of an -RMA operation is scaled by the factor \fIdisp_unit\fP specified by the -target process, at window creation. -.sp -For supported info keys see \fBMPI_Win_create\fI. -.sp - -.SH NOTES -Common choices for \fIdisp_unit\fP are 1 (no scaling), and (in C -syntax) \fIsizeof(type)\fP, for a window that consists of an array of -elements of type \fItype\fP. The later choice will allow one to use -array indices in RMA calls, and have those scaled correctly to byte -displacements, even in a heterogeneous environment. -.sp -Calling \fBMPI_Win_free\fP will deallocate the memory allocated by \fBMPI_Win_allocate\fP. It is thus erroneous to manually free \fIbaseptr\fP. - -.SH C NOTES -.ft R -While \fIbaseptr\fP is a \fIvoid *\fP type, this is to allow easy use of any pointer object for this parameter. This argument is really a \fIvoid **\fP type. -.sp - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -MPI_Comm_set_errhandler; the predefined error handler -MPI_ERRORS_RETURN may be used to cause error values to be -returned. Note that MPI does not guarantee that an MPI program can -continue past an error. 
- -.SH SEE ALSO -.ft R -.sp -MPI_Alloc_mem -MPI_Free_mem -MPI_Win_create -MPI_Win_allocate_shared -MPI_Win_free diff --git a/ompi/mpi/man/man3/MPI_Win_allocate_shared.3in b/ompi/mpi/man/man3/MPI_Win_allocate_shared.3in deleted file mode 100644 index 2733bd8473b..00000000000 --- a/ompi/mpi/man/man3/MPI_Win_allocate_shared.3in +++ /dev/null @@ -1,154 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2015-2016 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2007-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" $COPYRIGHT$ -.TH MPI_Win_allocate_shared 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Win_allocate_shared\fP \- One-sided MPI call that allocates -shared memory and returns a window object for RMA operations. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Win_allocate_shared (MPI_Aint \fIsize\fP, int \fIdisp_unit\fP, MPI_Info \fIinfo\fP, - MPI_Comm \fIcomm\fP, void *\fIbaseptr\fP, MPI_Win *\fIwin\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_WIN_ALLOCATE_SHARED(\fISIZE, DISP_UNIT, INFO, COMM, BASEPTR, WIN, IERROR\fP) - INTEGER(KIND=MPI_ADDRESS_KIND) \fISIZE, BASEPTR\fP - INTEGER \fIDISP_UNIT, INFO, COMM, WIN, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Win_allocate_shared(\fIsize\fP, \fIdisp_unit\fP, \fIinfo\fP, \fIcomm\fP, \fIbaseptr\fP, \fIwin\fP, \fIierror\fP) - USE, INTRINSIC :: ISO_C_BINDING, ONLY : C_PTR - INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: \fIsize\fP - INTEGER, INTENT(IN) :: \fIdisp_unit\fP - TYPE(MPI_Info), INTENT(IN) :: \fIinfo\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(C_PTR), INTENT(OUT) :: \fIbaseptr\fP - TYPE(MPI_Win), INTENT(OUT) :: \fIwin\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -size -Size of window in bytes (nonnegative integer). 
-.TP 1i -disp_unit -Local unit size for displacements, in bytes (positive integer). -.TP 1i -info -Info argument (handle). -.TP 1i -comm -Communicator (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -baseptr -Initial address of window. -.TP 1i -win -Window object returned by the call (handle). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -\fBMPI_Win_allocate_shared\fP is a collective call executed by all -processes in the group of \fIcomm\fP. On each process, it allocates -memory of at least \fIsize\fP bytes that is shared among all processes -in \fIcomm\fP, and returns a pointer to the locally allocated segment -in \fIbaseptr\fP that can be used for load/store accesses on the -calling process. The locally allocated memory can be the target of -load/store accesses by remote processes; the base pointers for other -processes can be queried using the function -\fBMPI_Win_shared_query\fP. The call also returns a window object that -can be used by all processes in \fIcomm\fP to perform RMA -operations. The \fIsize\fP argument may be different at each process -and \fIsize\fP = 0 is valid. It is the user's responsibility to ensure -that the communicator \fIcomm\fP represents a group of processes that -can create a shared memory segment that can be accessed by all -processes in the group. The discussions of rationales for -\fBMPI_Alloc_mem\fP and \fBMPI_Free_mem\fP in MPI-3.1 \[char167] 8.2 -also apply to \fBMPI_Win_allocate_shared\fP; in particular, see the -rationale in MPI-3.1 \[char167] 8.2 for an explanation of the type -used for \fIbaseptr\fP. The allocated memory is contiguous across -process ranks unless the info key \fIalloc_shared_noncontig\fP is -specified. Contiguous across process ranks means that the first -address in the memory segment of process i is consecutive with the -last address in the memory segment of process i - 1. This may enable -the user to calculate remote address offsets with local information -only. 
-.sp -The following info keys are supported: -.ft R -.TP 1i -alloc_shared_noncontig -If not set to \fItrue\fP, the allocation strategy is to allocate -contiguous memory across process ranks. This may limit the performance -on some architectures because it does not allow the implementation to -modify the data layout (e.g., padding to reduce access latency). -.sp -.TP 1i -blocking_fence -If set to \fItrue\fP, the osc/sm component will use \fBMPI_Barrier\fP -for \fBMPI_Win_fence\fP. If set to \fIfalse\fP a condition variable -and counter will be used instead. The default value is -\fIfalse\fP. This info key is Open MPI specific. -.sp -.TP 1i -For additional supported info keys see \fBMPI_Win_create\fP. -.sp - -.SH NOTES -Common choices for \fIdisp_unit\fP are 1 (no scaling), and (in C -syntax) \fIsizeof(type)\fP, for a window that consists of an array of -elements of type \fItype\fP. The later choice will allow one to use -array indices in RMA calls, and have those scaled correctly to byte -displacements, even in a heterogeneous environment. -.sp -Calling \fBMPI_Win_free\fP will deallocate the memory allocated by \fBMPI_Win_allocate_shared\fP. It is thus erroneous to manually free \fIbaseptr\fP. - -.SH C NOTES -.ft R -While \fIbaseptr\fP is a \fIvoid *\fP type, this is to allow easy use of any pointer object for this parameter. This argument is really a \fIvoid **\fP type. -.sp - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -MPI_Comm_set_errhandler; the predefined error handler -MPI_ERRORS_RETURN may be used to cause error values to be -returned. Note that MPI does not guarantee that an MPI program can -continue past an error. 
- -.SH SEE ALSO -.ft R -.sp -MPI_Alloc_mem -MPI_Free_mem -MPI_Win_allocate -MPI_Win_create -MPI_Win_shared_query -MPI_Win_free - diff --git a/ompi/mpi/man/man3/MPI_Win_attach.3in b/ompi/mpi/man/man3/MPI_Win_attach.3in deleted file mode 100644 index 18415fa2fe4..00000000000 --- a/ompi/mpi/man/man3/MPI_Win_attach.3in +++ /dev/null @@ -1,92 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2015-2019 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2019-2020 FUJITSU LIMITED. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Win_attach 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Win_attach, MPI_Win_detach\fP \- One-sided MPI call that attaches / detaches a memory region to / from a window object for RMA operations. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -MPI_Win_attach(MPI_Win \fIwin\fP, void *\fIbase\fP, MPI_Aint \fIsize\fP) - -MPI_Win_detach(MPI_Win \fIwin\fP, void *\fIbase\fP) -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_WIN_ATTACH(\fIWIN, BASE, SIZE, IERROR\fP) - \fIBASE\fP(*) - INTEGER(KIND=MPI_ADDRESS_KIND) \fISIZE\fP - INTEGER \fIWIN, IERROR\fP - -MPI_WIN_DETACH(\fIWIN, BASE, IERROR\fP) - \fIBASE\fP(*) - INTEGER \fIWIN, IERROR\fP -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Win_attach(\fIwin\fP, \fIbase\fP, \fIsize\fP, \fIierror\fP) - TYPE(MPI_Win), INTENT(IN) :: \fIwin\fP - TYPE(*), DIMENSION(..), INTENT(IN) :: \fIbase\fP - INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: \fIsize\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -MPI_Win_detach(\fIwin\fP, \fIbase\fP, \fIierror\fP) - TYPE(MPI_Win), INTENT(IN) :: \fIwin\fP - TYPE(*), DIMENSION(..), INTENT(IN) :: \fIbase\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -win -A window that was created with -.I MPI_Win_create_dynamic - -.TP 1i -base -Initial address of window (choice). 
-.TP 1i -size -Size of window in bytes (nonnegative integer). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -win -Window object returned by the call (handle). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Win_attach is a one-sided MPI communication call used to attach a memory region of \fIsize\fP bytes starting at address \fIbase\fP to a window for RMA access. The window \fIwin\fP must have been created using -.IR MPI_Win_create_dynamic . -Multiple non-overlapping memory regions may be attached to the same dynamic window. Attaching overlapping memory regions to the same dynamic window is erroneous. -.sp -If the \fIbase\fP value used by MPI_Win_attach was allocated by MPI_Alloc_mem, the size of the window can be no larger than the value set by the MPI_ALLOC_MEM function. -.sp -.sp -MPI_Win_detach can be used to detach a previously attached memory region from \fIwin\fP. The memory address \fIbase\fP and \fIwin\fP must match arguments passed to a previous call to MPI_Win_attach. - -.SH NOTES -Use memory allocated by MPI_Alloc_mem to guarantee properly aligned window boundaries (such as word, double-word, cache line, page frame, and so on). -.sp - - - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. 
- - diff --git a/ompi/mpi/man/man3/MPI_Win_c2f.3in b/ompi/mpi/man/man3/MPI_Win_c2f.3in deleted file mode 100644 index a13fce697dd..00000000000 --- a/ompi/mpi/man/man3/MPI_Win_c2f.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Comm_f2c.3 diff --git a/ompi/mpi/man/man3/MPI_Win_call_errhandler.3in b/ompi/mpi/man/man3/MPI_Win_call_errhandler.3in deleted file mode 100644 index bc653932ef4..00000000000 --- a/ompi/mpi/man/man3/MPI_Win_call_errhandler.3in +++ /dev/null @@ -1,81 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Win_call_errhandler 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" - -.SH NAME -\fBMPI_Win_call_errhandler\fP \- Passes the supplied error code to the -error handler assigned to a window - -.SH SYNTAX -.ft R - -.SH C Syntax -.nf -#include -int MPI_Win_call_errhandler(MPI_Win \fIwin\fP, int \fIerrorcode\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_WIN_CALL_ERRHANDLER(\fIWIN, ERRORCODE, IERROR\fP) - INTEGER \fIWIN, ERRORCODE, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Win_call_errhandler(\fIwin\fP, \fIerrorcode\fP, \fIierror\fP) - TYPE(MPI_Win), INTENT(IN) :: \fIwin\fP - INTEGER, INTENT(IN) :: \fIerrorcode\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1.4i -win -Window with error handler (handle). -.ft R -.TP 1.4i -errorcode -MPI error code (integer). - -.SH OUTPUT PARAMETER -.ft R -.TP 1.4i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -This function invokes the error handler assigned to the window -\fIwin\fP with the supplied error code \fIerrorcode\fP. 
If the error -handler was successfully called, the process is not aborted, and the -error handler returns, this function returns MPI_SUCCESS. - -.SH NOTES -.ft R -Users should note that the default error handler is -MPI_ERRORS_ARE_FATAL. Thus, calling this function will abort the -window processes if the default error handler has not been changed for -this window. - -.SH ERRORS -.ft R -Almost all MPI routines return an error value; C routines as -the value of the function and Fortran routines in the last argument. -.sp -See the MPI man page for a full list of MPI error codes. - -.SH SEE ALSO -.ft R -.nf -MPI_Win_create_errhandler -MPI_Win_set_errhandler - diff --git a/ompi/mpi/man/man3/MPI_Win_complete.3in b/ompi/mpi/man/man3/MPI_Win_complete.3in deleted file mode 100644 index c6fbc0bc964..00000000000 --- a/ompi/mpi/man/man3/MPI_Win_complete.3in +++ /dev/null @@ -1,60 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2007-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Win_complete 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Win_complete\fP \- Completes an RMA access epoch on \fIwin\fP started by a call to MPI_Win_start - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -MPI_Win_complete(MPI_Win \fIwin\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_WIN_COMPLETE(WIN, IERROR) - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Win_complete(\fIwin\fP, \fIierror\fP) - TYPE(MPI_Win), INTENT(IN) :: \fIwin\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -win -Window object (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). 
- -.SH DESCRIPTION -.ft R -MPI_Win_complete is a one-sided MPI communication synchronization call, completing an RMA access epoch on \fIwin\fP started by a call to MPI_Win_start. MPI_Win_complete enforces the completion of preceding RMA calls at the origin and not at the target. A put or accumulate call may not have completed at the target when it has completed at the origin. - - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Win_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -MPI_Win_start -.br - diff --git a/ompi/mpi/man/man3/MPI_Win_create.3in b/ompi/mpi/man/man3/MPI_Win_create.3in deleted file mode 100644 index ab0ff077ed6..00000000000 --- a/ompi/mpi/man/man3/MPI_Win_create.3in +++ /dev/null @@ -1,153 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2015 Los Alamos National Security, LLC. All rights -.\" reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2007-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Win_create 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Win_create\fP \- One-sided MPI call that returns a window object for RMA operations. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -MPI_Win_create(void *\fIbase\fP, MPI_Aint \fIsize\fP, int \fIdisp_unit\fP, - MPI_Info \fIinfo\fP, MPI_Comm \fIcomm\fP, MPI_Win *\fIwin\fP) - -.fi -.SH Fortran Syntax (see FORTRAN 77 NOTES) -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_WIN_CREATE(\fIBASE, SIZE, DISP_UNIT, INFO, COMM, WIN, IERROR\fP) - \fIBASE\fP(*) - INTEGER(KIND=MPI_ADDRESS_KIND) \fISIZE\fP - INTEGER \fIDISP_UNIT, INFO, COMM, WIN, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Win_create(\fIbase\fP, \fIsize\fP, \fIdisp_unit\fP, \fIinfo\fP, \fIcomm\fP, \fIwin\fP, \fIierror\fP) - TYPE(*), DIMENSION(..), ASYNCHRONOUS :: \fIbase\fP - INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: \fIsize\fP - INTEGER, INTENT(IN) :: \fIdisp_unit\fP - TYPE(MPI_Info), INTENT(IN) :: \fIinfo\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Win), INTENT(OUT) :: \fIwin\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -base -Initial address of window (choice). -.TP 1i -size -Size of window in bytes (nonnegative integer). -.TP 1i -disp_unit -Local unit size for displacements, in bytes (positive integer). -.TP 1i -info -Info argument (handle). -.TP 1i -comm -Communicator (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -win -Window object returned by the call (handle). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Win_create is a one-sided MPI communication collective call executed by all processes in the group of \fIcomm\fP. It returns a window object that can be used by these processes to perform RMA operations. Each process specifies a window of existing memory that it exposes to RMA accesses by the processes in the group of \fIcomm\fP. The window consists of \fIsize\fP bytes, starting at address \fIbase\fP. A process may elect to expose no memory by specifying \fIsize\fP = 0. -.sp -If the \fIbase\fP value used by MPI_Win_create was allocated by MPI_Alloc_mem, the size of the window can be no larger than the value set by the MPI_ALLOC_MEM function. 
-.sp -The displacement unit argument is provided to facilitate address arithmetic in RMA operations: the target displacement argument of an RMA operation is scaled by the factor \fIdisp_unit\fP specified by the target process, at window creation. -.sp -The following info keys are supported: -.ft R -.TP 1i -no_locks -If set to \fItrue\fP, then the implementation may assume that the local -window is never locked (by a call to MPI_Win_lock or -MPI_Win_lock_all). Setting this value if only active synchronization -may allow the implementation to enable certain optimizations. -.sp -.TP 1i -accumulate_ordering -By default, accumulate operations from one initiator to one target on -the same window memory location are strictly ordered. If the info key -accumulate_ordering is set to \fInone\fP, no ordering of accumulate -operations guaranteed. They key can also be a comma-separated list of -required orderings consisting of \fIrar\fP, \fIwar\fP, \fIraw\fP, and \fIwaw\fP for -read-after-read, write-after-read, read-after-write, and -write-after-write, respectively. Looser ordering constraints are -likely to result in improved performance. -.sp -.TP 1i -accumulate_ops -If set to \fIsame_op\fP, the implementation will assume that all concurrent -accumulate calls to the same target address will use the same -operation. If set to \fIsame_op_no_op\fP, then the implementation will -assume that all concurrent accumulate calls to the same target address -will use the same operation or MPI_NO_OP. The default is \fIsame_op_no_op\fP. -.sp -.TP 1i -same_size -If set to \fItrue\fP, then the implementation may assume that the argument -\fIsize\fP is identical on all processes, and that all processes have -provided this info key with the same value. -.sp -.TP 1i -same_disp_unit -If set to \fItrue\fP, then the implementation may assume that the argument -\fIdisp_unit\fP is identical on all processes, and that all processes have -provided this info key with the same value. 
-.sp -.SH NOTES -Common choices for \fIdisp_unit\fP are 1 (no scaling), and (in C syntax) \fIsizeof(type)\fP, for a window that consists of an array of elements of type \fItype\fP. The later choice will allow one to use array indices in RMA calls, and have those scaled correctly to byte displacements, even in a heterogeneous environment. -.sp -Use memory allocated by MPI_Alloc_mem to guarantee properly aligned window boundaries (such as word, double-word, cache line, page frame, and so on). -.sp - - - -.SH FORTRAN 77 NOTES -.ft R -The MPI standard prescribes portable Fortran syntax for -the \fISIZE\fP argument only for Fortran 90. FORTRAN 77 -users may use the non-portable syntax -.sp -.nf - INTEGER*MPI_ADDRESS_KIND \fISIZE\fP -.fi -.sp -where MPI_ADDRESS_KIND is a constant defined in mpif.h -and gives the length of the declared integer in bytes. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -.ft R -.sp -MPI_Alloc_mem -MPI_Free_mem -MPI_Win_allocate -MPI_Win_allocate_shared diff --git a/ompi/mpi/man/man3/MPI_Win_create_dynamic.3in b/ompi/mpi/man/man3/MPI_Win_create_dynamic.3in deleted file mode 100644 index 3e32bf5e7a0..00000000000 --- a/ompi/mpi/man/man3/MPI_Win_create_dynamic.3in +++ /dev/null @@ -1,108 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2015 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. 
-.\" $COPYRIGHT$ -.TH MPI_Win_create_dynamic 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Win_create_dynamic\fP \- One-sided MPI call that returns a window object for RMA operations. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -MPI_Win_create_dynamic(MPI_Info \fIinfo\fP, MPI_Comm \fIcomm\fP, MPI_Win *\fIwin\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_WIN_CREATE_DYNAMIC(\fIINFO, COMM, WIN, IERROR\fP) - INTEGER \fIINFO, COMM, WIN, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Win_create_dynamic(\fIinfo\fP, \fIcomm\fP, \fIwin\fP, \fIierror\fP) - TYPE(MPI_Info), INTENT(IN) :: \fIinfo\fP - TYPE(MPI_Comm), INTENT(IN) :: \fIcomm\fP - TYPE(MPI_Win), INTENT(OUT) :: \fIwin\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -info -Info argument (handle). -.TP 1i -comm -Communicator (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -win -Window object returned by the call (handle). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Win_create_dynamic is a one-sided MPI communication collective call executed by all processes in the group of \fIcomm\fP. It returns a window object without memory attached that can be used by these processes to perform RMA operations. -.sp -A window created with \fBMPI_Win_create_dynamic\fP requires the \fItarget_disp\fP argument for all RMA communication functions to be the actual address at the target. - -.sp -The following info keys are supported: -.ft R -.TP 1i -no_locks -If set to \fItrue\fP, then the implementation may assume that the local -window is never locked (by a call to MPI_Win_lock or -MPI_Win_lock_all). Setting this value if only active synchronization -may allow the implementation to enable certain optimizations. 
-.sp -.TP 1i -accumulate_ordering -By default, accumulate operations from one initiator to one target on -the same window memory location are strictly ordered. If the info key -accumulate_ordering is set to \fInone\fP, no ordering of accumulate -operations guaranteed. They key can also be a comma-separated list of -required orderings consisting of \fIrar\fP, \fIwar\fP, \fIraw\fP, and \fIwaw\fP for -read-after-read, write-after-read, read-after-write, and -write-after-write, respectively. Looser ordering constraints are -likely to result in improved performance. -.sp -.TP 1i -accumulate_ops -If set to \fIsame_op\fP, the implementation will assume that all concurrent -accumulate calls to the same target address will use the same -operation. If set to \fIsame_op_no_op\fP, then the implementation will -assume that all concurrent accumulate calls to the same target address -will use the same operation or \fBMPI_NO_OP\fP. The default is \fIsame_op_no_op\fP. -.sp - -.SH NOTES -Since dynamically attaching memory to a window is a local operation, one has to communicate the actual address at the target using \fBMPI_Get_address\fP and some communication. -.sp -Dynamic memory does not have any \fIdisp_unit\fP associated and requires correct offset calculations with proper type handling. -.sp - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. 
- - -.SH SEE ALSO -MPI_Win_attach -MPI_Win_detach -MPI_Get_address -.br - - diff --git a/ompi/mpi/man/man3/MPI_Win_create_errhandler.3in b/ompi/mpi/man/man3/MPI_Win_create_errhandler.3in deleted file mode 100644 index f3468014b48..00000000000 --- a/ompi/mpi/man/man3/MPI_Win_create_errhandler.3in +++ /dev/null @@ -1,83 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2009-2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Win_create_errhandler 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Win_create_errhandler\fP \- Creates an error handler for a window. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Win_create_errhandler(MPI_Win_errhandler_function *\fIfunction\fP, - MPI_Errhandler *\fIerrhandler\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_WIN_CREATE_ERRHANDLER(\fIFUNCTION, ERRHANDLER, IERROR\fP) - EXTERNAL \fIFUNCTION\fP - INTEGER \fIERRHANDLER, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Win_create_errhandler(\fIwin_errhandler_fn\fP, \fIerrhandler\fP, \fIierror\fP) - PROCEDURE(MPI_Win_errhandler_function) :: \fIwin_errhandler_fn\fP - TYPE(MPI_Errhandler), INTENT(OUT) :: \fIerrhandler\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH DEPRECATED TYPE NAME NOTE -.ft R -MPI-2.2 deprecated the MPI_Win_errhandler_fn and -MPI::Win::Errhandler_fn types in favor of -MPI_Win_errhandler_function and MPI::Win::Errhandler_function, -respectively. Open MPI supports both names (indeed, the _fn names are -typedefs to the _function names). - -.SH INPUT PARAMETER -.ft R -.TP 1i -function -User-defined error-handling procedure (function). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -errhandler -MPI error handler (handle). -.TP 1i -IERROR -Fortran only: Error status (integer). 
- -.SH DESCRIPTION -.ft R -MPI_Win_create_errhandler should be, in C, a function of type MPI_Win_errhandler_function, which is defined as -.sp -.nf -typedef void MPI_Win_errhandler_function(MPI Win *, int *, ...); -.fi -.sp -The first argument is the window in use, the second is the error code to be returned. -.sp -In Fortran, the user routine should be of the form: -.sp -.nf -SUBROUTINE WIN_ERRHANDLER_FUNCTION(WIN, ERROR_CODE, ...) - INTEGER WIN, ERROR_CODE -.fi - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Win_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_Win_create_keyval.3in b/ompi/mpi/man/man3/MPI_Win_create_keyval.3in deleted file mode 100644 index 7decaa40b7b..00000000000 --- a/ompi/mpi/man/man3/MPI_Win_create_keyval.3in +++ /dev/null @@ -1,124 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Win_create_keyval 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Win_create_keyval\fP \- Creates a keyval for a window. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Win_create_keyval(MPI_Win_copy_attr_function *\fIwin_copy_attr_fn\fP, - MPI_Win_delete_attr_function *\fIwin_delete_attr_fn\fP, - int *\fIwin_keyval\fP, void *\fIextra_state\fP) - -.fi -.SH Fortran Syntax (see FORTRAN 77 NOTES) -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_WIN_CREATE_KEYVAL(\fIWIN_COPY_ATTR_FN, WIN_DELETE_ATTR_FN, - WIN_KEYVAL, EXTRA_STATE, IERROR\fP) - EXTERNAL \fIWIN_COPY_ATTR_FN, WIN_DELETE_ATTR_FN\fP - INTEGER \fIWIN_KEYVAL, IERROR\fP - INTEGER(KIND=MPI_ADDRESS_KIND) \fIEXTRA_STATE\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Win_create_keyval(\fIwin_copy_attr_fn\fP, \fIwin_delete_attr_fn\fP, \fIwin_keyval\fP, - \fIextra_state\fP, \fIierror\fP) - PROCEDURE(MPI_Win_copy_attr_function) :: \fIwin_copy_attr_fn\fP - PROCEDURE(MPI_Win_delete_attr_function) :: \fIwin_delete_attr_fn\fP - INTEGER, INTENT(OUT) :: \fIwin_keyval\fP - INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: \fIextra_state\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -win_copy_attr_fn -Copy callback function for \fIwin_keyval\fP (function). -.TP 1i -win_delete_attr_fn -Delete callback function for \fIwin_keyval\fP (function). -.TP 1i -extra_state -Extra state for callback functions. - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -win_keyval -Key value for future access (integer). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -The argument \fIwin_copy_attr_fn\fP may be specified as MPI_WIN_NULL_COPY_FN or MPI_WIN_DUP_FN from either C or Fortran. MPI_WIN_NULL_COPY_FN is a function that serves only to return \fIflag\fP = 0 and MPI_SUCCESS. MPI_WIN_DUP_FN is a simple-minded copy function that sets \fIflag\fP = 1, returns the value of \fIattribute_val_in\fP in \fIattribute_val_out\fP, and returns MPI_SUCCESS. -.sp -The argument \fIwin_delete_attr_fn\fP may be specified as MPI_WIN_NULL_DELETE_FN from either C or Fortran. MPI_WIN_NULL_DELETE_FN is a function that serves only to return MPI_SUCCESS. 
-.sp -The C callback functions are: -.sp -.nf -typedef int MPI_Win_copy_attr_function(MPI_Win \fIoldwin\fP, int \fIwin_keyval\fP, - void *\fIextra_state\fP, void *\fIattribute_val_in\fP, - void *\fIattribute_val_out\fP, int *\fIflag\fP); -.fi -.sp -and -.sp -.nf -typedef int MPI_Win_delete_attr_function(MPI_Win \fIwin\fP, int \fIwin_keyval\fP, - void *\fIattribute_val\fP, void *\fIextra_state\fP); -.fi -.sp -The Fortran callback functions are: -.sp -.nf -SUBROUTINE WIN_COPY_ATTR_FN(\fIOLDWIN, WIN_KEYVAL, EXTRA_STATE, - ATTRIBUTE_VAL_IN, ATTRIBUTE_VAL_OUT, FLAG, IERROR\fP) - INTEGER \fIOLDWIN, WIN_KEYVAL, IERROR\fP - INTEGER(KIND=MPI_ADDRESS_KIND) \fIEXTRA_STATE, ATTRIBUTE_VAL_IN, - ATTRIBUTE_VAL_OUT\fP - LOGICAL \fIFLAG\fP -.fi -.sp -and -.sp -.nf -SUBROUTINE WIN_DELETE_ATTR_FN(\fIWIN, WIN_KEYVAL, ATTRIBUTE_VAL, - EXTRA_STATE, IERROR\fP) - INTEGER \fIWIN, WIN_KEYVAL, IERROR\fP - INTEGER(KIND=MPI_ADDRESS_KIND) \fIATTRIBUTE_VAL, EXTRA_STATE\fP -.fi - -.SH FORTRAN 77 NOTES -.ft R -The MPI standard prescribes portable Fortran syntax for -the \fIEXTRA_STATE\fP argument only for Fortran 90. FORTRAN 77 -users may use the non-portable syntax -.sp -.nf - INTEGER*MPI_ADDRESS_KIND \fIEXTRA_STATE\fP -.fi -.sp -where MPI_ADDRESS_KIND is a constant defined in mpif.h -and gives the length of the declared integer in bytes. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. 
- diff --git a/ompi/mpi/man/man3/MPI_Win_delete_attr.3in b/ompi/mpi/man/man3/MPI_Win_delete_attr.3in deleted file mode 100644 index 8ff76cec275..00000000000 --- a/ompi/mpi/man/man3/MPI_Win_delete_attr.3in +++ /dev/null @@ -1,66 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2010-2014 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Win_delete_attr 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Win_delete_attr\fP \- Deletes an attribute from a window. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Win_delete_attr(MPI_Win \fIwin\fP, int \fIwin_keyval\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_WIN_DELETE_ATTR(\fIWIN, WIN_KEYVAL, IERROR\fP) - INTEGER \fIWIN, WIN_KEYVAL, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Win_delete_attr(\fIwin\fP, \fIwin_keyval\fP, \fIierror\fP) - TYPE(MPI_Win), INTENT(IN) :: \fIwin\fP - INTEGER, INTENT(IN) :: \fIwin_keyval\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -win -Window from which the attribute is deleted (handle). - -.SH INPUT PARAMETER -.ft R -.TP 1i -win_keyval -Key value (integer). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH NOTES -Note that it is not defined by the MPI standard what happens if the -delete_fn callback invokes other MPI functions. In Open MPI, it is -not valid for delete_fn callbacks (or any of their children) to add or -delete attributes on the same object on which the delete_fn callback -is being invoked. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. 
By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_Win_detach.3in b/ompi/mpi/man/man3/MPI_Win_detach.3in deleted file mode 100644 index 42a7c2b2dfb..00000000000 --- a/ompi/mpi/man/man3/MPI_Win_detach.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Win_attach.3 diff --git a/ompi/mpi/man/man3/MPI_Win_f2c.3in b/ompi/mpi/man/man3/MPI_Win_f2c.3in deleted file mode 100644 index a13fce697dd..00000000000 --- a/ompi/mpi/man/man3/MPI_Win_f2c.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Comm_f2c.3 diff --git a/ompi/mpi/man/man3/MPI_Win_fence.3in b/ompi/mpi/man/man3/MPI_Win_fence.3in deleted file mode 100644 index aa8252f5c5f..00000000000 --- a/ompi/mpi/man/man3/MPI_Win_fence.3in +++ /dev/null @@ -1,93 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2007-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Win_fence 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Win_fence\fP \- Synchronizes RMA calls on a window. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Win_fence(int \fIassert\fP, MPI_Win \fIwin\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_WIN_FENCE(\fIASSERT, WIN, IERROR\fP) - INTEGER \fIASSERT, WIN, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Win_fence(\fIassert\fP, \fIwin\fP, \fIierror\fP) - INTEGER, INTENT(IN) :: \fIassert\fP - TYPE(MPI_Win), INTENT(IN) :: \fIwin\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -assert -Program assertion (integer). 
-.TP 1i -win -Window object (handle). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Win_fence synchronizes RMA calls on \fIwin\fP. The call is collective on the group of \fIwin\fP. All RMA operations on \fIwin\fP originating at a given process and started before the fence call will complete at that process before the fence call returns. They will be completed at their target before the fence call returns at the target. RMA operations on \fIwin\fP started by a process after the fence call returns will access their target window only after MPI_Win_fence has been called by the target process. -.sp -The call completes an RMA access epoch if it was preceded by another fence call and the local process issued RMA communication calls on \fIwin\fP between these two calls. The call completes an RMA exposure epoch if it was preceded by another fence call and the local window was the target of RMA accesses between these two calls. The call starts an RMA access epoch if it is followed by another fence call and by RMA communication calls issued between these two fence calls. The call starts an exposure epoch if it is followed by another fence call and the local window is the target of RMA accesses between these two fence calls. Thus, the fence call is equivalent to calls to a subset of \fIpost, start, complete, wait\fP. -.sp -The \fIassert\fP argument is used to provide assertions on the context of the call that may be used for various optimizations. A value of \fIassert\fP = 0 is always valid. The following assertion value is supported: -.ft R -.TP 1i -MPI_MODE_NOPRECEDE -No local RMA calls have been issued before this fence. This assertion must be provided by all or no members of the group of the window. It may enable faster fence call by avoiding unnecessary synchronization. -.sp -.TP 1i -MPI_MODE_NOSTORE -Informs that the local window was not updated by local stores or get calls in the preceding epoch. 
-.TP 1i -MPI_MODE_NOPUT -Informs that the local window will not be updated by any put or accummulate calls in the ensuing epoch (until next fence call). -.TP 1i -MPI_MODE_NOSUCCEED -No local RMA calls will be issued after this fence. This assertion must be provided by all or no members of the group of the window. It may enable faster fence call by avoiding unnecessary synchronization. -.sp - - -.SH NOTE -Calls to MPI_Win_fence should both precede and follow calls to put, get or accumulate that are synchronized with fence calls. -.sp - - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -MPI_Win_create -MPI_Win_start -MPI_Win_post -MPI_Win_complete -MPI_Win_wait -.br - diff --git a/ompi/mpi/man/man3/MPI_Win_flush.3in b/ompi/mpi/man/man3/MPI_Win_flush.3in deleted file mode 100644 index 1b41798b0ba..00000000000 --- a/ompi/mpi/man/man3/MPI_Win_flush.3in +++ /dev/null @@ -1,76 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2014 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2007-2008 Sun Microsystems, Inc. 
-.\" Copyright (c) 1996 Thinking Machines Corporation -.\" $COPYRIGHT$ -.TH MPI_Win_flush 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Win_flush\fP, \fBMPI_Win_flush_all\fP \- Complete all outstanding RMA operations at both the origin and the target - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Win_flush (int \fIrank\fP, MPI_Win \fIwin\fP) - -int MPI_Win_flush_all (MPI_Win \fIwin\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_WIN_FLUSH(\fIRANK, WIN, IERROR\fP) - INTEGER \fIRANK, WIN, IERROR\fP - -MPI_WIN_FLUSH_ALL(\fIWIN, IERROR\fP) - INTEGER \fIWIN, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Win_flush(\fIrank\fP, \fIwin\fP, \fIierror\fP) - INTEGER, INTENT(IN) :: \fIrank\fP - TYPE(MPI_Win), INTENT(IN) :: \fIwin\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -MPI_Win_flush_all(\fIwin\fP, \fIierror\fP) - TYPE(MPI_Win), INTENT(IN) :: \fIwin\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -rank -Rank of window (nonnegative integer). -.TP 1i -win -Window object (handle). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -\fBMPI_Win_flush\fP completes all outstanding RMA operations initiated by the calling process to the target rank on the specified window. The operations are completed both at the origin and at the target. \fBMPI_Win_flush_all\fP completes all outstanding RMA operations to all targets. -.sp -Can only be called from within a passive target epoch. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. 
The error handler may be changed with \fBMPI_Comm_set_errhandler\fP; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -MPI_Win_flush_local -MPI_Win_lock -MPI_Win_lock_all -.br diff --git a/ompi/mpi/man/man3/MPI_Win_flush_all.3in b/ompi/mpi/man/man3/MPI_Win_flush_all.3in deleted file mode 100644 index b30e345a522..00000000000 --- a/ompi/mpi/man/man3/MPI_Win_flush_all.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Win_flush.3 diff --git a/ompi/mpi/man/man3/MPI_Win_flush_local.3in b/ompi/mpi/man/man3/MPI_Win_flush_local.3in deleted file mode 100644 index 440fbfe41f8..00000000000 --- a/ompi/mpi/man/man3/MPI_Win_flush_local.3in +++ /dev/null @@ -1,76 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2014 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2007-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" $COPYRIGHT$ -.TH MPI_Win_flush_local 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Win_flush_local\fP, \fBMPI_Win_flush_local_all\fP \- Complete all outstanding RMA operations at both the origin - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Win_flush_local (int \fIrank\fP, MPI_Win \fIwin\fP) - -int MPI_Win_flush_local_all (MPI_Win \fIwin\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_WIN_FLUSH_LOCAL(\fIRANK, WIN, IERROR\fP) - INTEGER \fIRANK, WIN, IERROR\fP - -MPI_WIN_FLUSH_LOCAL_ALL(\fIWIN, IERROR\fP) - INTEGER \fIWIN, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Win_flush_local(\fIrank\fP, \fIwin\fP, \fIierror\fP) - INTEGER, INTENT(IN) :: \fIrank\fP - TYPE(MPI_Win), INTENT(IN) :: \fIwin\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -MPI_Win_flush_local_all(\fIwin\fP, \fIierror\fP) - TYPE(MPI_Win), INTENT(IN) :: \fIwin\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -rank -Rank of window (nonnegative integer). -.TP 1i -win -Window object (handle). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -\fBMPI_Win_flush_local\fP locally completes at the origin all outstanding RMA operations initiated by the calling process to the target process specified by rank on the specified window. For example, after this routine completes, the user may reuse any buffers provided to put, get, or accumulate operations. \fBMPI_Win_flush_local_all\fP locally completes at the origin all outstanding RMA operations to all targets. -.sp -Can only be called from within a passive target epoch. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with \fBMPI_Comm_set_errhandler\fP; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. 
- -.SH SEE ALSO -MPI_Win_flush -MPI_Win_lock -MPI_Win_lock_all -.br diff --git a/ompi/mpi/man/man3/MPI_Win_flush_local_all.3in b/ompi/mpi/man/man3/MPI_Win_flush_local_all.3in deleted file mode 100644 index 6b740a2b3a0..00000000000 --- a/ompi/mpi/man/man3/MPI_Win_flush_local_all.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/MPI_Win_flush_local.3 diff --git a/ompi/mpi/man/man3/MPI_Win_free.3in b/ompi/mpi/man/man3/MPI_Win_free.3in deleted file mode 100644 index 922a21f5da4..00000000000 --- a/ompi/mpi/man/man3/MPI_Win_free.3in +++ /dev/null @@ -1,66 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2007-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Win_free 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Win_free\fP \- Frees the window object and returns a null handle. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Win_free(MPI_Win *\fIwin\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_WIN_FREE(\fIWIN, IERROR\fP) - INTEGER \fIWIN, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Win_free(\fIwin\fP, \fIierror\fP) - TYPE(MPI_Win), INTENT(INOUT) :: \fIwin\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -win -Window object (handle). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Win_free frees the window object \fIwin\fP and returns a null handle (equal to MPI_WIN_NULL). This collective call is executed by all processes in the group associated with \fIwin\fP. It can be invoked by a process only after it has completed its involvement in RMA communications on window \fIwin\fP, that is, the process has called MPI_Win_fence, or called MPI_Win_unlock to match a previous call to MPI_Win_lock. 
When the call returns, the window memory can be freed. - -.SH NOTES -.ft R -If the window was created through \fBMPI_Win_allocate\fP or \fBMPI_Win_allocate_shared\fP then the memory buffer allocated in that call will be freed when calling \fBMPI_Win_free\fP. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -MPI_Win_create -MPI_Win_allocate -MPI_Win_allocate_shared -.br - diff --git a/ompi/mpi/man/man3/MPI_Win_free_keyval.3in b/ompi/mpi/man/man3/MPI_Win_free_keyval.3in deleted file mode 100644 index 3dbfe5eee19..00000000000 --- a/ompi/mpi/man/man3/MPI_Win_free_keyval.3in +++ /dev/null @@ -1,52 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Win_free_keyval 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Win_free_keyval\fP \- Frees a window keyval. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Win_free_keyval(int *\fIwin_keyval\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_WIN_FREE_KEYVAL(\fIWIN_KEYVAL, IERROR\fP) - INTEGER \fIWIN_KEYVAL, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Win_free_keyval(\fIwin_keyval\fP, \fIierror\fP) - INTEGER, INTENT(INOUT) :: \fIwin_keyval\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -win_keyval -Key value (integer). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_Win_get_attr.3in b/ompi/mpi/man/man3/MPI_Win_get_attr.3in deleted file mode 100644 index f33798dad8b..00000000000 --- a/ompi/mpi/man/man3/MPI_Win_get_attr.3in +++ /dev/null @@ -1,83 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Win_get_attr 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Win_get_attr\fP \- Obtains the value of a window attribute. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Win_get_attr(MPI_Win \fIwin\fP, int \fIwin_keyval\fP, - void *\fIattribute_val\fP, int *\fIflag\fP) - -.fi -.SH Fortran Syntax (see FORTRAN 77 NOTES) -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_WIN_GET_ATTR(\fIWIN, WIN_KEYVAL, ATTRIBUTE_VAL, FLAG, IERROR\fP) - INTEGER \fIWIN, WIN_KEYVAL, IERROR\fP - INTEGER(KIND=MPI_ADDRESS_KIND) \fIATTRIBUTE_VAL\fP - LOGICAL \fIFLAG\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Win_get_attr(\fIwin\fP, \fIwin_keyval\fP, \fIattribute_val\fP, \fIflag\fP, \fIierror\fP) - TYPE(MPI_Win), INTENT(IN) :: \fIwin\fP - INTEGER, INTENT(IN) :: \fIwin_keyval\fP - INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(OUT) :: \fIattribute_val\fP - LOGICAL, INTENT(OUT) :: \fIflag\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -win -Window to which the attribute is attached (handle). -.TP 1i -win_keyval -Key value (integer). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -attribute_val -Attribute value, unless \fIag\fP = false -.TP 1i -flag -False if no attribute is associated with the key (logical). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -Obtains the value of a window attribute. -.SH FORTRAN 77 NOTES -.ft R -The MPI standard prescribes portable Fortran syntax for -the \fIATTRIBUTE_VAL\fP argument only for Fortran 90. FORTRAN 77 -users may use the non-portable syntax -.sp -.nf - INTEGER*MPI_ADDRESS_KIND \fIATTRIBUTE_VAL\fP -.fi -.sp -where MPI_ADDRESS_KIND is a constant defined in mpif.h -and gives the length of the declared integer in bytes. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. 
- diff --git a/ompi/mpi/man/man3/MPI_Win_get_errhandler.3in b/ompi/mpi/man/man3/MPI_Win_get_errhandler.3in deleted file mode 100644 index ff9984a37f2..00000000000 --- a/ompi/mpi/man/man3/MPI_Win_get_errhandler.3in +++ /dev/null @@ -1,61 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Win_get_errhandler 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Win_get_errhandler\fP \- Retrieves the error handler currently associated with a window. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Win_get_errhandler(MPI_Win \fIwin\fP, MPI_Errhandler *\fIerrhandler\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_WIN_GET_ERRHANDLER(\fIWIN, ERRHANDLER, IERROR\fP) - INTEGER \fIWIN, ERRHANDLER, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Win_get_errhandler(\fIwin\fP, \fIerrhandler\fP, \fIierror\fP) - TYPE(MPI_Win), INTENT(IN) :: \fIwin\fP - TYPE(MPI_Errhandler), INTENT(OUT) :: \fIerrhandler\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -win -Window (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -errhandler -Error handler currently associated with window (handle). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Win_get_errhandler retrieves the error handler currently associated with a window. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. 
The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - - diff --git a/ompi/mpi/man/man3/MPI_Win_get_group.3in b/ompi/mpi/man/man3/MPI_Win_get_group.3in deleted file mode 100644 index 8917ecf4ef7..00000000000 --- a/ompi/mpi/man/man3/MPI_Win_get_group.3in +++ /dev/null @@ -1,60 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Win_get_group 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Win_get_group\fP \- Returns a duplicate of the group of the communicator used to create the window. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -MPI_Win_get_group(MPI_Win \fIwin\fP, MPI_Group *\fIgroup\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_WIN_GET_GROUP(\fIWIN, GROUP, IERROR\fP) - INTEGER \fIWIN, GROUP, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Win_get_group(\fIwin\fP, \fIgroup\fP, \fIierror\fP) - TYPE(MPI_Win), INTENT(IN) :: \fIwin\fP - TYPE(MPI_Group), INTENT(OUT) :: \fIgroup\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -win -Window object (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -group -Group of processes that share access to the window (handle). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Win_get_group returns a duplicate of the group of the communicator used to create the window associated with \fIwin\fP. The group is returned in \fIgroup\fP. 
- -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - diff --git a/ompi/mpi/man/man3/MPI_Win_get_info.3in b/ompi/mpi/man/man3/MPI_Win_get_info.3in deleted file mode 100644 index 9018d782942..00000000000 --- a/ompi/mpi/man/man3/MPI_Win_get_info.3in +++ /dev/null @@ -1,76 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2015 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" Copyright (c) 2020 FUJITSU LIMITED. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Win_get_info 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Win_get_info\fP \- Retrieves active window info hints -. -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Win_get_info(MPI_Win \fIwin\fP, MPI_Info \fI*info_used\fP) -. -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_WIN_GET_INFO(\fIWIN, INFO_USED, IERROR\fP) - INTEGER \fIWIN, INFO_USED, IERROR \fP -. -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Win_get_info(\fIwin\fP, \fIinfo_used\fP, \fIierror\fP) - TYPE(MPI_Win), INTENT(IN) :: \fIwin\fP - TYPE(MPI_Info), INTENT(OUT) :: \fIinfo_used\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -win -Window from which to receive active info hints -. -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -info_used -New info object returned with all active hints on this window. -.TP 1i -IERROR -Fortran only: Error status (integer). -. 
-.SH DESCRIPTION -.ft R -MPI_Win_get_info returns a new info object containing the hints of -the window associated with -.IR win . -The current setting of all hints actually used by the system related -to this window is returned in -.IR info_used . -If no such hints exist, a handle to a newly created info object is -returned that contains no key/value pair. The user is responsible for -freeing info_used via MPI_Info_free. -. -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -MPI_Comm_set_errhandler; the predefined error handler -MPI_ERRORS_RETURN may be used to cause error values to be -returned. Note that MPI does not guarantee that an MPI program can -continue past an error. -. -.SH SEE ALSO -MPI_Win_set_info, -MPI_Win_free diff --git a/ompi/mpi/man/man3/MPI_Win_get_name.3in b/ompi/mpi/man/man3/MPI_Win_get_name.3in deleted file mode 100644 index 6621cb30df5..00000000000 --- a/ompi/mpi/man/man3/MPI_Win_get_name.3in +++ /dev/null @@ -1,65 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Win_get_name 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Win_get_name\fP \- Obtains the name of a window. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Win_get_name(MPI_Win \fIwin\fP, char *\fIwin_name\fP, int *\fIresultlen\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_WIN_GET_NAME(\fIWIN, WIN_NAME, RESULTLEN, IERROR\fP) - INTEGER \fIWIN, RESULTLEN, IERROR\fP - CHARACTER*(*) \fIWIN_NAME\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Win_get_name(\fIwin\fP, \fIwin_name\fP, \fIresultlen\fP, \fIierror\fP) - TYPE(MPI_Win), INTENT(IN) :: \fIwin\fP - CHARACTER(LEN=MPI_MAX_OBJECT_NAME), INTENT(OUT) :: \fIwin_name\fP - INTEGER, INTENT(OUT) :: \fIresultlen\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETER -.ft R -.TP 1i -win -Window whose name is to be returned (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -win_name -the name previously stored on the window, or an empty string if no such name exists (string). -.TP 1i -resultlen -Length of returned name (integer). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - - diff --git a/ompi/mpi/man/man3/MPI_Win_lock.3in b/ompi/mpi/man/man3/MPI_Win_lock.3in deleted file mode 100644 index 5fffb03ee19..00000000000 --- a/ompi/mpi/man/man3/MPI_Win_lock.3in +++ /dev/null @@ -1,92 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2014 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2007-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. 
-.\" $COPYRIGHT$ -.TH MPI_Win_lock 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Win_lock\fP \- Starts an RMA access epoch locking access to a particular rank. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Win_lock(int \fIlock_type\fP, int \fIrank\fP, int \fIassert\fP, MPI_Win \fIwin\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_WIN_LOCK(\fILOCK_TYPE, RANK, ASSERT, WIN, IERROR\fP) - INTEGER \fILOCK_TYPE, RANK, ASSERT, WIN, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Win_lock(\fIlock_type\fP, \fIrank\fP, \fIassert\fP, \fIwin\fP, \fIierror\fP) - INTEGER, INTENT(IN) :: \fIlock_type\fP, \fIrank\fP, \fIassert\fP - TYPE(MPI_Win), INTENT(IN) :: \fIwin\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -lock_type -Either MPI_LOCK_EXCLUSIVE or MPI_LOCK_SHARED (state). -.TP 1i -rank -Rank of locked window (nonnegative integer). -.TP 1i -assert -Program assertion (integer). -.TP 1i -win -Window object (handle). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -Starts an RMA access epoch. Locks ensure that only the windows created by specific processes can be accessed by those processes (and by no other processes) during that epoch. -.sp -Locks are used to protect accesses to the locked target window effected by RMA calls issued between the lock and unlock call, and to protect local load/store accesses to a locked local window executed between the lock and unlock call. -Accesses that are protected by an exclusive lock will not be concurrent at the window site with other accesses to the same window that are lock protected. Accesses that are protected by a shared lock will not be concurrent at the window site with accesses protected by an exclusive lock to the same window. 
-.sp -The \fIassert\fP argument is used to provide assertions on the context of the call that may be used for various optimizations. (See Section 6.4.4 of the MPI-2 Standard.) A value of \fIassert\fP = 0 is always valid. -The following assertion value is supported: -.ft R -.TP 1i -MPI_MODE_NOCHECK -No other processes will hold or attempt to acquire a conflicting lock while the caller holds the window lock. -.sp - -.SH NOTES -.ft R -In a client/server environment in which clients connect to -a server and create windows that span both the client and the -server, if a client or server that has obtained a lock -on such a window and then terminates abnormally, the server or other clients -may hang in a MPI_Win_lock call, failing to notice that the peer MPI job -has terminated. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -MPI_Win_unlock -MPI_Win_lock_all -.br - diff --git a/ompi/mpi/man/man3/MPI_Win_lock_all.3in b/ompi/mpi/man/man3/MPI_Win_lock_all.3in deleted file mode 100644 index bdc1e0320c1..00000000000 --- a/ompi/mpi/man/man3/MPI_Win_lock_all.3in +++ /dev/null @@ -1,84 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2014 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2007-2008 Sun Microsystems, Inc. 
-.\" Copyright (c) 1996 Thinking Machines Corporation -.\" $COPYRIGHT$ -.TH MPI_Win_lock_all 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Win_lock_all\fP \- Starts an RMA access epoch locking access to all processes in the window - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Win_lock_all(int \fIassert\fP, MPI_Win \fIwin\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_WIN_LOCK_ALL(\fIASSERT, WIN, IERROR\fP) - INTEGER \fIASSERT, WIN, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Win_lock_all(\fIassert\fP, \fIwin\fP, \fIierror\fP) - INTEGER, INTENT(IN) :: \fIassert\fP - TYPE(MPI_Win), INTENT(IN) :: \fIwin\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -assert -Program assertion (integer). -.TP 1i -win -Window object (handle). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -Starts an RMA access epoch to all processes in \fIwin\fP, with a lock type of MPI_LOCK_SHARED. During the epoch, the calling process can access the window memory on all processes in \fIwin\fP by using RMA operations. A window locked with MPI_Win_lock_all must be unlocked with MPI_Win_unlock_all. This routine is not collective — the ALL refers to a lock on all members of the group of the window. -.sp -Locks are used to protect accesses to the locked target window effected by RMA calls issued between the lock and unlock call, and to protect local load/store accesses to a locked local window executed between the lock and unlock call. -Accesses that are protected by an exclusive lock will not be concurrent at the window site with other accesses to the same window that are lock protected. Accesses that are protected by a shared lock will not be concurrent at the window site with accesses protected by an exclusive lock to the same window. 
-.sp -The \fIassert\fP argument is used to provide assertions on the context of the call that may be used for various optimizations. (See Section 6.4.4 of the MPI-2 Standard.) A value of \fIassert\fP = 0 is always valid. -The following assertion value is supported: -.ft R -.TP 1i -MPI_MODE_NOCHECK -No other processes will hold or attempt to acquire a conflicting lock while the caller holds the window lock. -.sp - -.SH NOTES -.ft R -In a client/server environment in which clients connect to -a server and create windows that span both the client and the -server, if a client or server that has obtained a lock -on such a window and then terminates abnormally, the server or other clients -may hang in a MPI_Win_lock_all call, failing to notice that the peer MPI job -has terminated. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -MPI_Win_unlock_all -MPI_Win_lock -.br diff --git a/ompi/mpi/man/man3/MPI_Win_post.3in b/ompi/mpi/man/man3/MPI_Win_post.3in deleted file mode 100644 index 74d711cf273..00000000000 --- a/ompi/mpi/man/man3/MPI_Win_post.3in +++ /dev/null @@ -1,84 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2007-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. 
-.\" $COPYRIGHT$ -.TH MPI_Win_post 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Win_post\fP \- Starts an RMA exposure epoch for the local window associated with \fIwin\fP - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Win_post(MPI_Group \fIgroup\fP, int assert, MPI_Win \fIwin\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_WIN_POST(\fIGROUP, ASSERT, WIN, IERROR\fP) - INTEGER GROUP, ASSERT, WIN, IERROR - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Win_post(\fIgroup\fP, \fIassert\fP, \fIwin\fP, \fIierror\fP) - TYPE(MPI_Group), INTENT(IN) :: \fIgroup\fP - INTEGER, INTENT(IN) :: \fIassert\fP - TYPE(MPI_Win), INTENT(IN) :: \fIwin\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -group -The group of origin processes (handle) -.TP 1i -assert -Program assertion (integer) -.TP 1i -win -Window object (handle) - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION - -Starts an RMA exposure epoch for the local window associated with \fIwin\fP. Only the processes belonging to \fIgroup\fP should access the window with RMA calls on \fIwin\fP during this epoch. Each process in \fIgroup\fP must issue a matching call to MPI_Win_start. MPI_Win_post does not block. -.sp -The \fIassert\fP argument is used to provide assertions on the context of the call that may be used for various optimizations. A value of \fIassert\fP = 0 is always valid. The following assertion values are supported: -.ft R -.TP 1i -MPI_MODE_NOCHECK -The matching calls to MPI_Win_start have not yet occurred on any origin processes when this call is made. This assertion must be present for all matching MPI_Win_start calls if used. -.TP 1i -MPI_MODE_NOSTORE -Informs that the local window was not updated by local stores or get calls in the preceding epoch. 
-.TP 1i -MPI_MODE_NOPUT -Informs that the local window will not be updated by put or accummulate calls until the ensuing wait synchronization. -.sp - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Win_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -MPI_Win_start -MPI_Win_wait -.br - - - diff --git a/ompi/mpi/man/man3/MPI_Win_set_attr.3in b/ompi/mpi/man/man3/MPI_Win_set_attr.3in deleted file mode 100644 index 4e428f4dc28..00000000000 --- a/ompi/mpi/man/man3/MPI_Win_set_attr.3in +++ /dev/null @@ -1,80 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Win_set_attr 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Win_set_attr\fP \- Sets the value of a window attribute. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Win_set_attr(MPI_Win \fIwin\fP, int \fIwin_keyval\fP, void *\fIattribute_val\fP) - -.fi -.SH Fortran Syntax (see FORTRAN 77 NOTES) -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_WIN_SET_ATTR(\fIWIN, WIN_KEYVAL, ATTRIBUTE_VAL, IERROR\fP) - INTEGER \fIWIN, WIN_KEYVAL, IERROR\fP - INTEGER(KIND=MPI_ADDRESS_KIND) \fIATTRIBUTE_VAL\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Win_set_attr(\fIwin\fP, \fIwin_keyval\fP, \fIattribute_val\fP, \fIierror\fP) - TYPE(MPI_Win), INTENT(IN) :: \fIwin\fP - INTEGER, INTENT(IN) :: \fIwin_keyval\fP - INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(IN) :: \fIattribute_val\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -win -Window to which attribute will be attached (handle). - -.SH INPUT PARAMETERS -.ft R -.TP 1i -win_keyval -Key value (integer). -.TP 1i -attribute_val -Attribute value. - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R - -.SH FORTRAN 77 NOTES -.ft R -The MPI standard prescribes portable Fortran syntax for -the \fIATTRIBUTE_VAL\fP argument only for Fortran 90. FORTRAN 77 -users may use the non-portable syntax -.sp -.nf - INTEGER*MPI_ADDRESS_KIND \fIATTRIBUTE_VAL\fP -.fi -.sp -where MPI_ADDRESS_KIND is a constant defined in mpif.h -and gives the length of the declared integer in bytes. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. 
- diff --git a/ompi/mpi/man/man3/MPI_Win_set_errhandler.3in b/ompi/mpi/man/man3/MPI_Win_set_errhandler.3in deleted file mode 100644 index 955731044f4..00000000000 --- a/ompi/mpi/man/man3/MPI_Win_set_errhandler.3in +++ /dev/null @@ -1,64 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Win_set_errhandler 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Win_set_errhandler\fP \- Attaches a new error handler to a window. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Win_set_errhandler(MPI_Win \fIwin\fP, MPI_Errhandler \fIerrhandler\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_WIN_SET_ERRHANDLER(\fIWIN, ERRHANDLER, IERROR\fP) - INTEGER \fIWIN, ERRHANDLER, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Win_set_errhandler(\fIwin\fP, \fIerrhandler\fP, \fIierror\fP) - TYPE(MPI_Win), INTENT(IN) :: \fIwin\fP - TYPE(MPI_Errhandler), INTENT(IN) :: \fIerrhandler\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -win -Window (handle). - -.SH INPUT PARAMETER -.ft R -.TP 1i -errhandler -New error handler for window (handle). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Win_set_errhandler attaches a new error handler to a window. The error handler must be either a predefined error handler or an error handler created by a call to MPI_Win_create_errhandler. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. 
By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - - diff --git a/ompi/mpi/man/man3/MPI_Win_set_info.3in b/ompi/mpi/man/man3/MPI_Win_set_info.3in deleted file mode 100644 index 51139e292cb..00000000000 --- a/ompi/mpi/man/man3/MPI_Win_set_info.3in +++ /dev/null @@ -1,77 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2015 Research Organization for Information Science -.\" and Technology (RIST). All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Win_set_info 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Win_set_info\fP \- Set window info hints -. -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Win_set_info(MPI_Win \fIwin\fP, MPI_Info \fIinfo\fP) -. -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_WIN_SET_INFO(\fIWIN, INFO, IERROR\fP) - INTEGER \fIWIN, INFO, IERROR \fP -. -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Win_set_info(\fIwin\fP, \fIinfo\fP, \fIierror\fP) - TYPE(MPI_Win), INTENT(IN) :: \fIwin\fP - TYPE(MPI_Info), INTENT(IN) :: \fIinfo\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -win -Window on which to set info hints -.TP 1i -info -Info object containing hints to be set on -.I win -. -.SH OUTPUT PARAMETERS -.TP 1i -IERROR -Fortran only: Error status (integer). -. -.SH DESCRIPTION -.ft R -MPI_WIN_SET_INFO sets new values for the hints of the window -associated with -.IR win. -MPI_WIN_SET_INFO is a collective routine. The info object may be -different on each process, but any info entries that an implementation -requires to be the same on all processes must appear with the same -value in each process's -.I info -object. -. 
-.SH ERRORS -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -MPI_Comm_set_errhandler; the predefined error handler -MPI_ERRORS_RETURN may be used to cause error values to be -returned. Note that MPI does not guarantee that an MPI program can -continue past an error. -. -.SH SEE ALSO -MPI_Win_get_info, -MPI_Info_create, -MPI_Info_set, -MPI_Info_free diff --git a/ompi/mpi/man/man3/MPI_Win_set_name.3in b/ompi/mpi/man/man3/MPI_Win_set_name.3in deleted file mode 100644 index a9bb4db9e1b..00000000000 --- a/ompi/mpi/man/man3/MPI_Win_set_name.3in +++ /dev/null @@ -1,65 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Win_set_name 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Win_set_name\fP \- Sets the name of a window. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Win_set_name(MPI_Win \fIwin\fP, const char *\fIwin_name\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_WIN_SET_NAME(\fIWIN, WIN_NAME, IERROR\fP) - INTEGER \fIWIN, IERROR\fP - CHARACTER*(*) \fIWIN_NAME\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Win_set_name(\fIwin\fP, \fIwin_name\fP, \fIierror\fP) - TYPE(MPI_Win), INTENT(IN) :: \fIwin\fP - CHARACTER(LEN=*), INTENT(IN) :: \fIwin_name\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT/OUTPUT PARAMETER -.ft R -.TP 1i -win -Window whose identifier is to be set (handle). 
- -.SH INPUT PARAMETER -.ft R -.TP 1i -win_name -The character string used as the name (string). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - - diff --git a/ompi/mpi/man/man3/MPI_Win_shared_query.3in b/ompi/mpi/man/man3/MPI_Win_shared_query.3in deleted file mode 100644 index 7ea6778d8e2..00000000000 --- a/ompi/mpi/man/man3/MPI_Win_shared_query.3in +++ /dev/null @@ -1,105 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2015 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2007-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" $COPYRIGHT$ -.TH MPI_Win_shared_query 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Win_shared_query\fP \- Query a shared memory window - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Win_shared_query (MPI_Win \fIwin\fP, int \fIrank\fP, MPI_Aint *\fIsize\fP, - int *\fIdisp_unit\fP, void *\fIbaseptr\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_WIN_SHARED_QUERY(WIN, RANK, SIZE, DISP_UNIT, BASEPTR, IERROR) - INTEGER WIN, RANK, DISP_UNIT, IERROR - INTEGER(KIND=MPI_ADDRESS_KIND) SIZE, BASEPTR - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Win_shared_query(\fIwin\fP, \fIrank\fP, \fIsize\fP, \fIdisp_unit\fP, \fIbaseptr\fP, \fIierror\fP) - USE, INTRINSIC :: ISO_C_BINDING, ONLY : C_PTR - TYPE(MPI_Win), INTENT(IN) :: \fIwin\fP - INTEGER, INTENT(IN) :: \fIrank\fP - INTEGER(KIND=MPI_ADDRESS_KIND), INTENT(OUT) :: \fIsize\fP - INTEGER, INTENT(OUT) :: \fIdisp_unit\fP - TYPE(C_PTR), INTENT(OUT) :: \fIbaseptr\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -win -Shared memory window object (handle). -.TP 1i -rank -Rank in the group of window \fIwin\fP (non-negative integer) -or MPI_PROC_NULL. - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -size -Size of the window segment (non-negative integer). -.TP 1i -disp_unit -Local unit size for displacements, in bytes (positive integer). -.TP 1i -baseptr -Address for load/store access to window segment -(choice). -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -\fBMPI_Win_shared_query\fP queries the process-local address for -remote memory segments created with MPI_Win_allocate_shared. This -function can return different process-local addresses for the same -physical memory on different processes. The returned memory can be -used for load/store accesses subject to the constraints defined in -MPI-3.1 \[char167] 11.7. This function can only be called with windows -of flavor MPI_WIN_FLAVOR_SHARED. If the passed window is not of flavor -MPI_WIN_FLAVOR_SHARED, the error MPI_ERR_RMA_FLAVOR is raised. When -rank is MPI_PROC_NULL, the \fIpointer\fP, \fIdisp_unit\fP, and -\fIsize\fP returned are the pointer, disp_unit, and size of the memory -segment belonging the lowest rank that specified \fIsize\fP > 0. 
If -all processes in the group attached to the window specified \fIsize\fP -= 0, then the call returns \fIsize\fP = 0 and a \fIbaseptr\fP as if -\fBMPI_Alloc_mem\fP was called with \fIsize\fP = 0. - -.SH C NOTES -.ft R -The parameter \fIbaseptr\fP is of type \fIvoid *\fP to allow passing any pointer object for this parameter. The provided argument should be a pointer to a pointer of arbitrary type (e.g. \fIvoid **\fP). - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value -of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for -I/O function errors. The error handler may be changed with -MPI_Comm_set_errhandler; the predefined error handler -MPI_ERRORS_RETURN may be used to cause error values to be -returned. Note that MPI does not guarantee that an MPI program can -continue past an error. - -.SH SEE ALSO -.ft R -.sp -MPI_Alloc_mem -MPI_Win_allocate_shared diff --git a/ompi/mpi/man/man3/MPI_Win_start.3in b/ompi/mpi/man/man3/MPI_Win_start.3in deleted file mode 100644 index 4bdd2b09a92..00000000000 --- a/ompi/mpi/man/man3/MPI_Win_start.3in +++ /dev/null @@ -1,80 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2007-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Win_start 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Win_start\fP \- Starts an RMA access epoch for \fIwin\fP - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Win_start(MPI_Group \fIgroup\fP, int assert, MPI_Win \fIwin\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_WIN_START(\fIGROUP, ASSERT, WIN, IERROR\fP) - INTEGER GROUP, ASSERT, WIN, IERROR - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Win_start(\fIgroup\fP, \fIassert\fP, \fIwin\fP, \fIierror\fP) - TYPE(MPI_Group), INTENT(IN) :: \fIgroup\fP - INTEGER, INTENT(IN) :: \fIassert\fP - TYPE(MPI_Win), INTENT(IN) :: \fIwin\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -group -The group of target processes (handle). -.TP 1i -assert -Program assertion (integer). -.TP 1i -win -Window object (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Win_start is a one-sided MPI communication synchronization call that starts an RMA access epoch for \fIwin\fP. RMA calls issued on \fIwin\fP during this epoch must -access only windows at processes in \fIgroup\fP. Each process in \fIgroup\fP must issue a matching -call to MPI_Win_post. MPI_Win_start -is allowed to block until the corresponding MPI_Win_post calls have been executed, but is not required to. -.sp -The \fIassert\fP argument is used to provide assertions on the context of the call that may be used for various optimizations. (See Section 6.4.4 of the MPI-2 Standard.) A value of \fIassert\fP = 0 is always valid. The following assertion value is supported: -.ft R -.TP 1i -MPI_MODE_NOCHECK -When this value is passed in to this call, the library assumes that -the post call on the target has been called and it is not necessary -for the library to check to see if such a call has been made. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. 
The error handler may be changed with MPI_Win_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -MPI_Win_post -MPI_Win_complete -.br - diff --git a/ompi/mpi/man/man3/MPI_Win_sync.3in b/ompi/mpi/man/man3/MPI_Win_sync.3in deleted file mode 100644 index 5f4c17b7de4..00000000000 --- a/ompi/mpi/man/man3/MPI_Win_sync.3in +++ /dev/null @@ -1,55 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2014 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2007-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" $COPYRIGHT$ -.TH MPI_Win_sync 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Win_sync\fP, \- Synchronize the private and public copies of the window - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Win_sync (MPI_Win \fIwin\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_WIN_SYNC(\fIWIN, IERROR\fP) - INTEGER \fIWIN, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Win_sync(\fIwin\fP, \fIierror\fP) - TYPE(MPI_Win), INTENT(IN) :: \fIwin\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -win -Window object (handle). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -\fBMPI_Win_sync\fP synchronizes the private and public window copies of \fIwin\fP. For the purposes of synchronizing the private and public window, \fBMPI_Win_sync\fP has the effect of ending and reopening an access and exposure epoch on the window (note that it does not actually end an epoch or complete any pending MPI RMA operations). 
- -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with \fBMPI_Comm_set_errhandler\fP; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. diff --git a/ompi/mpi/man/man3/MPI_Win_test.3in b/ompi/mpi/man/man3/MPI_Win_test.3in deleted file mode 100644 index ffdeff393fe..00000000000 --- a/ompi/mpi/man/man3/MPI_Win_test.3in +++ /dev/null @@ -1,73 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2007-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Win_test 3 "#OMPI_DATE#" ""#PACKAGE_VERSION#"" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Win_test\fP \- Attempts to complete an RMA exposure epoch; a nonblocking version of MPI_Win_wait - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Win_test(MPI_Win \fIwin\fP, int *\fIflag\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_WIN_TEST(\fI WIN, FLAG, IERROR\fP) - INTEGER \fI WIN, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Win_test(\fIwin\fP, \fIflag\fP, \fIierror\fP) - TYPE(MPI_Win), INTENT(IN) :: \fIwin\fP - LOGICAL, INTENT(OUT) :: \fIflag\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -win -Window object (handle) - - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). -.TP 1i -flag -The returning state of the test for epoch closure. 
- - -.SH DESCRIPTION -.ft R -MPI_Win_test is a one-sided MPI communication synchronization call, a -nonblocking version of MPI_Win_wait. It returns \fIflag = true\fP if -MPI_Win_wait would return, \fIflag = false\fP otherwise. The effect of return of MPI_Win_test with \fIflag = true\fP is the same as the effect of a return of MPI_Win_wait. If \fIflag = false\fP is returned, then the call has no visible effect. -.sp -Invoke MPI_Win_test only where MPI_Win_wait can be invoked. Once -the call has returned \fIflag = true\fP, it must not be invoked anew, until the window is posted anew. - - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Win_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -MPI_Win_post -MPI_Win_wait -.br - diff --git a/ompi/mpi/man/man3/MPI_Win_unlock.3in b/ompi/mpi/man/man3/MPI_Win_unlock.3in deleted file mode 100644 index 03623696c78..00000000000 --- a/ompi/mpi/man/man3/MPI_Win_unlock.3in +++ /dev/null @@ -1,69 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2014 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2007-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Win_unlock 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Win_unlock\fP \- Completes an RMA access epoch started by a call to MPI_Win_lock. 
- -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Win_unlock(int \fIrank\fP, MPI_Win \fIwin\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_WIN_UNLOCK(\fIRANK, WIN, IERROR\fP) - INTEGER \fIRANK, WIN, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Win_unlock(\fIrank\fP, \fIwin\fP, \fIierror\fP) - INTEGER, INTENT(IN) :: \fIrank\fP - TYPE(MPI_Win), INTENT(IN) :: \fIwin\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -rank -Rank of window (nonnegative integer). -.TP 1i -win -Window object (handle). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Win_unlock completes an RMA access epoch started by a call to MPI_Win_lock. RMA operations issued during this period will have completed both at the origin and at the target when the call returns. -.sp -Locks are used to protect accesses to the locked target window effected by RMA calls issued between the lock and unlock call, and to protect local load/store accesses to a locked local window executed between the lock and unlock call. Accesses that are protected by an exclusive lock will not be concurrent at the window site with other accesses to the same window that are lock protected. Accesses that are protected by a shared lock will not be concurrent at the window site with accesses protected by an exclusive lock to the same window. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. 
Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -MPI_Win_lock -MPI_Win_unlock_all -.br - - diff --git a/ompi/mpi/man/man3/MPI_Win_unlock_all.3in b/ompi/mpi/man/man3/MPI_Win_unlock_all.3in deleted file mode 100644 index 480fe0dbc05..00000000000 --- a/ompi/mpi/man/man3/MPI_Win_unlock_all.3in +++ /dev/null @@ -1,62 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2014 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2007-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" $COPYRIGHT$ -.TH MPI_Win_unlock_all 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Win_unlock_all\fP \- Completes an RMA access epoch started by a call to MPI_Win_lock_all. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Win_unlock_all(MPI_Win \fIwin\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -MPI_WIN_UNLOCK_ALL(\fIWIN, IERROR\fP) - INTEGER \fIWIN, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Win_unlock_all(\fIwin\fP, \fIierror\fP) - TYPE(MPI_Win), INTENT(IN) :: \fIwin\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -win -Window object (handle). - -.SH OUTPUT PARAMETER -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Win_unlock_all completes an RMA access epoch started by a call to MPI_Win_lock_all. RMA operations issued during this period will have completed both at the origin and at the target when the call returns. -.sp -Locks are used to protect accesses to the locked target window effected by RMA calls issued between the lock and unlock call, and to protect local load/store accesses to a locked local window executed between the lock and unlock call. 
Accesses that are protected by an exclusive lock will not be concurrent at the window site with other accesses to the same window that are lock protected. Accesses that are protected by a shared lock will not be concurrent at the window site with accesses protected by an exclusive lock to the same window. - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -MPI_Win_lock_all -MPI_Win_unlock -.br diff --git a/ompi/mpi/man/man3/MPI_Win_wait.3in b/ompi/mpi/man/man3/MPI_Win_wait.3in deleted file mode 100644 index a33e107ace9..00000000000 --- a/ompi/mpi/man/man3/MPI_Win_wait.3in +++ /dev/null @@ -1,66 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright 2007-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Win_wait 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Win_wait\fP \- Completes an RMA exposure epoch started by a call to MPI_Win_post on \fIwin\fP - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -int MPI_Win_wait(MPI_Win \fIwin\fP) - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -MPI_WIN_WAIT(\fI WIN, IERROR\fP) - INTEGER \fI WIN, IERROR\fP - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -MPI_Win_wait(\fIwin\fP, \fIierror\fP) - TYPE(MPI_Win), INTENT(IN) :: \fIwin\fP - INTEGER, OPTIONAL, INTENT(OUT) :: \fIierror\fP - -.fi -.SH INPUT PARAMETERS -.ft R -.TP 1i -win -Window object (handle). - -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -IERROR -Fortran only: Error status (integer). - -.SH DESCRIPTION -.ft R -MPI_Win_wait is a one-sided MPI communication synchronization call that completes an RMA exposure epoch started by a call to MPI_Win_post on \fIwin\fP. This -call matches calls to MPI_Win_complete(\fIwin\fP) issued by each of the processes that -were granted access to the window during this epoch. The call to MPI_Win_wait blocks -until all matching calls to MPI_Win_complete have occurred. This guarantees that all -these origin processes have completed their RMA accesses to the local window. When the -call returns, all these RMA accesses will have completed at the target window. - - -.SH ERRORS -Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument. -.sp -Before the error value is returned, the current MPI error handler is -called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Win_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error. - -.SH SEE ALSO -MPI_Win_post -.br - diff --git a/ompi/mpi/man/man3/MPI_Wtick.3in b/ompi/mpi/man/man3/MPI_Wtick.3in deleted file mode 100644 index a1eb5ba5dd5..00000000000 --- a/ompi/mpi/man/man3/MPI_Wtick.3in +++ /dev/null @@ -1,52 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2006-2008 Sun Microsystems, Inc. 
-.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2017 Cisco Systems, Inc. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Wtick 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Wtick\fP \- Returns the resolution of MPI_Wtime. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -double MPI_Wtick() - -.fi -.SH Fortran Syntax -.nf -USE MPI -! or the older form: INCLUDE 'mpif.h' -DOUBLE PRECISION MPI_WTICK() - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -DOUBLE PRECISION MPI_WTICK() - -.fi -.SH RETURN VALUE -.ft R -Time in seconds of resolution of MPI_Wtime. - -.SH DESCRIPTION -.ft R -MPI_Wtick returns the resolution of MPI_Wtime in seconds. That is, it -returns, as a double-precision value, the number of seconds between -successive clock ticks. For example, if the clock is implemented by -the hardware as a counter that is incremented every millisecond, the -value returned by MPI_Wtick should be 10^-3. -.PP - -.SH NOTE -This function does not return an error value. Consequently, the result -of calling it before MPI_Init or after MPI_Finalize is undefined. - -.SH SEE ALSO -.ft R -.sp -MPI_Wtime diff --git a/ompi/mpi/man/man3/MPI_Wtime.3in b/ompi/mpi/man/man3/MPI_Wtime.3in deleted file mode 100644 index a64c4954ff8..00000000000 --- a/ompi/mpi/man/man3/MPI_Wtime.3in +++ /dev/null @@ -1,88 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2006-2008 Sun Microsystems, Inc. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2014 Cisco Systems, Inc. All rights reserved. -.\" Copyright (c) 2020 Google, LLC. All rights reserved. -.\" $COPYRIGHT$ -.TH MPI_Wtime 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPI_Wtime\fP \- Returns an elapsed time on the calling processor. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -double MPI_Wtime() - -.fi -.SH Fortran Syntax -.nf -USE MPI -! 
or the older form: INCLUDE 'mpif.h' -DOUBLE PRECISION MPI_WTIME() - -.fi -.SH Fortran 2008 Syntax -.nf -USE mpi_f08 -DOUBLE PRECISION MPI_WTIME() - -.fi -.SH RETURN VALUE -.ft R -Time in seconds since an arbitrary time in the past. - -.SH DESCRIPTION -.ft R -MPI_Wtime returns a floating-point number of seconds, representing elapsed wall-clock time since some time in the past. -.PP -The "time in the past" is guaranteed not to change during the life of the process. The user is responsible for converting large numbers of seconds to other units if they are preferred. -.PP -This function is portable (it returns seconds, not "ticks"), it allows high resolution, and carries no unnecessary baggage. One would use it like this: -.sp -.nf - { - double starttime, endtime; - starttime = MPI_Wtime(); - \&.... stuff to be timed \&... - endtime = MPI_Wtime(); - printf("That took %f seconds\\n",endtime-starttime); - } -.fi -.PP -The times returned are local to the node that called them. There is no requirement that different nodes return the "same" time. -.SH NOTES -The boolean variable MPI_WTIME_IS_GLOBAL, a predefined attribute key that indicates whether clocks are synchronized, does not have a valid value in Open MPI, as the clocks are not guaranteed to be synchronized. - -.PP -This function is intended to be a high-resolution, elapsed (or wall) clock. See MPI_Wtick to determine the resolution of MPI_Wtime. -.PP -On POSIX platforms, this function may utilize a timer that is cheaper -to invoke than the gettimeofday() system call, but will fall back to -gettimeofday() if a cheap high-resolution timer is not available. The -ompi_info command can be consulted to see if Open MPI supports a -native high-resolution timer on your platform; see the value for "MPI_WTIME -support" (or "options:mpi-wtime" when viewing the parsable -output). If this value is "native", a method that is likely to be -cheaper than gettimeofday() will be used to obtain the time when -MPI_Wtime is invoked. 
-.PP -For example, on platforms that support it, the -.I clock_gettime() -function will be used to obtain a monotonic clock value with whatever -precision is supported on that platform (e.g., nanoseconds). -.PP -Note, too, that the MCA parameter opal_timer_require_monotonic can -influcence this behavior. It defaults to true, but if set to false, -Open MPI may use a finer-grained timing mechanism (e.g., the -RDTSC/RDTSCP clock ticks on x86_64 platforms), but is not guaranteed -to be monotonic in some cases (e.g., if the MPI process is not bound -to a single processor core). -.PP -This function does not return an error value. Consequently, the result of calling it before MPI_Init or after MPI_Finalize is undefined. - -.SH SEE ALSO -MPI_Wtick -.br - diff --git a/ompi/mpi/man/man3/Makefile.am b/ompi/mpi/man/man3/Makefile.am deleted file mode 100644 index 324c4032866..00000000000 --- a/ompi/mpi/man/man3/Makefile.am +++ /dev/null @@ -1,504 +0,0 @@ -# -*- makefile -*- -# Copyright (c) 2006-2020 Cisco Systems, Inc. All rights reserved. -# Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved. -# Copyright (c) 2012-2013 Los Alamos National Security, LLC. All rights reserved. -# Copyright (c) 2020 Research Organization for Information Science -# and Technology (RIST). All rights reserved. -# Copyright (c) 2021 Triad National Security, LLC. All rights -# reserved. -# $COPYRIGHT$ -# -# Additional copyrights may follow -# -# $HEADER$ -# - -include $(top_srcdir)/Makefile.ompi-rules - -# For simplicity of maintenance over time, we are in the slow process -# of converting all existing nroff pages to Markdown. As man pages -# are converted to Markdown, move them from TEMPLATE_FILES to -# MD_ILES. 
-# -# Just in case someone looks for it here someday, here is a -# conveninent reference for what Markdown pandoc supports: -# -# https://rmarkdown.rstudio.com/authoring_pandoc_markdown.html - -MD_FILES = \ - MPI_Barrier.3.md \ - MPI_Bsend.3.md \ - MPI_Bsend_init.3.md \ - MPI_Buffer_attach.3.md \ - MPI_Buffer_detach.3.md \ - MPI_Cancel.3.md \ - MPI_Cart_coords.3.md \ - MPI_Cart_create.3.md \ - MPI_Cart_get.3.md \ - MPI_Cart_map.3.md \ - MPI_Cart_rank.3.md \ - MPI_Cart_shift.3.md \ - MPI_Close_port.3.md \ - MPI_Cart_sub.3.md \ - MPI_Cartdim_get.3.md \ - MPI_Comm_accept.3.md \ - MPI_Comm_call_errhandler.3.md \ - MPI_Comm_compare.3.md \ - MPI_Comm_connect.3.md \ - MPI_Comm_create.3.md \ - MPI_Comm_create_errhandler.3.md \ - MPI_Comm_create_from_group.3.md \ - MPI_Comm_create_group.3.md \ - MPI_Comm_create_keyval.3.md \ - MPI_Comm_delete_attr.3.md \ - MPI_Comm_disconnect.3.md \ - MPI_Comm_dup.3.md \ - MPI_Comm_dup_with_info.3.md \ - MPI_T_init_thread.3.md \ - MPI_Status_f2c.3.md \ - MPI_Status_f082c.3.md \ - MPI_Status_f082f.3.md \ - MPI_Bcast.3.md \ - MPI_File_write_ordered.3.md \ - MPI_File_write_ordered_begin.3.md \ - MPI_File_write_ordered_end.3.md \ - MPI_File_write_shared.3.md \ - MPI_Finalize.3.md \ - MPI_Finalized.3.md \ - MPI_Free_mem.3.md \ - MPI_Gather.3.md \ - MPI_Gatherv.3.md \ - MPI_Get.3.md \ - MPI_Get_accumulate.3.md \ - MPI_Get_address.3.md \ - MPI_Get_count.3.md \ - MPI_Get_elements.3.md \ - MPI_Get_library_version.3.md \ - MPI_Get_processor_name.3.md \ - MPI_Get_version.3.md \ - MPI_Graph_create.3.md \ - MPI_Graph_get.3.md \ - MPI_Graph_map.3.md \ - MPI_Graph_neighbors_count.3.md \ - MPI_Graphdims_get.3.md \ - MPI_Grequest_complete.3.md \ - MPI_Grequest_start.3.md \ - MPI_Group_compare.3.md \ - MPI_Group_difference.3.md \ - MPI_Group_excl.3.md \ - MPI_Group_free.3.md \ - MPI_Group_from_session_pset.3.md \ - MPI_Group_incl.3.md \ - MPI_Group_intersection.3.md \ - MPI_Intercomm_create_from_groups.3.md \ - MPI_Session_create_errhandler.3.md \ - 
MPI_Session_f2c.3.md \ - MPI_Session_finalize.3.md \ - MPI_Session_get_info.3.md \ - MPI_Session_get_num_psets.3.md \ - MPI_Session_get_nth_pset.3.md \ - MPI_Session_get_pset_info.3.md \ - MPI_Session_init.3.md - - -TEMPLATE_FILES = \ - MPI_Abort.3in \ - MPI_Accumulate.3in \ - MPI_Add_error_class.3in \ - MPI_Add_error_code.3in \ - MPI_Add_error_string.3in \ - MPI_Address.3in \ - MPI_Aint_add.3in \ - MPI_Aint_diff.3in \ - MPI_Allgather.3in \ - MPI_Iallgather.3in \ - MPI_Allgather_init.3in \ - MPI_Allgatherv.3in \ - MPI_Iallgatherv.3in \ - MPI_Allgatherv_init.3in \ - MPI_Alloc_mem.3in \ - MPI_Allreduce.3in \ - MPI_Iallreduce.3in \ - MPI_Allreduce_init.3in \ - MPI_Alltoall.3in \ - MPI_Ialltoall.3in \ - MPI_Alltoall_init.3in \ - MPI_Alltoallv.3in \ - MPI_Ialltoallv.3in \ - MPI_Alltoallv_init.3in \ - MPI_Alltoallw.3in \ - MPI_Ialltoallw.3in \ - MPI_Alltoallw_init.3in \ - MPI_Attr_delete.3in \ - MPI_Attr_get.3in \ - MPI_Attr_put.3in \ - MPI_Ibarrier.3in \ - MPI_Barrier_init.3in \ - MPI_Ibcast.3in \ - MPI_Bcast_init.3in \ - MPI_Comm_c2f.3in \ - MPI_Comm_idup.3in \ - MPI_Comm_idup_with_info.3in \ - MPI_Comm_f2c.3in \ - MPI_Comm_free.3in \ - MPI_Comm_free_keyval.3in \ - MPI_Comm_get_attr.3in \ - MPI_Comm_get_errhandler.3in \ - MPI_Comm_get_info.3in \ - MPI_Comm_get_name.3in \ - MPI_Comm_get_parent.3in \ - MPI_Comm_group.3in \ - MPI_Comm_join.3in \ - MPI_Comm_rank.3in \ - MPI_Comm_remote_group.3in \ - MPI_Comm_remote_size.3in \ - MPI_Comm_set_attr.3in \ - MPI_Comm_set_errhandler.3in \ - MPI_Comm_set_info.3in \ - MPI_Comm_set_name.3in \ - MPI_Comm_size.3in \ - MPI_Comm_spawn.3in \ - MPI_Comm_spawn_multiple.3in \ - MPI_Comm_split.3in \ - MPI_Comm_split_type.3in \ - MPI_Comm_test_inter.3in \ - MPI_Compare_and_swap.3in \ - MPI_Dims_create.3in \ - MPI_Dist_graph_create.3in \ - MPI_Dist_graph_create_adjacent.3in \ - MPI_Dist_graph_neighbors.3in \ - MPI_Dist_graph_neighbors_count.3in \ - MPI_Errhandler_create.3in \ - MPI_Errhandler_free.3in \ - MPI_Errhandler_get.3in \ - 
MPI_Errhandler_set.3in \ - MPI_Error_class.3in \ - MPI_Error_string.3in \ - MPI_Exscan.3in \ - MPI_Iexscan.3in \ - MPI_Exscan_init.3in \ - MPI_Fetch_and_op.3in \ - MPI_File_c2f.3in \ - MPI_File_call_errhandler.3in \ - MPI_File_close.3in \ - MPI_File_create_errhandler.3in \ - MPI_File_delete.3in \ - MPI_File_f2c.3in \ - MPI_File_get_amode.3in \ - MPI_File_get_atomicity.3in \ - MPI_File_get_byte_offset.3in \ - MPI_File_get_errhandler.3in \ - MPI_File_get_group.3in \ - MPI_File_get_info.3in \ - MPI_File_get_position.3in \ - MPI_File_get_position_shared.3in \ - MPI_File_get_size.3in \ - MPI_File_get_type_extent.3in \ - MPI_File_get_view.3in \ - MPI_File_iread.3in \ - MPI_File_iread_at.3in \ - MPI_File_iread_all.3in \ - MPI_File_iread_at_all.3in \ - MPI_File_iread_shared.3in \ - MPI_File_iwrite.3in \ - MPI_File_iwrite_at.3in \ - MPI_File_iwrite_all.3in \ - MPI_File_iwrite_at_all.3in \ - MPI_File_iwrite_shared.3in \ - MPI_File_open.3in \ - MPI_File_preallocate.3in \ - MPI_File_read.3in \ - MPI_File_read_all.3in \ - MPI_File_read_all_begin.3in \ - MPI_File_read_all_end.3in \ - MPI_File_read_at.3in \ - MPI_File_read_at_all.3in \ - MPI_File_read_at_all_begin.3in \ - MPI_File_read_at_all_end.3in \ - MPI_File_read_ordered.3in \ - MPI_File_read_ordered_begin.3in \ - MPI_File_read_ordered_end.3in \ - MPI_File_read_shared.3in \ - MPI_File_seek.3in \ - MPI_File_seek_shared.3in \ - MPI_File_set_atomicity.3in \ - MPI_File_set_errhandler.3in \ - MPI_File_set_info.3in \ - MPI_File_set_size.3in \ - MPI_File_set_view.3in \ - MPI_File_sync.3in \ - MPI_File_write.3in \ - MPI_File_write_all.3in \ - MPI_File_write_all_begin.3in \ - MPI_File_write_all_end.3in \ - MPI_File_write_at.3in \ - MPI_File_write_at_all.3in \ - MPI_File_write_at_all_begin.3in \ - MPI_File_write_at_all_end.3in \ - MPI_Igather.3in \ - MPI_Gather_init.3in \ - MPI_Igatherv.3in \ - MPI_Gatherv_init.3in \ - MPI_Get_elements_x.3in \ - MPI_Graph_neighbors.3in \ - MPI_Group_c2f.3in \ - MPI_Group_f2c.3in \ - 
MPI_Group_range_excl.3in \ - MPI_Group_range_incl.3in \ - MPI_Group_rank.3in \ - MPI_Group_size.3in \ - MPI_Group_translate_ranks.3in \ - MPI_Group_union.3in \ - MPI_Ibsend.3in \ - MPI_Improbe.3in \ - MPI_Imrecv.3in \ - MPI_Info_c2f.3in \ - MPI_Info_create.3in \ - MPI_Info_delete.3in \ - MPI_Info_dup.3in \ - MPI_Info_env.3in \ - MPI_Info_f2c.3in \ - MPI_Info_free.3in \ - MPI_Info_get.3in \ - MPI_Info_get_nkeys.3in \ - MPI_Info_get_nthkey.3in \ - MPI_Info_get_string.3in \ - MPI_Info_get_valuelen.3in \ - MPI_Info_set.3in \ - MPI_Init.3in \ - MPI_Initialized.3in \ - MPI_Init_thread.3in \ - MPI_Intercomm_create.3in \ - MPI_Intercomm_merge.3in \ - MPI_Iprobe.3in \ - MPI_Irecv.3in \ - MPI_Irsend.3in \ - MPI_Isend.3in \ - MPI_Isendrecv.3in \ - MPI_Isendrecv_replace.3in \ - MPI_Issend.3in \ - MPI_Is_thread_main.3in \ - MPI_Keyval_create.3in \ - MPI_Keyval_free.3in \ - MPI_Lookup_name.3in \ - MPI_Message_c2f.3in \ - MPI_Message_f2c.3in \ - MPI_Mprobe.3in \ - MPI_Mrecv.3in \ - MPI_Neighbor_allgather.3in \ - MPI_Ineighbor_allgather.3in \ - MPI_Neighbor_allgather_init.3in \ - MPI_Neighbor_allgatherv.3in \ - MPI_Ineighbor_allgatherv.3in \ - MPI_Neighbor_allgatherv_init.3in \ - MPI_Neighbor_alltoall.3in \ - MPI_Ineighbor_alltoall.3in \ - MPI_Neighbor_alltoall_init.3in \ - MPI_Neighbor_alltoallv.3in \ - MPI_Ineighbor_alltoallv.3in \ - MPI_Neighbor_alltoallv_init.3in \ - MPI_Neighbor_alltoallw.3in \ - MPI_Ineighbor_alltoallw.3in \ - MPI_Neighbor_alltoallw_init.3in \ - MPI_Op_c2f.3in \ - MPI_Op_commutative.3in \ - MPI_Op_create.3in \ - MPI_Open_port.3in \ - MPI_Op_f2c.3in \ - MPI_Op_free.3in \ - MPI_Pack.3in \ - MPI_Pack_external.3in \ - MPI_Pack_external_size.3in \ - MPI_Pack_size.3in \ - MPI_Parrived.3in \ - MPI_Pcontrol.3in \ - MPI_Pready.3in \ - MPI_Pready_list.3in \ - MPI_Pready_range.3in \ - MPI_Precv_init.3in \ - MPI_Probe.3in \ - MPI_Psend_init.3in \ - MPI_Publish_name.3in \ - MPI_Put.3in \ - MPI_Query_thread.3in \ - MPI_Raccumulate.3in \ - MPI_Recv.3in \ - 
MPI_Recv_init.3in \ - MPI_Reduce.3in \ - MPI_Ireduce.3in \ - MPI_Reduce_init.3in \ - MPI_Reduce_local.3in \ - MPI_Reduce_scatter.3in \ - MPI_Ireduce_scatter.3in \ - MPI_Reduce_scatter_init.3in \ - MPI_Reduce_scatter_block.3in \ - MPI_Ireduce_scatter_block.3in \ - MPI_Reduce_scatter_block_init.3in \ - MPI_Register_datarep.3in \ - MPI_Request_c2f.3in \ - MPI_Request_f2c.3in \ - MPI_Request_free.3in \ - MPI_Request_get_status.3in \ - MPI_Rget.3in \ - MPI_Rget_accumulate.3in \ - MPI_Rput.3in \ - MPI_Rsend.3in \ - MPI_Rsend_init.3in \ - MPI_Scan.3in \ - MPI_Iscan.3in \ - MPI_Scan_init.3in \ - MPI_Scatter.3in \ - MPI_Iscatter.3in \ - MPI_Scatter_init.3in \ - MPI_Scatterv.3in \ - MPI_Iscatterv.3in \ - MPI_Scatterv_init.3in \ - MPI_Send.3in \ - MPI_Send_init.3in \ - MPI_Sendrecv.3in \ - MPI_Sendrecv_replace.3in \ - MPI_Sizeof.3in \ - MPI_Ssend.3in \ - MPI_Ssend_init.3in \ - MPI_Start.3in \ - MPI_Startall.3in \ - MPI_Status_c2f08.3in \ - MPI_Status_c2f.3in \ - MPI_Status_f2f08.3in \ - MPI_Status_set_cancelled.3in \ - MPI_Status_set_elements.3in \ - MPI_Status_set_elements_x.3in \ - MPI_T_category_changed.3in \ - MPI_T_category_get_categories.3in \ - MPI_T_category_get_cvars.3in \ - MPI_T_category_get_info.3in \ - MPI_T_category_get_num.3in \ - MPI_T_category_get_pvars.3in \ - MPI_T_cvar_get_info.3in \ - MPI_T_cvar_get_num.3in \ - MPI_T_cvar_handle_alloc.3in \ - MPI_T_cvar_handle_free.3in \ - MPI_T_cvar_read.3in \ - MPI_T_cvar_write.3in \ - MPI_T_enum_get_info.3in \ - MPI_T_enum_get_item.3in \ - MPI_T_finalize.3in \ - MPI_T_pvar_get_info.3in \ - MPI_T_pvar_get_num.3in \ - MPI_T_pvar_handle_alloc.3in \ - MPI_T_pvar_handle_free.3in \ - MPI_T_pvar_read.3in \ - MPI_T_pvar_readreset.3in \ - MPI_T_pvar_reset.3in \ - MPI_T_pvar_session_create.3in \ - MPI_T_pvar_session_free.3in \ - MPI_T_pvar_start.3in \ - MPI_T_pvar_stop.3in \ - MPI_T_pvar_write.3in \ - MPI_Test.3in \ - MPI_Testall.3in \ - MPI_Testany.3in \ - MPI_Test_cancelled.3in \ - MPI_Testsome.3in \ - MPI_Topo_test.3in \ - 
MPI_Type_c2f.3in \ - MPI_Type_commit.3in \ - MPI_Type_contiguous.3in \ - MPI_Type_create_darray.3in \ - MPI_Type_create_f90_complex.3in \ - MPI_Type_create_f90_integer.3in \ - MPI_Type_create_f90_real.3in \ - MPI_Type_create_hindexed.3in \ - MPI_Type_create_hindexed_block.3in \ - MPI_Type_create_hvector.3in \ - MPI_Type_create_indexed_block.3in \ - MPI_Type_create_keyval.3in \ - MPI_Type_create_resized.3in \ - MPI_Type_create_struct.3in \ - MPI_Type_create_subarray.3in \ - MPI_Type_delete_attr.3in \ - MPI_Type_dup.3in \ - MPI_Type_extent.3in \ - MPI_Type_f2c.3in \ - MPI_Type_free.3in \ - MPI_Type_free_keyval.3in \ - MPI_Type_get_attr.3in \ - MPI_Type_get_contents.3in \ - MPI_Type_get_envelope.3in \ - MPI_Type_get_extent.3in \ - MPI_Type_get_extent_x.3in \ - MPI_Type_get_name.3in \ - MPI_Type_get_true_extent.3in \ - MPI_Type_get_true_extent_x.3in \ - MPI_Type_hindexed.3in \ - MPI_Type_hvector.3in \ - MPI_Type_indexed.3in \ - MPI_Type_lb.3in \ - MPI_Type_match_size.3in \ - MPI_Type_set_attr.3in \ - MPI_Type_set_name.3in \ - MPI_Type_size.3in \ - MPI_Type_size_x.3in \ - MPI_Type_struct.3in \ - MPI_Type_ub.3in \ - MPI_Type_vector.3in \ - MPI_Unpack.3in \ - MPI_Unpack_external.3in \ - MPI_Unpublish_name.3in \ - MPI_Wait.3in \ - MPI_Waitall.3in \ - MPI_Waitany.3in \ - MPI_Waitsome.3in \ - MPI_Win_allocate.3in \ - MPI_Win_allocate_shared.3in \ - MPI_Win_attach.3in \ - MPI_Win_c2f.3in \ - MPI_Win_call_errhandler.3in \ - MPI_Win_complete.3in \ - MPI_Win_create.3in \ - MPI_Win_create_dynamic.3in \ - MPI_Win_create_errhandler.3in \ - MPI_Win_create_keyval.3in \ - MPI_Win_delete_attr.3in \ - MPI_Win_detach.3in \ - MPI_Win_f2c.3in \ - MPI_Win_fence.3in \ - MPI_Win_flush.3in \ - MPI_Win_flush_all.3in \ - MPI_Win_flush_local.3in \ - MPI_Win_flush_local_all.3in \ - MPI_Win_free.3in \ - MPI_Win_free_keyval.3in \ - MPI_Win_get_attr.3in \ - MPI_Win_get_errhandler.3in \ - MPI_Win_get_group.3in \ - MPI_Win_get_info.3in \ - MPI_Win_get_name.3in \ - MPI_Win_lock.3in \ - 
MPI_Win_lock_all.3in \ - MPI_Win_post.3in \ - MPI_Win_set_attr.3in \ - MPI_Win_set_errhandler.3in \ - MPI_Win_set_info.3in \ - MPI_Win_set_name.3in \ - MPI_Win_shared_query.3in \ - MPI_Win_start.3in \ - MPI_Win_sync.3in \ - MPI_Win_test.3in \ - MPI_Win_unlock.3in \ - MPI_Win_unlock_all.3in \ - MPI_Win_wait.3in \ - MPI_Wtick.3in \ - MPI_Wtime.3in - -man_pages_from_md = $(MD_FILES:.3.md=.3) -man_pages_from_template = $(TEMPLATE_FILES:.3in=.3) - -if OPAL_ENABLE_MAN_PAGES -man_MANS = $(man_pages_from_md) $(man_pages_from_template) -MAINTAINERCLEANFILES = $(man_pages_from_md) -endif - -EXTRA_DIST = \ - $(MD_FILES) $(man_pages_from_md) \ - $(TEMPLATE_FILES) $(man_pages_from_template) diff --git a/ompi/mpi/man/man5/.gitignore b/ompi/mpi/man/man5/.gitignore deleted file mode 100644 index 5d06cd46646..00000000000 --- a/ompi/mpi/man/man5/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.5 diff --git a/ompi/mpi/man/man5/MPI.5 b/ompi/mpi/man/man5/MPI.5 deleted file mode 100644 index 0c58931b0a5..00000000000 --- a/ompi/mpi/man/man5/MPI.5 +++ /dev/null @@ -1 +0,0 @@ -.so man5/Open-MPI.5 diff --git a/ompi/mpi/man/man5/MPI_T.5.md b/ompi/mpi/man/man5/MPI_T.5.md deleted file mode 100644 index a2793cfbac4..00000000000 --- a/ompi/mpi/man/man5/MPI_T.5.md +++ /dev/null @@ -1,82 +0,0 @@ -# NAME - -Open MPI's MPI_T interface - General information - -# DESCRIPTION - -There are a few Open MPI-specific notes worth mentioning about its `MPI_T` interface implementation. - -## MPI_T Control Variables - -Open MPI's implementation of the `MPI_T` Control Variable ("cvar") APIs is an interface to Open MPI's underlying Modular Component Architecture (MCA) parameters/variables. Simply put: using the `MPI_T` cvar interface is another mechanism to get/set Open MPI MCA parameters. - -In order of precedence (highest to lowest), Open MPI provides the following mechanisms to set MCA parameters: - -1. The `MPI_T` interface has the highest precedence. 
Specifically: values set via the `MPI_T` interface will override all other settings. -1. The `mpirun(1)` / `mpiexec(1)` command line (e.g., via the `--mca` parameter). -1. Environment variables. -1. Parameter files have the lowest precedence. Specifically: values set via parameter files can be overridden by any of the other MCA-variable setting mechanisms. - -## MPI initialization - -An application may use the `MPI_T` interface before MPI is initialized to set MCA parameters. Setting MPI-level MCA parameters before MPI is initialized may affect _how_ MPI is initialized (e.g., by influencing which frameworks and components are selected). - -The following example sets the `pml` and `btl` MCA params before invoking `MPI_Init(3)` in order to force a specific selection of PML and BTL components: - -```c -int provided, index, count; -MPI_T_cvar_handle pml_handle, btl_handle; -char pml_value[64], btl_value[64]; - -MPI_T_init_thread(MPI_THREAD_SINGLE, &provided); - -MPI_T_cvar_get_index("pml", &index); -MPI_T_cvar_handle_alloc(index, NULL, &pml_handle, &count); -MPI_T_cvar_write(pml_handle, "ob1"); - -MPI_T_cvar_get_index("btl", &index); -MPI_T_cvar_handle_alloc(index, NULL, &btl_handle, &count); -MPI_T_cvar_write(btl_handle, "tcp,vader,self"); - -MPI_T_cvar_read(pml_handle, pml_value); -MPI_T_cvar_read(btl_handle, btl_value); -printf("Set value of cvars: PML: %s, BTL: %s\n", - pml_value, btl_value); - -MPI_T_cvar_handle_free(&pml_handle); -MPI_T_cvar_handle_free(&btl_handle); - -MPI_Init(NULL, NULL); - -// ... - -MPI_Finalize(); - -MPI_T_finalize(); -``` - -Note that once MPI is initialized, most Open MPI cvars become read-only. - -For example, after MPI is initialized, it is no longer possible to set the PML and BTL selection mechanisms. This is because many of these MCA parameters are only used during MPI initialization; setting them after MPI has already been initialized would be meaningless, anyway. 
- -## MPI_T Categories - -Open MPI's MPI_T categories are organized hierarchically: - -1. Layer (or "project"). There are two layers in Open MPI: - * `ompi`: This layer contains cvars, pvars, and sub categories related to MPI characteristics. - * `opal`: This layer generally contains cvars, pvars, and sub categories of lower-layer constructions, such as operating system issues, networking issues, etc. -2. Framework or section. - * In most cases, the next level in the hierarchy is the Open MPI MCA framework. - * For example, you can find the `btl` framework under the `opal` layer (because it has to do with the underlying networking). - * Additionally, the `pml` framework is under the `ompi` layer (because it has to do with MPI semantics of point-to-point messaging). - * There are a few non-MCA-framework entities under the layer, however. - * For example, there is an `mpi` section under both the `opal` and `ompi` layers for general/core MPI constructs. -3. Component. - * If relevant, the third level in the hierarchy is the MCA component. - * For example, the `tcp` component can be found under the `opal` framework in the `opal` layer. - -# SEE ALSO - -[`MPI_T_init`(3)](MPI_T_init.html), -[`MPI_T_finalize`(3)](MPI_T_finalize.html) diff --git a/ompi/mpi/man/man5/Makefile.am b/ompi/mpi/man/man5/Makefile.am deleted file mode 100644 index 8e7e8679c06..00000000000 --- a/ompi/mpi/man/man5/Makefile.am +++ /dev/null @@ -1,33 +0,0 @@ -# -*- makefile -*- -# -# Copyright (c) 2020 Cisco Systems, Inc. All rights reserved. 
-# -# $COPYRIGHT$ -# -# Additional copyrights may follow -# -# $HEADER$ -# - -include $(top_srcdir)/Makefile.ompi-rules - -# Just in case someone looks for it here someday, here is a -# conveninent reference for what Markdown pandoc supports: -# -# https://rmarkdown.rstudio.com/authoring_pandoc_markdown.html -MD_FILES = \ - MPI_T.5.md \ - Open-MPI.5.md - -native_nroff_files = \ - MPI.5 \ - OpenMPI.5 - -man_pages_from_md = $(MD_FILES:.5.md=.5) - -if OPAL_ENABLE_MAN_PAGES -man_MANS = $(man_pages_from_md) $(native_nroff_files) -MAINTAINERCLEANFILES = $(man_pages_from_md) -endif - -EXTRA_DIST = $(MD_FILES) $(man_pages_from_md) $(native_nroff_files) diff --git a/ompi/mpi/man/man5/Open-MPI.5.md b/ompi/mpi/man/man5/Open-MPI.5.md deleted file mode 100644 index 78c97399f45..00000000000 --- a/ompi/mpi/man/man5/Open-MPI.5.md +++ /dev/null @@ -1,145 +0,0 @@ -# NAME - -Open MPI - General information - -# OPEN MPI - -Open MPI is an open source implementation of MPI (message-passing -interface), the industry-standard specification for writing -message-passing programs. Message passing is a programming model that -gives the programmer explicit control over interprocess communication. - -The MPI specification was developed by the MPI Forum, a group of -software developers, computer vendors, academics, and computer-science -researchers whose goal was to develop a standard for writing -message-passing programs that would be efficient, flexible, and -portable. - -The outcome, known as the MPI Standard, was first published in 1993; its -most recent version (MPI-3.1) was published in June 2015. Open MPI -includes all MPI 3.1-compliant routines. - -For more information about Open MPI, see -[https://www.open-mpi.org](https://www.open-mpi.org). - -The MPI standards are available at -[https://www.mpi-forum.org](https://www.mpi-forum.org). 
- -# MAN PAGE SYNTAX - -Man pages for Open MPI and Open MPI I/O routines are named according to -C syntax, that is, they begin with the prefix `MPI_`, all in -uppercase, and the first letter following the `MPI_` prefix is also -uppercase. The rest of the letters in the routine are all lowercase, for -example, `MPI_Comm_get_attr`. - -# ENVIRONMENT - -To fine-tune your Open MPI environment, you can either use arguments -to the `mpirun` or `mpiexec` commands, or you can use MCA parameters. - -For more information on arguments, see the `mpirun`(1) man page. - -For a complete listing of MCA parameters and their descriptions, issue -the command `ompi_info --all`. This information also appears in the -FAQ on the Open MPI web site at -[https://www.open-mpi.org/faq/?category=tuning#mca-params](https://www.open-mpi.org/faq/?category=tuning#mca-params). - -# ERRORS - -All MPI routines (except `MPI_Wtime` and `MPI_Wtick`) return an error -value; C routines as the value of the function and Fortran routines in -the last argument. Before the value is returned, the current MPI error -handler is called. By default, this error handler aborts the MPI job. -The error handler may be changed with `MPI_Comm_set_errhandler`; the -predefined error handler `MPI_ERRORS_RETURN` may be used to cause error -values to be returned. Note that MPI does not guarantee that an MPI -program can continue past an error. - -For more information on Open MPI error codes, see `mpi.h` in the -`include` directory. - -Standard error return classes for Open MPI: - -| Error name | Error value | Description | -|---|---|---| -| MPI_SUCCESS | 0 | Successful return code. | -| MPI_ERR_BUFFER | 1 | Invalid buffer pointer. | -| MPI_ERR_COUNT | 2 | Invalid count argument. | -| MPI_ERR_TYPE | 3 | Invalid datatype argument. | -| MPI_ERR_TAG | 4 | Invalid tag argument. | -| MPI_ERR_COMM | 5 | Invalid communicator. | -| MPI_ERR_RANK | 6 | Invalid rank. | -| MPI_ERR_REQUEST | 7 | Invalid MPI_Request handle. 
| -| MPI_ERR_ROOT | 8 | Invalid root. | -| MPI_ERR_GROUP | 9 | Null group passed to function. | -| MPI_ERR_OP | 10 | Invalid operation. | -| MPI_ERR_TOPOLOGY | 11 | Invalid topology. | -| MPI_ERR_DIMS | 12 | Illegal dimension argument. | -| MPI_ERR_ARG | 13 | Invalid argument. | -| MPI_ERR_UNKNOWN | 14 | Unknown error. | -| MPI_ERR_TRUNCATE | 15 | Message truncated on receive. | -| MPI_ERR_OTHER | 16 | Other error; use Error_string. | -| MPI_ERR_INTERN | 17 | Internal error code. | -| MPI_ERR_IN_STATUS | 18 | Look in status for error value. | -| MPI_ERR_PENDING | 19 | Pending request. | -| MPI_ERR_ACCESS | 20 | Permission denied. | -| MPI_ERR_AMODE | 21 | Unsupported amode passed to open. | -| MPI_ERR_ASSERT | 22 | Invalid assert. | -| MPI_ERR_BAD_FILE | 23 | Invalid file name (for example, path name too long). | -| MPI_ERR_BASE | 24 | Invalid base. | -| MPI_ERR_CONVERSION | 25 | An error occurred in a user-supplied data-conversion function. | -| MPI_ERR_DISP | 26 | Invalid displacement. | -| MPI_ERR_DUP_DATAREP | 27 | Conversion functions could not be registered because a data representation identifier that was already defined was passed to MPI_REGISTER_DATAREP. | -| MPI_ERR_FILE_EXISTS | 28 | File exists. | -| MPI_ERR_FILE_IN_USE | 29 | File operation could not be completed, as the file is currently open by some process. | -| MPI_ERR_FILE | 30 | Invalid file handle. | -| MPI_ERR_INFO_KEY | 31 | Illegal info key. | -| MPI_ERR_INFO_NOKEY | 32 | No such key. | -| MPI_ERR_INFO_VALUE | 33 | Illegal info value. | -| MPI_ERR_INFO | 34 | Invalid info object. | -| MPI_ERR_IO | 35 | I/O error. | -| MPI_ERR_KEYVAL | 36 | Illegal key value. | -| MPI_ERR_LOCKTYPE | 37 | Invalid locktype. | -| MPI_ERR_NAME | 38 | Name not found. | -| MPI_ERR_NO_MEM | 39 | Memory exhausted. | -| MPI_ERR_NOT_SAME | 40 | Collective argument not identical on all processes, or collective routines called in a different order by different processes. | -| MPI_ERR_NO_SPACE | 41 | Not enough space. 
| -| MPI_ERR_NO_SUCH_FILE | 42 | File (or directory) does not exist. | -| MPI_ERR_PORT | 43 | Invalid port. | -| MPI_ERR_PROC_ABORTED | 74 | Operation failed because a remote peer has aborted. | -| MPI_ERR_QUOTA | 44 | Quota exceeded. | -| MPI_ERR_READ_ONLY | 45 | Read-only file system. | -| MPI_ERR_RMA_CONFLICT | 46 | Conflicting accesses to window. | -| MPI_ERR_RMA_SYNC | 47 | Erroneous RMA synchronization. | -| MPI_ERR_SERVICE | 48 | Invalid publish/unpublish. | -| MPI_ERR_SIZE | 49 | Invalid size. | -| MPI_ERR_SPAWN | 50 | Error spawning. | -| MPI_ERR_UNSUPPORTED_DATAREP | 51 | Unsupported datarep passed to MPI_File_set_view. | -| MPI_ERR_UNSUPPORTED_OPERATION | 52 | Unsupported operation, such as seeking on a file that supports only sequential access. | -| MPI_ERR_WIN | 53 | Invalid window. | -| MPI_T_ERR_MEMORY | 54 | Out of memory. | -| MPI_T_ERR_NOT_INITIALIZED | 55 | Interface not initialized. | -| MPI_T_ERR_CANNOT_INIT | 56 | Interface not in the state to be initialized. | -| MPI_T_ERR_INVALID_INDEX | 57 | The enumeration index is invalid. | -| MPI_T_ERR_INVALID_ITEM| 58 | The item index queried is out of range. | -| MPI_T_ERR_INVALID_HANDLE | 59 | The handle is invalid. | -| MPI_T_ERR_OUT_OF_HANDLES | 60 | No more handles available. | -| MPI_T_ERR_OUT_OF_SESSIONS | 61 | No more sessions available. | -| MPI_T_ERR_INVALID_SESSION | 62 | Session argument is not a valid session. | -| MPI_T_ERR_CVAR_SET_NOT_NOW | 63 |Variable cannot be set at this moment. | -| MPI_T_ERR_CVAR_SET_NEVER | 64 | Variable cannot be set until end of execution. | -| MPI_T_ERR_PVAR_NO_STARTSTOP | 65 | Variable cannot be started or stopped. | -| MPI_T_ERR_PVAR_NO_WRITE | 66 | Variable cannot be written or reset. | -| MPI_T_ERR_PVAR_NO_ATOMIC | 67 | Variable cannot be read and written atomically. | -| MPI_ERR_RMA_RANGE | 68 | Target memory is not part of the window (in the case of a window created with MPI_WIN_CREATE_DYNAMIC, target memory is not attached). 
| -| MPI_ERR_RMA_ATTACH | 69 | Memory cannot be attached (e.g., because of resource exhaustion). | -| MPI_ERR_RMA_FLAVOR | 70 | Passed window has the wrong flavor for the called function. | -| MPI_ERR_RMA_SHARED | 71 | Memory cannot be shared (e.g., some process in the group of the specified communicator cannot expose shared memory). | -| MPI_T_ERR_INVALID | 72 | Invalid use of the interface or bad parameter values(s). | -| MPI_T_ERR_INVALID_NAME | 73 | The variable or category name is invalid. | -| MPI_ERR_LASTCODE | 93 | Last error code. | - -# SEE ALSO - -MPI_T(5) diff --git a/ompi/mpi/man/man5/OpenMPI.5 b/ompi/mpi/man/man5/OpenMPI.5 deleted file mode 100644 index 0c58931b0a5..00000000000 --- a/ompi/mpi/man/man5/OpenMPI.5 +++ /dev/null @@ -1 +0,0 @@ -.so man5/Open-MPI.5 diff --git a/ompi/mpiext/affinity/c/Makefile.am b/ompi/mpiext/affinity/c/Makefile.am index e42dfebdafc..273e70e8219 100644 --- a/ompi/mpiext/affinity/c/Makefile.am +++ b/ompi/mpiext/affinity/c/Makefile.am @@ -2,7 +2,7 @@ # Copyright (c) 2004-2009 The Trustees of Indiana University and Indiana # University Research and Technology # Corporation. All rights reserved. -# Copyright (c) 2010-2014 Cisco Systems, Inc. All rights reserved. +# Copyright (c) 2010-2022 Cisco Systems, Inc. All rights reserved. # Copyright (c) 2018 Research Organization for Information Science # and Technology (RIST). All rights reserved. # $COPYRIGHT$ @@ -25,8 +25,6 @@ CLEANFILES = example # header files do the Right Thing. AM_CPPFLAGS = -DOMPI_PROFILE_LAYER=0 -DOMPI_COMPILING_FORTRAN_WRAPPERS=1 -include $(top_srcdir)/Makefile.ompi-rules - # Convenience libtool library that will be slurped up into libmpi.la. 
noinst_LTLIBRARIES = libmpiext_affinity_c.la @@ -45,11 +43,4 @@ libmpiext_affinity_c_la_SOURCES = \ mpiext_affinity_str.c libmpiext_affinity_c_la_LDFLAGS = -module -avoid-version -# Man page installation -nodist_man_MANS = OMPI_Affinity_str.3 - -# Man page sources -EXTRA_DIST = $(nodist_man_MANS:.3=.3in) example.c - -distclean-local: - rm -f $(nodist_man_MANS) +EXTRA_DIST = example.c diff --git a/ompi/mpiext/affinity/c/OMPI_Affinity_str.3in b/ompi/mpiext/affinity/c/OMPI_Affinity_str.3in deleted file mode 100644 index a9b0a8b1655..00000000000 --- a/ompi/mpiext/affinity/c/OMPI_Affinity_str.3in +++ /dev/null @@ -1,202 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright 2007-2010 Oracle and/or its affiliates. All rights reserved. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2010 Cisco Systems, Inc. All rights reserved. -.\" $COPYRIGHT$ -.TH OMPI_Affinity_str 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBOMPI_Affinity_str\fP \- Obtain prettyprint strings of processor affinity information for this process - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -#include - -int OMPI_Affinity_str(ompi_affinity_fmt_type_t \fIfmt_type\fP, - char \fIompi_bound\fP[OMPI_AFFINITY_STRING_MAX], - char \fIcurrent_binding\fP[OMPI_AFFINITY_STRING_MAX], - char \fIexists\fP[OMPI_AFFINITY_STRING_MAX]) -.fi -.SH Fortran Syntax -There is no Fortran binding for this function. -. -.SH Fortran 2008 Syntax -There is no Fortran 2008 binding for this function. -. -.SH C++ Syntax -There is no C++ binding for this function. -. -.SH INPUT PARAMETERS -.ft R -.TP 1i -fmt_type -An enum indicating how to format the returned ompi_bound and -current_binding strings. OMPI_AFFINITY_RSRC_STRING_FMT returns the -string as human-readable resource names, such as "socket 0, core 0". - -OMPI_AFFINITY_LAYOUT_FMT returns ASCII art representing where this MPI -process is bound relative to the machine resource layout. For example -"[. B][. 
.]" shows the process that called the routine is bound to -socket 0, core 1 in a system with 2 sockets, each containing 2 cores. - -See below for more output examples. - -. -.SH OUTPUT PARAMETERS -.ft R -.TP 1i -ompi_bound -A prettyprint string describing what processor(s) Open MPI bound this -process to, or a string indicating that Open MPI did not bind this -process. -. -.TP 1i -current_binding -A prettyprint string describing what processor(s) this process is -currently bound to, or a string indicating that the process is bound -to all available processors (and is therefore considered "unbound"). -. -.TP 1i -exists -A prettyprint string describing the available sockets and sockets on -this host. - -.SH DESCRIPTION -.ft R -Open MPI may bind a process to specific sockets and/or cores at -process launch time. This non-standard Open MPI function call returns -prettyprint information about three things: -. -.TP -Where Open MPI bound this process. -The string returned in -.B -ompi_bound -will either indicate that Open MPI did not bind this process to -anything, or it will contain a prettyprint description of the -processor(s) to which Open MPI bound this process. -. -.TP -Where this process is currently bound. -Regardless of whether Open MPI bound this process or not, another -entity may have bound it. The string returned in -.B current_binding -will indicate what the -.I -current -binding is of this process, regardless of what Open MPI may have done -earlier. The string returned will either indicate that the process is -unbound (meaning that it is bound to all available processors) or it -will contain a prettyprint description of the sockets and cores to -which the process is currently bound. -. -.TP -What processors exist. -As a convenience to the user, the -.B -exists -string will contain a prettyprint description of the sockets and cores -that this process can see (which is -.I usually -all processors in the system). 
- -.SH Examples -.ft R -\fBExample 1:\fP Print out processes binding using resource string format. -.sp -.nf - int rank; - char ompi_bound[OMPI_AFFINITY_STRING_MAX]; - char current_binding[OMPI_AFFINITY_STRING_MAX]; - char exists[OMPI_AFFINITY_STRING_MAX]; - - MPI_Init(&argc, &argv); - MPI_Comm_rank(MPI_COMM_WORLD, &rank); - - OMPI_Affinity_str(OMPI_AFFINITY_RSRC_STRING_FMT, - ompi_bound, current_binding, exists); - printf("rank %d: \\n" - " ompi_bound: %s\\n" - " current_binding: %s\\n" - " exists: %s\\n", - rank, ompi_bound, current_binding, exists); - ... -.fi -.PP -Output of mpirun -np 2 -bind-to-core a.out: -.nf -rank 0: - ompi_bound: socket 0[core 0] - current_binding: socket 0[core 0] - exists: socket 0 has 4 cores -rank 1: - ompi_bound: socket 0[core 1] - current_binding: socket 0[core 1] - exists: socket 0 has 4 cores -.fi -.PP -Output of mpirun -np 2 -bind-to-socket a.out: -.nf -rank 0: - ompi_bound: socket 0[core 0-3] - current_binding: Not bound (or bound to all available processors) - exists: socket 0 has 4 cores -rank 1: - ompi_bound: socket 0[core 0-3] - current_binding: Not bound (or bound to all available processors) - exists: socket 0 has 4 cores -.fi -.sp -.br -\fBExample 2:\fP Print out processes binding using layout string format. -.sp -.nf - int rank; - char ompi_bound[OMPI_AFFINITY_STRING_MAX]; - char current_binding[OMPI_AFFINITY_STRING_MAX]; - char exists[OMPI_AFFINITY_STRING_MAX]; - - MPI_Init(&argc, &argv); - MPI_Comm_rank(MPI_COMM_WORLD, &rank); - - OMPI_Affinity_str(OMPI_AFFINITY_LAYOUT_FMT, - ompi_bound, current_binding, exists); - printf("rank %d: \\n" - " ompi_bound: %s\\n" - " current_binding: %s\\n" - " exists: %s\\n", - rank, ompi_bound, current_binding, exists); - ... -.fi -.PP -Output of mpirun -np 2 -bind-to-core a.out: -.nf -rank 0: - ompi_bound: [B . . .] - current_binding: [B . . .] - exists: [. . . .] -rank 1: - ompi_bound: [. B . .] - current_binding: [. B . .] - exists: [. . . .] 
-.fi -.PP -Output of mpirun -np 2 -bind-to-socket a.out: -.nf -rank 0: - ompi_bound: [B B B B] - current_binding: [B B B B] - exists: [. . . .] -rank 1: - ompi_bound: [B B B B] - current_binding: [B B B B] - exists: [. . . .] -.fi - -.SH See Also -.ft R -.nf -mpirun(1) -.fi diff --git a/ompi/mpiext/cuda/c/MPIX_Query_cuda_support.3in b/ompi/mpiext/cuda/c/MPIX_Query_cuda_support.3in deleted file mode 100644 index 711d9fe1f9c..00000000000 --- a/ompi/mpiext/cuda/c/MPIX_Query_cuda_support.3in +++ /dev/null @@ -1,59 +0,0 @@ -.\" Copyright 2007-2010 Oracle and/or its affiliates. All rights reserved. -.\" Copyright (c) 1996 Thinking Machines Corporation -.\" Copyright (c) 2010 Cisco Systems, Inc. All rights reserved. -.\" Copyright (c) 2015 NVIDIA, Inc. All rights reserved. -.TH MPIx_CUDA_SUPPORT 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -\fBMPIX_Query_cuda_support\fP \- Returns 1 if there is CUDA aware support and 0 if there is not. - -.SH SYNTAX -.ft R -.SH C Syntax -.nf -#include -#include - -int MPIX_Query_cuda_support(void) -.fi -.SH Fortran Syntax -There is no Fortran binding for this function. -. -.SH C++ Syntax -There is no C++ binding for this function. -. -.SH DESCRIPTION -.ft R - -This routine return 1 if MPI library is build with CUDA and runtime supports CUDA buffers. -This routine must be called after MPI is initialized by a call to MPI_Init or MPI_Init_thread. 
- -.SH Examples -.ft R -.nf - -#include -#include "mpi.h" - -#include "mpi-ext.h" /* Needed for CUDA-aware check */ - -int main(int argc, char *argv[]) -{ - - MPI_Init(&argc, &argv); - - if (MPIX_Query_cuda_support()) { - printf("This MPI library has CUDA-aware support.\n"); - } else { - printf("This MPI library does not have CUDA-aware support.\n"); - } - MPI_Finalize(); - - return 0; -} -.fi - -.SH See Also -.ft R -.nf - -.fi diff --git a/ompi/mpiext/cuda/c/Makefile.am b/ompi/mpiext/cuda/c/Makefile.am index f303cc70824..365093abc2d 100644 --- a/ompi/mpiext/cuda/c/Makefile.am +++ b/ompi/mpiext/cuda/c/Makefile.am @@ -2,7 +2,7 @@ # Copyright (c) 2004-2009 The Trustees of Indiana University and Indiana # University Research and Technology # Corporation. All rights reserved. -# Copyright (c) 2010-2014 Cisco Systems, Inc. All rights reserved. +# Copyright (c) 2010-2022 Cisco Systems, Inc. All rights reserved. # Copyright (c) 2015 NVIDIA, Inc. All rights reserved. # Copyright (c) 2018 Research Organization for Information Science # and Technology (RIST). All rights reserved. @@ -20,8 +20,6 @@ # header files do the Right Thing. AM_CPPFLAGS = -DOMPI_PROFILE_LAYER=0 -DOMPI_COMPILING_FORTRAN_WRAPPERS=1 -include $(top_srcdir)/Makefile.ompi-rules - # Convenience libtool library that will be slurped up into libmpi.la. noinst_LTLIBRARIES = libmpiext_cuda_c.la @@ -39,12 +37,3 @@ libmpiext_cuda_c_la_SOURCES = \ $(ompi_HEADERS) \ mpiext_cuda.c libmpiext_cuda_c_la_LDFLAGS = -module -avoid-version - -# Man page installation -nodist_man_MANS = MPIX_Query_cuda_support.3 - -# Man page sources -EXTRA_DIST = $(nodist_man_MANS:.3=.3in) - -distclean-local: - rm -f $(nodist_man_MANS) diff --git a/ompi/tools/ompi_info/Makefile.am b/ompi/tools/ompi_info/Makefile.am index 4a5e8fa1437..f174e7ab449 100644 --- a/ompi/tools/ompi_info/Makefile.am +++ b/ompi/tools/ompi_info/Makefile.am @@ -9,7 +9,7 @@ # University of Stuttgart. All rights reserved. 
# Copyright (c) 2004-2005 The Regents of the University of California. # All rights reserved. -# Copyright (c) 2008-2020 Cisco Systems, Inc. All rights reserved. +# Copyright (c) 2008-2022 Cisco Systems, Inc. All rights reserved. # Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved. # Copyright (c) 2012 Los Alamos National Security, LLC. # All rights reserved. @@ -42,10 +42,12 @@ AM_CFLAGS = \ -DOPAL_CC_ABSOLUTE="\"@OPAL_CC_ABSOLUTE@\"" \ -DOMPI_CXX_ABSOLUTE="\"@OMPI_CXX_ABSOLUTE@\"" -include $(top_srcdir)/Makefile.ompi-rules +# JMS Delete me +#include $(top_srcdir)/Makefile.ompi-rules -man_pages = ompi_info.1 -EXTRA_DIST = $(man_pages:.1=.1in) +# JMS What to do about this? +#man_pages = ompi_info.1 +#EXTRA_DIST = $(man_pages:.1=.1in) if OPAL_INSTALL_BINARIES diff --git a/ompi/tools/wrappers/Makefile.am b/ompi/tools/wrappers/Makefile.am index 26b3a8a19cf..0c785631804 100644 --- a/ompi/tools/wrappers/Makefile.am +++ b/ompi/tools/wrappers/Makefile.am @@ -9,7 +9,7 @@ # University of Stuttgart. All rights reserved. # Copyright (c) 2004-2005 The Regents of the University of California. # All rights reserved. -# Copyright (c) 2006-2021 Cisco Systems, Inc. All rights reserved. +# Copyright (c) 2006-2022 Cisco Systems, Inc. All rights reserved. # Copyright (c) 2012 Los Alamos National Security, LLC. All rights reserved. # Copyright (c) 2013 Intel, Inc. All rights reserved. 
# Copyright (c) 2014 Research Organization for Information Science @@ -22,26 +22,6 @@ # $HEADER$ # -include $(top_srcdir)/Makefile.ompi-rules - -generated_man_pages = mpicc.1 - -if OMPI_HAVE_CXX_COMPILER -generated_man_pages += mpic++.1 mpicxx.1 -endif - -if OMPI_HAVE_FORTRAN_COMPILER -generated_man_pages += mpifort.1 mpif77.1 mpif90.1 -endif - -if OMPI_WANT_JAVA_BINDINGS -generated_man_pages += mpijavac.1 -endif - -man_pages = $(generated_man_pages) - -EXTRA_DIST = mpif77.1in mpijavac.1in mpijavac.pl.in - if OPAL_WANT_SCRIPT_WRAPPER_COMPILERS bin_SCRIPTS = ompi_wrapper_script @@ -105,8 +85,6 @@ if OMPI_WANT_JAVA_BINDINGS bin_SCRIPTS = mpijavac.pl endif -man_MANS = $(man_pages) - nodist_ompidata_DATA = mpicc-wrapper-data.txt if OMPI_HAVE_CXX_COMPILER @@ -167,11 +145,9 @@ install-exec-hook: install-exec-hook-always install-data-hook: install-data-hook-always (cd $(DESTDIR)$(pkgdatadir); rm -f mpiCC-wrapper-data.txt; $(LN_S) mpic++-wrapper-data.txt mpiCC-wrapper-data.txt) - (cd $(DESTDIR)$(mandir)/man1; rm -f mpiCC.1; $(LN_S) mpic++.1 mpiCC.1) uninstall-local: uninstall-local-always rm -f $(DESTDIR)$(bindir)/mpiCC$(EXEEXT) \ - $(DESTDIR)$(mandir)/man1/mpiCC.1 \ $(DESTDIR)$(pkgdatadir)/mpiCC-wrapper-data.txt else # CASE_SENSITIVE_FS @@ -184,51 +160,3 @@ endif # CASE_SENSITIVE_FS endif # OPAL_INSTALL_BINARIES endif # OPAL_WANT_SCRIPT_WRAPPER_COMPILERS - - -######################################################## -# -# Man page generation / handling -# -######################################################## -distclean-local: - rm -f $(generated_man_pages) - -$(top_builddir)/opal/tools/wrappers/generic_wrapper.1: - (cd $(top_builddir)/opal/tools/wrappers && $(MAKE) $(AM_MAKEFLAGS) generic_wrapper.1) - -mpicc.1: $(top_builddir)/opal/tools/wrappers/generic_wrapper.1 - rm -f mpicc.1 - sed -e 's/#COMMAND#/mpicc/g' -e 's/#PROJECT#/Open MPI/g' -e 's/#PROJECT_SHORT#/OMPI/g' -e 's/#LANGUAGE#/C/g' < $(top_builddir)/opal/tools/wrappers/generic_wrapper.1 > mpicc.1 - -if 
OMPI_HAVE_CXX_COMPILER -mpic++.1: $(top_builddir)/opal/tools/wrappers/generic_wrapper.1 - rm -f mpic++.1 - sed -e 's/#COMMAND#/mpic++/g' -e 's/#PROJECT#/Open MPI/g' -e 's/#PROJECT_SHORT#/OMPI/g' -e 's/#LANGUAGE#/C++/g' < $(top_builddir)/opal/tools/wrappers/generic_wrapper.1 > mpic++.1 - -mpicxx.1: $(top_builddir)/opal/tools/wrappers/generic_wrapper.1 - rm -f mpicxx.1 - sed -e 's/#COMMAND#/mpicxx/g' -e 's/#PROJECT#/Open MPI/g' -e 's/#PROJECT_SHORT#/OMPI/g' -e 's/#LANGUAGE#/C++/g' < $(top_builddir)/opal/tools/wrappers/generic_wrapper.1 > mpicxx.1 -endif - -if OMPI_HAVE_FORTRAN_COMPILER -mpifort.1: $(top_builddir)/opal/tools/wrappers/generic_wrapper.1 - rm -f mpifort.1 - sed -e 's/#COMMAND#/mpifort/g' -e 's/#PROJECT#/Open MPI/g' -e 's/#PROJECT_SHORT#/OMPI/g' -e 's/#LANGUAGE#/Fortran/g' < $(top_builddir)/opal/tools/wrappers/generic_wrapper.1 > mpifort.1 - -# Deprecated -mpif77.1: mpif77.1in - rm -f mpif77.1 - sed -e 's/#PROJECT#/Open MPI/g' \ - -e 's/#PROJECT_SHORT#/OMPI/g' \ - -e 's/#LANGUAGE#/Fortran/g' \ - -e 's/#PACKAGE_NAME#/@PACKAGE_NAME@/g' \ - -e 's/#PACKAGE_VERSION#/@PACKAGE_VERSION@/g' \ - -e 's/#OMPI_DATE#/@OMPI_RELEASE_DATE@/g' \ - < $(top_srcdir)/ompi/tools/wrappers/mpif77.1in > mpif77.1 - -# Deprecated -mpif90.1: mpif77.1 - @ rm -f mpif90.1 - $(OMPI_V_LN_S) $(LN_S) mpif77.1 mpif90.1 -endif diff --git a/ompi/tools/wrappers/mpif77.1in b/ompi/tools/wrappers/mpif77.1in deleted file mode 100644 index 2653305f10f..00000000000 --- a/ompi/tools/wrappers/mpif77.1in +++ /dev/null @@ -1,48 +0,0 @@ -.\" Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved. -.\" Copyright (c) 2011-2012 Cisco Systems, Inc. All rights reserved. -.TH "mpif77,mpif90" 1 "#OMPI_DATE#" "#PACKAGE_VERSION#" "Open MPI" -. -.SH NAME -mpif77, mpif90 -- Deprecated Open MPI Fortran wrapper compilers -. -.SH SYNTAX -mpif90 ... -. -.\" ************************** -.\" Description Section -.\" ************************** -. 
-.SH DESCRIPTION -.PP -The -.I mpif77 -and -.I mpif90 -wrapper compiler names are deprecated, and will disappear in a future -version of Open MPI. You should use the -.I mpifort -wrapper compiler, instead. While they are deprecated, -.I mpif77 -and -.I mpif90 -accept all the same parameters as -.IR mpifort , -and behaves the same as -.IR mpifort . -. -.PP -With -.IR mpifort , -you can compile any Fortran program that uses the "mpif.h", "use mpi", -and/or "use mpi_f08" MPI Fortran interfaces. -. -.PP -See mpifort(1) for more details. -. -.\" ************************** -.\" See Also Section -.\" ************************** -. -.SH SEE ALSO -mpifort(1) -. diff --git a/ompi/tools/wrappers/mpijavac.1in b/ompi/tools/wrappers/mpijavac.1in deleted file mode 100644 index e95016e6aa5..00000000000 --- a/ompi/tools/wrappers/mpijavac.1in +++ /dev/null @@ -1,147 +0,0 @@ -.\" Copyright (c) 2012 Los Alamos National Security, LLC. All rights reserved. -.\" Copyright (c) 2017 FUJITSU LIMITED. All rights reserved. -.TH mpijava 1 "#OPAL_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -. -.SH NAME -mpijava -- Open MPI Java wrapper compiler -. -.SH SYNTAX -mpijava [-showme|-showme:compile|-showme:link] ... -. -.SH OPTIONS -.TP ---showme -This option comes in several different variants (see below). None of -the variants invokes the underlying compiler; they all provide -information on how the underlying compiler would have been invoked had -.I --showme -not been used. -The basic -.I --showme -option outputs the command line that would be executed to compile the -program. \fBNOTE:\fR If a non-filename argument is passed on the -command line, the \fI-showme\fR option will \fInot\fR display any -additional flags. For example, both "mpijava --showme" and -"mpijava --showme my_source.java" will show all the wrapper-supplied -flags. But "mpijava --showme -v" will only show the underlying -compiler name and "-v". 
-.TP ---showme:compile -Output the compiler flags that would have been supplied to the -java compiler. -.TP ---showme:link -Output the linker flags that would have been supplied to the -java compiler. -.TP ---showme:command -Outputs the underlying java compiler command (which may be one -or more tokens). -.TP ---showme:incdirs -Outputs a space-delimited (but otherwise undecorated) list of -directories that the wrapper compiler would have provided to the -underlying java compiler to indicate where relevant header files -are located. -.TP ---showme:libdirs -Outputs a space-delimited (but otherwise undecorated) list of -directories that the wrapper compiler would have provided to the -underlying linker to indicate where relevant libraries are located. -.TP ---showme:libs -Outputs a space-delimited (but otherwise undecorated) list of library -names that the wrapper compiler would have used to link an -application. For example: "mpi open-rte open-pal util". -.TP ---showme:version -Outputs the version number of Open MPI. -.PP -See the man page for your underlying java compiler for other -options that can be passed through mpijava. -. -. -.SH DESCRIPTION -.PP -Conceptually, the role of these commands is quite simple: -transparently add relevant compiler and linker flags to the user's -command line that are necessary to compile / link Open MPI -programs, and then invoke the underlying compiler to actually perform -the command. -. -.PP -As such, these commands are frequently referred to as "wrapper" -compilers because they do not actually compile or link applications -themselves; they only add in command line flags and invoke the -back-end compiler. -. -. -.SS Overview -\fImpijava\fR is a convenience wrapper for the underlying -java compiler. Translation of an Open MPI program requires the -linkage of the Open MPI-specific libraries which may not reside in -one of the standard search directories of ld(1). 
It also often -requires the inclusion of header files what may also not be found in a -standard location. -. -.PP -\fImpijava\fR passes its arguments to the underlying java -compiler along with the -I, -L and -l options required by Open MPI -programs. -. -.PP -The Open MPI Team \fIstrongly\fR encourages using the wrapper -compilers instead of attempting to link to the Open MPI libraries -manually. This allows the specific implementation of Open MPI to -change without forcing changes to linker directives in users' -Makefiles. Indeed, the specific set of flags and libraries used by -the wrapper compilers depends on how Open MPI was configured and -built; the values can change between different installations of the -same version of Open MPI. -. -.PP -Indeed, since the wrappers are simply thin shells on top of an -underlying compiler, there are very, very few compelling reasons -\fInot\fR to use \fImpijava\fR. When it is not possible to use the -wrappers directly, the \fI-showme:compile\fR and \fI-showme:link\fR -options should be used to determine what flags the wrappers would have -used. -. -. -.SH NOTES -.PP -It is possible to make the wrapper compilers multi-lib aware. That -is, the libraries and includes specified may differ based on the -compiler flags specified (for example, with the GNU compilers on -Linux, a different library path may be used if -m32 is seen versus --m64 being seen). This is not the default behavior in a standard -build, but can be activated (for example, in a binary package -providing both 32 and 64 bit support). More information can be found -at: -.PP - https://svn.open-mpi.org/trac/ompi/wiki/compilerwrapper3264 -. -. 
-.SH FILES -.PP -The string that the wrapper compilers insert into the command line -before invoking the underlying compiler are stored in a text file -created by Open MPI and installed to -\fI$pkgdata/mpijava-wrapper-data.txt\fR, where \fI$pkgdata\fR -is typically \fI$prefix/share/openmpi\fR, and \fI$prefix\fR is the top -installation directory of Open MPI. -. -.PP -It is rarely necessary to edit this file, but it can be examined to -gain insight into what flags the wrappers are placing on the command -line. -. -. -.SH ENVIRONMENT VARIABLES -.PP -By default, the wrappers use the compilers that were selected when -Open MPI was configured. These compilers were either found -automatically by Open MPI's "configure" script, or were selected by -the user in the CC, CXX, F77, JAVAC, and/or FC environment variables -before "configure" was invoked. Additionally, other arguments -specific to the compiler may have been selected by configure. diff --git a/opal/tools/wrappers/Makefile.am b/opal/tools/wrappers/Makefile.am index a03ff2e6ecd..16c874ccd99 100644 --- a/opal/tools/wrappers/Makefile.am +++ b/opal/tools/wrappers/Makefile.am @@ -9,7 +9,7 @@ # University of Stuttgart. All rights reserved. # Copyright (c) 2004-2005 The Regents of the University of California. # All rights reserved. -# Copyright (c) 2006-2014 Cisco Systems, Inc. All rights reserved. +# Copyright (c) 2006-2022 Cisco Systems, Inc. All rights reserved. # Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved. # Copyright (c) 2014 Research Organization for Information Science # and Technology (RIST). All rights reserved. 
@@ -22,18 +22,11 @@ DEFS="-DEXEEXT=\"$(EXEEXT)\"" -real_man_pages = generic_wrapper.1 opal_wrapper.1 -EXTRA_DIST = $(real_man_pages:.1=.1in) - -include $(top_srcdir)/Makefile.ompi-rules - if !OPAL_WANT_SCRIPT_WRAPPER_COMPILERS if OPAL_INSTALL_BINARIES bin_PROGRAMS = opal_wrapper -nodist_man_MANS = opal_wrapper.1 - dist_opaldata_DATA = help-opal-wrapper.txt # Only install the following for developer-level installs @@ -42,17 +35,9 @@ if WANT_INSTALL_HEADERS nodist_opaldata_DATA = \ opalcc-wrapper-data.txt -nodist_man_MANS += opalcc.1 - pkgconfigdir = $(libdir)/pkgconfig pkgconfig_DATA = opal.pc -install-exec-hook: - (cd $(DESTDIR)$(bindir); rm -f opalcc$(EXEEXT); $(LN_S) opal_wrapper$(EXEECT) opalcc$(EXEEXT)) - -uninstall-local: - rm -f $(DESTDIR)$(bindir)/opalcc$(EXEEXT) - endif # WANT_INSTALL_HEADERS endif # OPAL_INSTALL_BINARIES @@ -60,15 +45,3 @@ endif # OPAL_WANT_SCRIPT_WRAPPER_COMPILERS opal_wrapper_SOURCES = opal_wrapper.c opal_wrapper_LDADD = $(top_builddir)/opal/lib@OPAL_LIB_NAME@.la - -# Ensure that the man pages are rebuilt if the opal_config.h file -# changes; a "good enough" way to know if configure was run again (and -# therefore the release date or version may have changed) -$(real_man_pages): $(top_builddir)/opal/include/opal_config.h - -opalcc.1: generic_wrapper.1 - rm -f opalcc.1 - sed -e 's/#COMMAND#/opalcc/g' -e 's/#PROJECT#/Open PAL/g' -e 's/#PROJECT_SHORT#/OPAL/g' -e 's/#LANGUAGE#/C/g' < $(top_builddir)/opal/tools/wrappers/generic_wrapper.1 > opalcc.1 - -distclean-local: - rm -f $(real_man_pages) opalcc.1 diff --git a/opal/tools/wrappers/generic_wrapper.1in b/opal/tools/wrappers/generic_wrapper.1in deleted file mode 100644 index 796f2a14428..00000000000 --- a/opal/tools/wrappers/generic_wrapper.1in +++ /dev/null @@ -1,261 +0,0 @@ -.\" Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved. -.\" Copyright (c) 2011-2012 Cisco Systems, Inc. All rights reserved. -.TH #COMMAND# 1 "#OPAL_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -. 
-.SH NAME -#COMMAND# -- #PROJECT# #LANGUAGE# wrapper compiler -. -.SH SYNTAX -#COMMAND# [-showme|-showme:compile|-showme:link] ... -. -.SH OPTIONS -.TP ---showme -This option comes in several different variants (see below). None of -the variants invokes the underlying compiler; they all provide -information on how the underlying compiler would have been invoked had -.I --showme -not been used. -The basic -.I --showme -option outputs the command line that would be executed to compile the -program. \fBNOTE:\fR If a non-filename argument is passed on the -command line, the \fI-showme\fR option will \fInot\fR display any -additional flags. For example, both "#COMMAND# --showme" and -"#COMMAND# --showme my_source.c" will show all the wrapper-supplied -flags. But "#COMMAND# --showme -v" will only show the underlying -compiler name and "-v". -.TP ---showme:compile -Output the compiler flags that would have been supplied to the -#LANGUAGE# compiler. -.TP ---showme:link -Output the linker flags that would have been supplied to the -#LANGUAGE# compiler. -.TP ---showme:command -Outputs the underlying #LANGUAGE# compiler command (which may be one -or more tokens). -.TP ---showme:incdirs -Outputs a space-delimited (but otherwise undecorated) list of -directories that the wrapper compiler would have provided to the -underlying #LANGUAGE# compiler to indicate where relevant header files -are located. -.TP ---showme:libdirs -Outputs a space-delimited (but otherwise undecorated) list of -directories that the wrapper compiler would have provided to the -underlying linker to indicate where relevant libraries are located. -.TP ---showme:libs -Outputs a space-delimited (but otherwise undecorated) list of library -names that the wrapper compiler would have used to link an -application. For example: "mpi open-rte open-pal util". -.TP ---showme:version -Outputs the version number of Open MPI. -.TP ---showme:help -Output a brief usage help message. 
-.PP -See the man page for your underlying #LANGUAGE# compiler for other -options that can be passed through #COMMAND#. -. -. -.SH DESCRIPTION -.PP -Conceptually, the role of these commands is quite simple: -transparently add relevant compiler and linker flags to the user's -command line that are necessary to compile / link #PROJECT# -programs, and then invoke the underlying compiler to actually perform -the command. -. -.PP -As such, these commands are frequently referred to as "wrapper" -compilers because they do not actually compile or link applications -themselves; they only add in command line flags and invoke the -back-end compiler. -. -. -.SS Background -Open MPI is comprised of two software layers: -OPAL (Open Portable Access Layer), and OMPI (Open MPI). -There are wrapper compilers for each layer; each layer's wrapper only -links in the libraries relevant for that layer. Specifically, each -layer provides the following wrapper compilers: -. -.TP 4 -OPAL -\fIopalcc\fR and \fIopalc++\fR -. -.TP -OMPI -\fImpicc\fR, \fImpic++\fR, \fImpicxx\fR, \fImpiCC\fR (only on systems with -case-senstive file systems), and \fImpifort\fR (and its legacy/deprecated -names \fImpif77\fR and \fImpif90\fR). Note -that \fImpic++\fR, \fImpicxx\fR, and \fImpiCC\fR all invoke the same -underlying C++ compiler with the same options. All are provided as -compatibility with other MPI implementations. -. -. -.SS Fortran Notes -.PP -The Fortran wrapper compiler for MPI (\fImpifort\fR, and its -legacy/deprecated names \fImpif77\fR and \fImpif90\fR) can compile and -link MPI applications that use any/all of the MPI Fortran bindings: -.IR mpif.h , -the -.I mpi -module, and the -.I mpi_f08 -module (assuming Open MPI was installed with support for each of these -Fortran bindings). Specifically: it is no longer necessary to use -different wrapper compilers for applications that use -.I mpif.h -vs. 
applications that use the -.I mpi -module -- just use -.I mpifort -for all Fortran MPI applications. -. -.PP -Note, however, that the Fortran compiler may require additional -command-line options to enforce a specific Fortran dialect. For -example, in some versions of the IBM XLF compiler, if xlf90 is the -underlying Fortran compiler, -.IR -qfixed -may be necessary to compile fixed-format Fortran source files. -. -.PP -Finally, note that -.I mpifort -will be inoperative and will return an error on use if Fortran support -was not built into the MP Ilayer. -. -. -.SS Overview -\fI#COMMAND#\fR is a convenience wrappers for the underlying -#LANGUAGE# compiler. Translation of an #PROJECT# program requires the -linkage of the #PROJECT#-specific libraries which may not reside in -one of the standard search directories of ld(1). It also often -requires the inclusion of header files what may also not be found in a -standard location. -. -.PP -\fI#COMMAND#\fR passes its arguments to the underlying #LANGUAGE# -compiler along with the -I, -L and -l options required by #PROJECT# -programs. -. -.PP -The #PROJECT# Team \fIstrongly\fR encourages using the wrapper -compilers instead of attempting to link to the #PROJECT# libraries -manually. This allows the specific implementation of #PROJECT# to -change without forcing changes to linker directives in users' -Makefiles. Indeed, the specific set of flags and libraries used by -the wrapper compilers depends on how #PROJECT# was configured and -built; the values can change between different installations of the -same version of #PROJECT#. -. -.PP -Indeed, since the wrappers are simply thin shells on top of an -underlying compiler, there are very, very few compelling reasons -\fInot\fR to use \fI#COMMAND#\fR. When it is not possible to use the -wrappers directly, the \fI-showme:compile\fR and \fI-showme:link\fR -options should be used to determine what flags the wrappers would have -used. For example: -. 
-.PP -shell$ cc -c file1.c `mpicc -showme:compile` -. -.PP -shell$ cc -c file2.c `mpicc -showme:compile` -. -.PP -shell$ cc file1.o file2.o `mpicc -showme:link` -o my_mpi_program -. -. -.SH NOTES -.PP -It is possible to make the wrapper compilers multi-lib aware. That -is, the libraries and includes specified may differ based on the -compiler flags specified (for example, with the GNU compilers on -Linux, a different library path may be used if -m32 is seen versus --m64 being seen). This is not the default behavior in a standard -build, but can be activated (for example, in a binary package -providing both 32 and 64 bit support). More information can be found -at: -.PP - https://github.com/open-mpi/ompi/wiki/compilerwrapper3264 -. -. -.SH FILES -.PP -The string that the wrapper compilers insert into the command line -before invoking the underlying compiler are stored in a text file -created by #PROJECT# and installed to -\fI$pkgdata/#COMMAND#-wrapper-data.txt\fR, where \fI$pkgdata\fR -is typically \fI$prefix/share/openmpi\fR, and \fI$prefix\fR is the top -installation directory of #PROJECT#. -. -.PP -It is rarely necessary to edit this file, but it can be examined to -gain insight into what flags the wrappers are placing on the command -line. -. -. -.SH ENVIRONMENT VARIABLES -.PP -By default, the wrappers use the compilers that were selected when -#PROJECT# was configured. These compilers were either found -automatically by Open MPI's "configure" script, or were selected by -the user in the CC, CXX, F77, and/or FC environment variables -before "configure" was invoked. Additionally, other arguments -specific to the compiler may have been selected by configure. -. -.PP -These values can be selectively overridden by either editing the text -files containing this configuration information (see the \fBFILES\fR -section), or by setting selected environment variables of the -form "#PROJECT_SHORT#_value". -. -.PP -Valid value names are: -. 
-.TP -CPPFLAGS -Flags added when invoking the preprocessor (C or C++) -. -.TP -LDFLAGS -Flags added when invoking the linker (C, C++, or Fortran) -. -.TP -LIBS -Libraries added when invoking the linker (C, C++, or Fortran) -. -.TP -CC -C compiler -. -.TP -CFLAGS -C compiler flags -. -.TP -CXX -C++ compiler -. -.TP -CXXFLAGS -C++ compiler flags -. -. -.TP -FC -Fortran compiler -. -.TP -FCFLAGS -Fortran compiler flags diff --git a/opal/tools/wrappers/opal_wrapper.1in b/opal/tools/wrappers/opal_wrapper.1in deleted file mode 100644 index c4132bb18d8..00000000000 --- a/opal/tools/wrappers/opal_wrapper.1in +++ /dev/null @@ -1,78 +0,0 @@ -.\" Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved. -.\" Man page contributed by Dirk Eddelbuettel -.\" and released under the BSD license -.TH OPAL_WRAPPER 1 "#OPAL_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME -opal_wrapper - Back-end Open MPI wrapper command -.SH SYNOPSIS -.B opal_wrapper [options] -.SH DESCRIPTION -.PP -.B opal_wrapper -is not meant to be called directly by end users. It is automatically -invoked as the back-end by the Open MPI wrapper commands such as: -.BR mpicc , -.BR mpiCC , -.BR mpic++ , -and -.BR mpifort -(and its legacy/deprecated names -.BR mpif77 -and -.BR mpif90 ). -.PP -Some Open MPI installations may have additional wrapper commands, -and/or have renamed the wrapper compilers listed above to avoid -executable name conflicts with other MPI implementations. Hence, you -may also have wrapper compilers installed including the following -names: -.BR mpifort.openmpi -(and the legacy/deprecated names -.BR mpif90.openmpi -and -.BR mpif77.openmpi ), -.BR mpicxx.openmpi , -.BR mpiCC.openmpi , -.BR mpicc.openmpi , -.BR mpic++.openmpi , -.BR opalcc , -.BR opalc++ , -.BR ortecc , -and -.BR ortec++ , -. -. 
-.\" ************************** -.\" See Also Section -.\" ************************** -.SH SEE ALSO -The following may exist depending on your particular Open MPI -installation: -.BR mpicc (1), -.BR mpiCC (1), -.BR mpic++ (1), -.BR mpifort (1), -.BR mpifort.openmpi (1), -.BR mpicxx.openmpi (1), -.BR mpiCC.openmpi (1), -.BR mpicc.openmpi (1), -.BR mpic++.openmpi (1), -.BR ortecc (1), -.BR ortec++ (1), -.BR opalccc (1), -and the website at -.IR https://www.open-mpi.org/ . -. -. -.\" ************************** -.\" Authors Section -.\" ************************** -.SH AUTHORS -The Open MPI maintainers -- see -.I https://www.open-mpi.org/ -or the file -.IR AUTHORS . -.PP -This manual page was originally contributed by Dirk Eddelbuettel -, one of the Debian GNU/Linux maintainers for Open -MPI, and may be used by others. diff --git a/oshmem/Makefile.am b/oshmem/Makefile.am index da59aadba39..8d58ad20eff 100644 --- a/oshmem/Makefile.am +++ b/oshmem/Makefile.am @@ -1,7 +1,7 @@ # # Copyright (c) 2013-2015 Mellanox Technologies, Inc. # All rights reserved. -# Copyright (c) 2013-2014 Cisco Systems, Inc. All rights reserved. +# Copyright (c) 2013-2022 Cisco Systems, Inc. All rights reserved. # Copyright (c) 2014 Intel, Inc. All rights reserved. # Copyright (c) 2015 Los Alamos National Security, LLC. All rights # reserved. 
@@ -66,7 +66,6 @@ noinst_LTLIBRARIES = include_HEADERS = dist_oshmemdata_DATA = liboshmem_la_SOURCES += $(headers) -nodist_man_MANS = # Conditionally install the header files @@ -81,25 +80,6 @@ include request/Makefile.am include info/Makefile.am include runtime/Makefile.am include shmem/Makefile.am -include shmem/man/man3/Makefile.extra include mca/Makefile.am include tools/Makefile.am include util/Makefile.am - -# Ensure that the man page directory exists before we try to make man -# page files (because ompi/mpi/man/man3 has no config.status-generated -# Makefile) -dir_stamp = $(top_builddir)/$(subdir)/shmem/man/man3/.dir-stamp - -# Also ensure that the man pages are rebuilt if the opal_config.h file -# changes (e.g., configure was run again, meaning that the release -# date or version may have changed) -$(nodist_man_MANS): $(dir_stamp) $(top_builddir)/opal/include/opal_config.h - -$(dir_stamp): - $(MKDIR_P) `dirname $@` - touch "$@" - -# Remove the generated man pages -distclean-local: - rm -f $(nodist_man_MANS) $(dir_stamp) diff --git a/oshmem/shmem/man/man3/Makefile.extra b/oshmem/shmem/man/man3/Makefile.extra deleted file mode 100644 index c47ba2c2fb7..00000000000 --- a/oshmem/shmem/man/man3/Makefile.extra +++ /dev/null @@ -1,247 +0,0 @@ -# -*- makefile -*- -# Copyright (c) 2015 Mellanox Technologies, Inc. 
-# $COPYRIGHT$ -# -# Additional copyrights may follow -# -# $HEADER$ -# - -include $(top_srcdir)/Makefile.ompi-rules - -shmem_legacy_api_man_pages = \ - shmem/man/man3/start_pes.3 \ - shmem/man/man3/_num_pes.3 \ - shmem/man/man3/_my_pe.3 \ - shmem/man/man3/shmalloc.3 \ - shmem/man/man3/shmemalign.3 \ - shmem/man/man3/shrealloc.3 \ - shmem/man/man3/shfree.3 - -shmem_api_man_pages = \ - shmem/man/man3/intro_shmem.3 \ - shmem/man/man3/OpenSHMEM.3 \ - shmem/man/man3/shmem_init.3 \ - shmem/man/man3/shmem_finalize.3 \ - shmem/man/man3/shmem_n_pes.3 \ - shmem/man/man3/shmem_my_pe.3 \ - shmem/man/man3/shmem_pe_accessible.3 \ - shmem/man/man3/shmem_addr_accessible.3 \ - shmem/man/man3/shmem_malloc.3 \ - shmem/man/man3/shmem_align.3 \ - shmem/man/man3/shmem_realloc.3 \ - shmem/man/man3/shmem_free.3 \ - shmem/man/man3/shmem_ptr.3 \ - shmem/man/man3/shmem_char_p.3 \ - shmem/man/man3/shmem_short_p.3 \ - shmem/man/man3/shmem_int_p.3 \ - shmem/man/man3/shmem_long_p.3 \ - shmem/man/man3/shmem_float_p.3 \ - shmem/man/man3/shmem_double_p.3 \ - shmem/man/man3/shmem_longlong_p.3 \ - shmem/man/man3/shmem_longdouble_p.3 \ - shmem/man/man3/shmem_char_put.3 \ - shmem/man/man3/shmem_short_put.3 \ - shmem/man/man3/shmem_int_put.3 \ - shmem/man/man3/shmem_long_put.3 \ - shmem/man/man3/shmem_float_put.3 \ - shmem/man/man3/shmem_double_put.3 \ - shmem/man/man3/shmem_longlong_put.3 \ - shmem/man/man3/shmem_longdouble_put.3 \ - shmem/man/man3/shmem_put32.3 \ - shmem/man/man3/shmem_put64.3 \ - shmem/man/man3/shmem_put128.3 \ - shmem/man/man3/shmem_putmem.3 \ - shmem/man/man3/shmem_short_iput.3 \ - shmem/man/man3/shmem_int_iput.3 \ - shmem/man/man3/shmem_long_iput.3 \ - shmem/man/man3/shmem_float_iput.3 \ - shmem/man/man3/shmem_double_iput.3 \ - shmem/man/man3/shmem_longlong_iput.3 \ - shmem/man/man3/shmem_longdouble_iput.3 \ - shmem/man/man3/shmem_iput32.3 \ - shmem/man/man3/shmem_iput64.3 \ - shmem/man/man3/shmem_iput128.3 \ - shmem/man/man3/shmem_char_g.3 \ - shmem/man/man3/shmem_short_g.3 \ - 
shmem/man/man3/shmem_int_g.3 \ - shmem/man/man3/shmem_long_g.3 \ - shmem/man/man3/shmem_float_g.3 \ - shmem/man/man3/shmem_double_g.3 \ - shmem/man/man3/shmem_longlong_g.3 \ - shmem/man/man3/shmem_longdouble_g.3 \ - shmem/man/man3/shmem_char_get.3 \ - shmem/man/man3/shmem_short_get.3 \ - shmem/man/man3/shmem_int_get.3 \ - shmem/man/man3/shmem_long_get.3 \ - shmem/man/man3/shmem_float_get.3 \ - shmem/man/man3/shmem_double_get.3 \ - shmem/man/man3/shmem_longlong_get.3 \ - shmem/man/man3/shmem_longdouble_get.3 \ - shmem/man/man3/shmem_get32.3 \ - shmem/man/man3/shmem_get64.3 \ - shmem/man/man3/shmem_get128.3 \ - shmem/man/man3/shmem_getmem.3 \ - shmem/man/man3/shmem_short_iget.3 \ - shmem/man/man3/shmem_int_iget.3 \ - shmem/man/man3/shmem_long_iget.3 \ - shmem/man/man3/shmem_float_iget.3 \ - shmem/man/man3/shmem_double_iget.3 \ - shmem/man/man3/shmem_longlong_iget.3 \ - shmem/man/man3/shmem_longdouble_iget.3 \ - shmem/man/man3/shmem_iget32.3 \ - shmem/man/man3/shmem_iget64.3 \ - shmem/man/man3/shmem_iget128.3 \ - shmem/man/man3/shmem_swap.3 \ - shmem/man/man3/shmem_int_swap.3 \ - shmem/man/man3/shmem_long_swap.3 \ - shmem/man/man3/shmem_longlong_swap.3 \ - shmem/man/man3/shmem_float_swap.3 \ - shmem/man/man3/shmem_double_swap.3 \ -\ - shmem/man/man3/shmem_double_set.3 \ - shmem/man/man3/shmem_float_set.3 \ - shmem/man/man3/shmem_int_set.3 \ - shmem/man/man3/shmem_longlong_set.3 \ - shmem/man/man3/shmem_long_set.3 \ -\ - shmem/man/man3/shmem_int_cswap.3 \ - shmem/man/man3/shmem_long_cswap.3 \ - shmem/man/man3/shmem_longlong_cswap.3 \ - shmem/man/man3/shmem_int_fadd.3 \ - shmem/man/man3/shmem_long_fadd.3 \ - shmem/man/man3/shmem_longlong_fadd.3 \ -\ - shmem/man/man3/shmem_double_fetch.3 \ - shmem/man/man3/shmem_float_fetch.3 \ - shmem/man/man3/shmem_int_fetch.3 \ - shmem/man/man3/shmem_long_fetch.3 \ - shmem/man/man3/shmem_longlong_fetch.3 \ -\ - shmem/man/man3/shmem_int_finc.3 \ - shmem/man/man3/shmem_long_finc.3 \ - shmem/man/man3/shmem_longlong_finc.3 \ - 
shmem/man/man3/shmem_int_add.3 \ - shmem/man/man3/shmem_long_add.3 \ - shmem/man/man3/shmem_longlong_add.3 \ - shmem/man/man3/shmem_int_inc.3 \ - shmem/man/man3/shmem_long_inc.3 \ - shmem/man/man3/shmem_longlong_inc.3 \ - shmem/man/man3/shmem_set_lock.3 \ - shmem/man/man3/shmem_clear_lock.3 \ - shmem/man/man3/shmem_test_lock.3 \ - shmem/man/man3/shmem_wait.3 \ - shmem/man/man3/shmem_short_wait.3 \ - shmem/man/man3/shmem_int_wait.3 \ - shmem/man/man3/shmem_long_wait.3 \ - shmem/man/man3/shmem_longlong_wait.3 \ - shmem/man/man3/shmem_wait_until.3 \ - shmem/man/man3/shmem_short_wait_until.3 \ - shmem/man/man3/shmem_int_wait_until.3 \ - shmem/man/man3/shmem_long_wait_until.3 \ - shmem/man/man3/shmem_longlong_wait_until.3 \ - shmem/man/man3/shmem_barrier.3 \ - shmem/man/man3/shmem_barrier_all.3 \ - shmem/man/man3/shmem_fence.3 \ - shmem/man/man3/shmem_quiet.3 \ - shmem/man/man3/shmem_broadcast32.3 \ - shmem/man/man3/shmem_broadcast64.3 \ - shmem/man/man3/shmem_alltoall32.3 \ - shmem/man/man3/shmem_alltoall64.3 \ - shmem/man/man3/shmem_alltoalls32.3 \ - shmem/man/man3/shmem_alltoalls64.3 \ - shmem/man/man3/shmem_collect32.3 \ - shmem/man/man3/shmem_collect64.3 \ - shmem/man/man3/shmem_fcollect32.3 \ - shmem/man/man3/shmem_fcollect64.3 \ - shmem/man/man3/shmem_short_and_to_all.3 \ - shmem/man/man3/shmem_int_and_to_all.3 \ - shmem/man/man3/shmem_long_and_to_all.3 \ - shmem/man/man3/shmem_longlong_and_to_all.3 \ - shmem/man/man3/shmem_short_or_to_all.3 \ - shmem/man/man3/shmem_int_or_to_all.3 \ - shmem/man/man3/shmem_long_or_to_all.3 \ - shmem/man/man3/shmem_longlong_or_to_all.3 \ - shmem/man/man3/shmem_short_xor_to_all.3 \ - shmem/man/man3/shmem_int_xor_to_all.3 \ - shmem/man/man3/shmem_long_xor_to_all.3 \ - shmem/man/man3/shmem_longlong_xor_to_all.3 \ - shmem/man/man3/shmem_short_max_to_all.3 \ - shmem/man/man3/shmem_int_max_to_all.3 \ - shmem/man/man3/shmem_long_max_to_all.3 \ - shmem/man/man3/shmem_longlong_max_to_all.3 \ - shmem/man/man3/shmem_float_max_to_all.3 \ - 
shmem/man/man3/shmem_double_max_to_all.3 \ - shmem/man/man3/shmem_longdouble_max_to_all.3 \ - shmem/man/man3/shmem_short_min_to_all.3 \ - shmem/man/man3/shmem_int_min_to_all.3 \ - shmem/man/man3/shmem_long_min_to_all.3 \ - shmem/man/man3/shmem_longlong_min_to_all.3 \ - shmem/man/man3/shmem_float_min_to_all.3 \ - shmem/man/man3/shmem_double_min_to_all.3 \ - shmem/man/man3/shmem_longdouble_min_to_all.3 \ - shmem/man/man3/shmem_short_sum_to_all.3 \ - shmem/man/man3/shmem_int_sum_to_all.3 \ - shmem/man/man3/shmem_long_sum_to_all.3 \ - shmem/man/man3/shmem_longlong_sum_to_all.3 \ - shmem/man/man3/shmem_float_sum_to_all.3 \ - shmem/man/man3/shmem_double_sum_to_all.3 \ - shmem/man/man3/shmem_complexf_sum_to_all.3 \ - shmem/man/man3/shmem_complexd_sum_to_all.3 \ - shmem/man/man3/shmem_short_prod_to_all.3 \ - shmem/man/man3/shmem_int_prod_to_all.3 \ - shmem/man/man3/shmem_long_prod_to_all.3 \ - shmem/man/man3/shmem_longlong_prod_to_all.3 \ - shmem/man/man3/shmem_float_prod_to_all.3 \ - shmem/man/man3/shmem_double_prod_to_all.3 \ - shmem/man/man3/shmem_longdouble_prod_to_all.3 \ - shmem/man/man3/shmem_complexf_prod_to_all.3 \ - shmem/man/man3/shmem_complexd_prod_to_all.3 \ - shmem/man/man3/shmem_udcflush.3 \ - shmem/man/man3/shmem_udcflush_line.3 \ - shmem/man/man3/shmem_set_cache_inv.3 \ - shmem/man/man3/shmem_set_cache_line_inv.3 \ - shmem/man/man3/shmem_clear_cache_inv.3 \ - shmem/man/man3/shmem_clear_cache_line_inv.3 \ - shmem/man/man3/shmem_info_get_name.3 \ - shmem/man/man3/shmem_info_get_version.3 \ - shmem/man/man3/shmem_global_exit.3 \ -\ - shmem/man/man3/shmem_getmem_nbi.3 \ - shmem/man/man3/shmem_char_get_nbi.3 \ - shmem/man/man3/shmem_short_get_nbi.3 \ - shmem/man/man3/shmem_int_get_nbi.3 \ - shmem/man/man3/shmem_long_get_nbi.3 \ - shmem/man/man3/shmem_longlong_get_nbi.3 \ - shmem/man/man3/shmem_float_get_nbi.3 \ - shmem/man/man3/shmem_double_get_nbi.3 \ - shmem/man/man3/shmem_longdouble_get_nbi.3 \ - shmem/man/man3/shmem_get8_nbi.3 \ - 
shmem/man/man3/shmem_get16_nbi.3 \ - shmem/man/man3/shmem_get32_nbi.3 \ - shmem/man/man3/shmem_get64_nbi.3 \ - shmem/man/man3/shmem_get128_nbi.3 \ -\ - shmem/man/man3/shmem_putmem_nbi.3 \ - shmem/man/man3/shmem_char_put_nbi.3 \ - shmem/man/man3/shmem_short_put_nbi.3 \ - shmem/man/man3/shmem_int_put_nbi.3 \ - shmem/man/man3/shmem_long_put_nbi.3 \ - shmem/man/man3/shmem_longlong_put_nbi.3 \ - shmem/man/man3/shmem_float_put_nbi.3 \ - shmem/man/man3/shmem_double_put_nbi.3 \ - shmem/man/man3/shmem_longdouble_put_nbi.3 \ - shmem/man/man3/shmem_put8_nbi.3 \ - shmem/man/man3/shmem_put16_nbi.3 \ - shmem/man/man3/shmem_put32_nbi.3 \ - shmem/man/man3/shmem_put64_nbi.3 \ - shmem/man/man3/shmem_put128_nbi.3 - - -if PROJECT_OSHMEM -nodist_man_MANS += $(shmem_api_man_pages) -nodist_man_MANS += $(shmem_legacy_api_man_pages) -endif # PROJECT_OSHMEM -EXTRA_DIST += $(shmem_api_man_pages:.3=.3in) -EXTRA_DIST += $(shmem_legacy_api_man_pages:.3=.3in) - diff --git a/oshmem/shmem/man/man3/OpenSHMEM.3in b/oshmem/shmem/man/man3/OpenSHMEM.3in deleted file mode 100644 index 97469c502b1..00000000000 --- a/oshmem/shmem/man/man3/OpenSHMEM.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/intro_shmem.3 diff --git a/oshmem/shmem/man/man3/_my_pe.3in b/oshmem/shmem/man/man3/_my_pe.3in deleted file mode 100644 index 228477044bc..00000000000 --- a/oshmem/shmem/man/man3/_my_pe.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_my_pe.3 diff --git a/oshmem/shmem/man/man3/_num_pes.3in b/oshmem/shmem/man/man3/_num_pes.3in deleted file mode 100644 index ce155633cae..00000000000 --- a/oshmem/shmem/man/man3/_num_pes.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_n_pes.3 diff --git a/oshmem/shmem/man/man3/intro_shmem.3in b/oshmem/shmem/man/man3/intro_shmem.3in deleted file mode 100644 index 95b6c977ff5..00000000000 --- a/oshmem/shmem/man/man3/intro_shmem.3in +++ /dev/null @@ -1,1311 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2015 University of Houston. All rights reserved. 
-.\" Copyright (c) 2015 Mellanox Technologies, Inc. -.\" $COPYRIGHT$ -.de Vb -.ft CW -.nf -.. -.de Ve -.ft R - -.fi -.. -.TH "INTRO\\_SHMEM" "3" "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME - -intro_shmem \- Introduction to the OpenSHMEM programming model -.PP -.SH DESCRIPTION - -The SHMEM programming model consists of library routines that provide low\-latency, -high\-bandwidth communication for use in highly parallelized scalable programs. The -routines in the OpenSHMEM application programming interface (API) provide a programming -model for exchanging data between cooperating parallel processes. The resulting programs -are similar in style to Message Passing Interface (MPI) programs. The SHMEM API can -be used either alone or in combination with MPI routines in the same parallel program. -.PP -An OpenSHMEM program is SPMD (single program, multiple data) in style. The SHMEM -processes, called processing elements or PEs, all start at the same time and they all run the -same program. Usually the PEs perform computation on their own subdomains of the larger -problem and periodically communicate with other PEs to exchange information on -which the next computation phase depends. -.PP -The OpenSHMEM routines minimize the overhead associated with data transfer requests, -maximize bandwidth and minimize data latency. Data latency is the period of time that -starts when a PE initiates a transfer of data and ends when a PE can use the data. -OpenSHMEM routines support remote data transfer through put operations, which transfer -data to a different PE, get operations, which transfer data from a different PE, and remote -pointers, which allow direct references to data objects owned by another PE. Other -operations supported are collective broadcast and reduction, barrier synchronization, and -atomic memory operations. An atomic memory operation is an atomic read\-and\-update -operation, such as a fetch\-and\-increment, on a remote or local data object. 
-.PP -.SH OPENSHMEM ROUTINES - -This section lists the significant OpenSHMEM message\-passing routines. -.TP -PE queries -.PP -.RS -.TP -.B * -C/C++ only: -.RS -.PP -.RS -.RE -.TP -.B * -\fI_num_pes\fP(3) -.TP -.B * -\fI_my_pe\fP(3) -.RE -.RS -.PP -.RE -.TP -.B * -Fortran only: -.RS -.PP -.RS -.RE -.TP -.B * -\fINUM_PES\fP(3) -.TP -.B * -\fIMY_PE\fP(3) -.RE -.RS -.PP -.RE -.RE -.PP -.RE -.TP -Elemental data put routines -.PP -.RS -.TP -.B * -C/C++ only: -.RS -.PP -.RS -.RE -.TP -.B * -\fIshmem_double_p\fP(3) -.TP -.B * -\fIshmem_float_p\fP(3) -.TP -.B * -\fIshmem_int_p\fP(3) -.TP -.B * -\fIshmem_long_p\fP(3) -.TP -.B * -\fIshmem_short_p.\fP(3) -.RE -.RS -.PP -.RE -.RE -.PP -.RE -.TP -Block data put routines -.PP -.RS -.TP -.B * -C/C++ and Fortran: -.RS -.PP -.RS -.RE -.TP -.B * -\fIshmem_put32\fP(3) -.TP -.B * -\fIshmem_put64\fP(3) -.TP -.B * -\fIshmem_put128\fP(3) -.RE -.RS -.PP -.RE -.TP -.B * -C/C++ only: -.RS -.PP -.RS -.RE -.TP -.B * -\fIshmem_double_put\fP(3) -.TP -.B * -\fIshmem_float_put\fP(3) -.TP -.B * -\fIshmem_int_put\fP(3) -.TP -.B * -\fIshmem_long_put\fP(3) -.TP -.B * -\fIshmem_short_put.\fP(3) -.RE -.RS -.PP -.RE -.TP -.B * -Fortran only: -.RS -.PP -.RS -.RE -.TP -.B * -\fIshmem_complex_put\fP(3) -.TP -.B * -\fIshmem_integer_put\fP(3) -.TP -.B * -\fIshmem_logical_put\fP(3) -.TP -.B * -\fIshmem_real_put\fP(3) -.RE -.RS -.PP -.RE -.RE -.PP -.RE -.TP -Elemental data get routines -.PP -.RS -.TP -.B * -C/C++ only: -.RS -.PP -.RS -.RE -.TP -.B * -\fIshmem_double_g\fP(3) -.TP -.B * -\fIshmem_float_g\fP(3) -.TP -.B * -\fIshmem_int_g\fP(3) -.TP -.B * -\fIshmem_long_g\fP(3) -.TP -.B * -\fIshmem_short_g\fP(3) -.RE -.RS -.PP -.RE -.RE -.PP -.RE -.TP -Block data get routines -.RS -.TP -.B * -C/C++ and Fortran: -.RS -.PP -.RS -.RE -.TP -.B * -\fIshmem_get32\fP(3) -.TP -.B * -\fIshmem_get64\fP(3) -.TP -.B * -\fIshmem_get128\fP(3) -.RE -.RS -.PP -.RE -.TP -.B * -C/C++ only: -.RS -.PP -.RS -.RE -.TP -.B * -\fIshmem_double_get\fP(3) -.TP -.B * 
-\fIshmem_float_get\fP(3) -.TP -.B * -\fIshmem_int_get\fP(3) -.TP -.B * -\fIshmem_long_get\fP(3) -.TP -.B * -\fIshmem_short_get\fP(3) -.RE -.RS -.PP -.RE -.TP -.B * -Fortran only: -.RS -.PP -.RS -.RE -.TP -.B * -\fIshmem_complex_get\fP(3) -.TP -.B * -\fIshmem_integer_get\fP(3) -.TP -.B * -\fIshmem_logical_get\fP(3) -.TP -.B * -\fIshmem_real_get\fP(3) -.RE -.RS -.PP -.RE -.RE -.PP -.RE -.TP -Strided put routines -.RS -.TP -.B * -C/C++ and Fortran: -.RS -.PP -.RS -.RE -.TP -.B * -\fIshmem_iput32\fP(3) -.TP -.B * -\fIshmem_iput64\fP(3) -.TP -.B * -\fIshmem_iput128\fP(3) -.RE -.RS -.PP -.RE -.TP -.B * -C/C++ only: -.RS -.PP -.RS -.RE -.TP -.B * -\fIshmem_double_iput\fP(3) -.TP -.B * -\fIshmem_float_iput\fP(3) -.TP -.B * -\fIshmem_int_iput\fP(3) -.TP -.B * -\fIshmem_long_iput\fP(3) -.TP -.B * -\fIshmem_short_iput\fP(3) -.RE -.RS -.PP -.RE -.TP -.B * -Fortran only: -.RS -.PP -.RS -.RE -.TP -.B * -\fIshmem_complex_iput\fP(3) -.TP -.B * -\fIshmem_integer_iput\fP(3) -.TP -.B * -\fIshmem_logical_iput\fP(3) -.TP -.B * -\fIshmem_real_iput\fP(3) -.RE -.RS -.PP -.RE -.RE -.PP -.RE -.TP -Strided get routines -.PP -.RS -.TP -.B * -C/C++ and Fortran: -.RS -.PP -.RS -.RE -.TP -.B * -\fIshmem_iget32\fP(3) -.TP -.B * -\fIshmem_iget64\fP(3) -.TP -.B * -\fIshmem_iget128\fP(3) -.RE -.RS -.PP -.RE -.TP -.B * -C/C++ only: -.RS -.PP -.RS -.RE -.TP -.B * -\fIshmem_double_iget\fP(3) -.TP -.B * -\fIshmem_float_iget\fP(3) -.TP -.B * -\fIshmem_int_iget\fP(3) -.TP -.B * -\fIshmem_long_iget\fP(3) -.TP -.B * -\fIshmem_short_iget\fP(3) -.RE -.RS -.PP -.RE -.TP -.B * -Fortran only: -.RS -.PP -.RS -.RE -.TP -.B * -\fIshmem_complex_iget\fP(3) -.TP -.B * -\fIshmem_integer_iget\fP(3) -.TP -.B * -\fIshmem_logical_iget\fP(3) -.TP -.B * -\fIshmem_real_iget\fP(3) -.RE -.RS -.PP -.RE -.RE -.PP -.RE -.TP -Point\-to\-point synchronization routines -.RS -.TP -.B * -C/C++ only: -.RS -.PP -.RS -.RE -.TP -.B * -\fIshmem_int_wait\fP(3) -.TP -.B * -\fIshmem_int_wait_until\fP(3) -.TP -.B * -\fIshmem_long_wait\fP(3) 
-.TP -.B * -\fIshmem_long_wait_until\fP(3) -.TP -.B * -\fIshmem_longlong_wait\fP(3) -.TP -.B * -\fIshmem_longlong_wait_until\fP(3) -.TP -.B * -\fIshmem_short_wait\fP(3) -.TP -.B * -\fIshmem_short_wait_until\fP(3) -.RE -.RS -.PP -.RE -.TP -.B * -Fortran: -.RS -.PP -.RS -.RE -.TP -.B * -\fIshmem_int4_wait\fP(3) -.TP -.B * -\fIshmem_int4_wait_until\fP(3) -.TP -.B * -\fIshmem_int8_wait\fP(3) -.TP -.B * -\fIshmem_int8_wait_until\fP(3) -.RE -.RS -.PP -.RE -.RE -.PP -.RE -.TP -Barrier synchronization routines -.PP -.RS -.TP -.B * -C/C++ and Fortran: -.RS -.PP -.RS -.RE -.TP -.B * -\fIshmem_barrier_all\fP(3) -.TP -.B * -\fIshmem_barrier\fP(3) -.RE -.RS -.PP -.RE -.RE -.PP -.RE -.TP -Atomic memory fetch\-and\-operate (fetch\-op) routines -.RS -.TP -.B * -C/C++ and Fortran: -.RS -.TP -.B * -shmem_swap -.RE -.RS -.PP -.RE -.RE -.PP -.RE -.TP -Reduction routines -.RS -.TP -.B * -C/C++ only: -.RS -.TP -.B * -\fIshmem_int_and_to_all\fP(3) -.TP -.B * -\fIshmem_long_and_to_all\fP(3) -.TP -.B * -\fIshmem_longlong_and_to_all\fP(3) -.TP -.B * -\fIshmem_short_and_to_all\fP(3) -.TP -.B * -\fIshmem_double_max_to_all\fP(3) -.TP -.B * -\fIshmem_float_max_to_all\fP(3) -.TP -.B * -\fIshmem_int_max_to_all\fP(3) -.TP -.B * -\fIshmem_long_max_to_all\fP(3) -.TP -.B * -\fIshmem_longlong_max_to_all\fP(3) -.TP -.B * -\fIshmem_short_max_to_all\fP(3) -.TP -.B * -\fIshmem_double_min_to_all\fP(3) -.TP -.B * -\fIshmem_float_min_to_all\fP(3) -.TP -.B * -\fIshmem_int_min_to_all\fP(3) -.TP -.B * -\fIshmem_long_min_to_all\fP(3) -.TP -.B * -\fIshmem_longlong_min_to_all\fP(3) -.TP -.B * -\fIshmem_short_min_to_all\fP(3) -.TP -.B * -\fIshmem_double_sum_to_all\fP(3) -.TP -.B * -\fIshmem_float_sum_to_all\fP(3) -.TP -.B * -\fIshmem_int_sum_to_all\fP(3) -.TP -.B * -\fIshmem_long_sum_to_all\fP(3) -.TP -.B * -\fIshmem_longlong_sum_to_all\fP(3) -.TP -.B * -\fIshmem_short_sum_to_all\fP(3) -.TP -.B * -\fIshmem_double_prod_to_all\fP(3) -.TP -.B * -\fIshmem_float_prod_to_all\fP(3) -.TP -.B * 
-\fIshmem_int_prod_to_all\fP(3) -.TP -.B * -\fIshmem_long_prod_to_all\fP(3) -.TP -.B * -\fIshmem_longlong_prod_to_all\fP(3) -.TP -.B * -\fIshmem_short_prod_to_all\fP(3) -.TP -.B * -\fIshmem_int_or_to_all\fP(3) -.TP -.B * -\fIshmem_long_or_to_all\fP(3) -.TP -.B * -\fIshmem_longlong_or_to_all\fP(3) -.TP -.B * -\fIshmem_short_or_to_all\fP(3) -.TP -.B * -\fIshmem_int_xor_to_all\fP(3) -.TP -.B * -\fIshmem_long_xor_to_all\fP(3) -.TP -.B * -\fIshmem_longlong_xor_to_all\fP(3) -.TP -.B * -\fIshmem_short_xor_to_all\fP(3) -.RE -.RS -.PP -.RE -.TP -.B * -Fortran only: -.RS -.TP -.B * -\fIshmem_int4_and_to_all\fP(3) -.TP -.B * -\fIshmem_int8_and_to_all\fP(3) -.TP -.B * -\fIshmem_real4_max_to_all\fP(3) -.TP -.B * -\fIshmem_real8_max_to_all\fP(3) -.TP -.B * -\fIshmem_int4_max_to_all\fP(3) -.TP -.B * -\fIshmem_int8_max_to_all\fP(3) -.TP -.B * -\fIshmem_real4_min_to_all\fP(3) -.TP -.B * -\fIshmem_real8_min_to_all\fP(3) -.TP -.B * -\fIshmem_int4_min_to_all\fP(3) -.TP -.B * -\fIshmem_int8_min_to_all\fP(3) -.TP -.B * -\fIshmem_real4_sum_to_all\fP(3) -.TP -.B * -\fIshmem_real8_sum_to_all\fP(3) -.TP -.B * -\fIshmem_int4_sum_to_all\fP(3) -.TP -.B * -\fIshmem_int8_sum_to_all\fP(3) -.TP -.B * -\fIshmem_real4_prod_to_all\fP(3) -.TP -.B * -\fIshmem_real8_prod_to_all\fP(3) -.TP -.B * -\fIshmem_int4_prod_to_all\fP(3) -.TP -.B * -\fIshmem_int8_prod_to_all\fP(3) -.TP -.B * -\fIshmem_int4_or_to_all\fP(3) -.TP -.B * -\fIshmem_int8_or_to_all\fP(3) -.TP -.B * -\fIshmem_int4_xor_to_all\fP(3) -.TP -.B * -\fIshmem_int8_xor_to_all\fP(3) -.RE -.RS -.PP -.RE -.RE -.PP -.RE -.TP -Broadcast routines -.PP -.RS -.TP -.B * -C/C++ and Fortran: -.RS -.PP -.RS -.RE -.TP -.B * -\fIshmem_broadcast32\fP(3) -.TP -.B * -\fIshmem_broadcast64\fP(3) -.RE -.RS -.PP -.RE -.RE -.PP -.RE -.TP -Cache management routines -.PP -.RS -.TP -.B * -C/C++ and Fortran: -.RS -.PP -.RS -.RE -.TP -.B * -\fIshmem_udcflush\fP(3) -.TP -.B * -\fIshmem_udcflush_line\fP(3) -.RE -.RS -.PP -.RE -.RE -.PP -.RE -.TP -Byte\-granularity block put 
routines -.PP -.RS -.TP -.B * -C/C++ and Fortran -.RS -.PP -.RS -.RE -.TP -.B * -\fIshmem_putmem\fP(3) -.TP -.B * -\fIshmem_getmem\fP(3) -.RE -.RS -.PP -.RE -.TP -.B * -Fortran only: -.RS -.PP -.RS -.RE -.TP -.B * -\fIshmem_character_put\fP(3) -.TP -.B * -\fIshmem_character_get\fP(3) -.RE -.RS -.PP -.RE -.RE -.PP -.RE -.TP -Collect routines -.RS -.TP -.B * -C/C++ and Fortran: -.RS -.PP -.RS -.RE -.TP -.B * -\fIshmem_collect32\fP(3) -.TP -.B * -\fIshmem_collect64\fP(3) -.TP -.B * -\fIshmem_fcollect32\fP(3) -.TP -.B * -\fIshmem_fcollect64\fP(3) -.RE -.RS -.PP -.RE -.RE -.PP -.RE -.TP -Atomic memory fetch\-and\-operate (fetch\-op) routines -.RS -.TP -.B * -C/C++ only: -.RS -.TP -.B * -\fIshmem_double_swap\fP(3) -.TP -.B * -\fIshmem_float_swap\fP(3) -.TP -.B * -\fIshmem_int_cswap\fP(3) -.TP -.B * -\fIshmem_int_fadd\fP(3) -.TP -.B * -\fIshmem_int_finc\fP(3) -.TP -.B * -\fIshmem_int_swap\fP(3) -.TP -.B * -\fIshmem_long_cswap\fP(3) -.TP -.B * -\fIshmem_long_fadd\fP(3) -.TP -.B * -\fIshmem_long_finc\fP(3) -.TP -.B * -\fIshmem_long_swap\fP(3) -.TP -.B * -\fIshmem_longlong_cswap\fP(3) -.TP -.B * -\fIshmem_longlong_fadd\fP(3) -.TP -.B * -\fIshmem_longlong_finc\fP(3) -.TP -.B * -\fIshmem_longlong_swap\fP(3) -.RE -.RS -.PP -.RE -.TP -.B * -Fortran only: -.RS -.TP -.B * -\fIshmem_int4_cswap\fP(3) -.TP -.B * -\fIshmem_int4_fadd\fP(3) -.TP -.B * -\fIshmem_int4_finc\fP(3) -.TP -.B * -\fIshmem_int4_swap\fP(3) -.TP -.B * -\fIshmem_int8_swap\fP(3) -.TP -.B * -\fIshmem_real4_swap\fP(3) -.TP -.B * -\fIshmem_real8_swap\fP(3) -.TP -.B * -\fIshmem_int8_cswap\fP(3) -.RE -.RS -.PP -.RE -.RE -.PP -.RE -.TP -Atomic memory operation routines -.RS -.TP -.B * -Fortran only: -.RS -.PP -.RS -.RE -.TP -.B * -\fIshmem_int4_add\fP(3) -.TP -.B * -\fIshmem_int4_inc\fP(3) -.RE -.RS -.PP -.RE -.RE -.PP -.RE -.TP -Remote memory pointer function -.RS -.TP -.B * -C/C++ and Fortran: -.RS -.PP -.RS -.RE -.TP -.B * -\fIshmem_ptr\fP(3) -.RE -.RS -.PP -.RE -.RE -.PP -.RE -.TP -Reduction routines -.RS -.TP -.B * 
-C/C++ only: -.RS -.TP -.B * -\fIshmem_longdouble_max_to_all\fP(3) -.TP -.B * -\fIshmem_longdouble_min_to_all\fP(3) -.TP -.B * -\fIshmem_longdouble_prod_to_all\fP(3) -.TP -.B * -\fIshmem_longdouble_sum_to_all\fP(3) -.RE -.RS -.PP -.RE -.TP -.B * -Fortran only: -.RS -.PP -.RS -.RE -.TP -.B * -\fIshmem_real16_max_to_all\fP(3) -.TP -.B * -\fIshmem_real16_min_to_all\fP(3) -.TP -.B * -\fIshmem_real16_prod_to_all\fP(3) -.TP -.B * -\fIshmem_real16_sum_to_all\fP(3) -.RE -.RS -.PP -.RE -.RE -.PP -.RE -.TP -Accessibility query routines -.RS -.TP -.B * -C/C++ and Fortran: -.RS -.TP -.B * -\fIshmem_pe_accessible\fP(3) -.TP -.B * -\fIshmem_addr_accessible\fP(3) -.RE -.RS -.PP -.RE -.RE -.PP -.RE -.TP -Symmetric Data Objects -.PP -Consistent with the SPMD nature of the OpenSHMEM programming model is the -concept of symmetric data objects. These are arrays or variables that -exist with the same size, type, and relative address on all PEs. -Another term for symmetric data objects is "remotely accessible data objects". -In the interface definitions for OpenSHMEM data transfer routines, one or more of the -parameters are typically required to be symmetric or remotely accessible. -.PP -The following kinds of data objects are symmetric: -.RS -.TP -.B * -Fortran data objects in common blocks or with the SAVE attribute. These data -objects must not be defined in a dynamic shared object (DSO). -.TP -.B * -Non\-stack C and C++ variables. These data objects must not be defined in a DSO. -.TP -.B * -Fortran arrays allocated with \fIshpalloc\fP(3F) -.TP -.B * -C and C++ data allocated by \fIshmalloc\fP(3C) -.RE -.RS -.PP -.RE -.TP -Collective Routines -Some SHMEM routines, for example, \fIshmem_broadcast\fP(3) -and -\fIshmem_float_sum_to_all\fP(3), -are classified as collective routines -because they distribute work across a set of PEs. -They must be called concurrently by all PEs in the active set defined by the PE_start, -logPE_stride, PE_size argument triplet. 
The following man pages describe the OpenSHMEM -collective routines: -.RS -.TP -.B * -\fIshmem_and\fP(3) -.TP -.B * -\fIshmem_barrier\fP(3) -.TP -.B * -\fIshmem_broadcast\fP(3) -.TP -.B * -\fIshmem_collect\fP(3) -.TP -.B * -\fIshmem_max\fP(3) -.TP -.B * -\fIshmem_min\fP(3) -.TP -.B * -\fIshmem_or\fP(3) -.TP -.B * -\fIshmem_prod\fP(3) -.TP -.B * -\fIshmem_sum\fP(3) -.TP -.B * -\fIshmem_xor\fP(3) -.RE -.RS -.PP -.RE -.PP -.SH USING THE SYMMETRIC WORK ARRAY, PSYNC - -Multiple pSync arrays are often needed if a particular PE calls as OpenSHMEM collective -routine twice without intervening barrier synchronization. Problems would occur if some PEs -in the active set for call 2 arrive at call 2 before processing of call 1 is complete by all PEs in -the call 1 active set. You can use \fIshmem_barrier\fP(3) -or \fIshmem_barrier_all\fP(3) -to perform a barrier synchronization between consecutive calls to OpenSHMEM collective -routines. -.PP -There are two special cases: -.RE -.TP -.B * -The \fIshmem_barrier\fP(3) routine allows the same pSync array to be used on -consecutive calls as long as the active PE set does not change. -.TP -.B * -If the same collective routine is called multiple times with the same active set, the -calls may alternate between two pSync arrays. The SHMEM routines guarantee that a -first call is completely finished by all PEs by the time processing of a third call begins -on any PE. -.PP -Because the SHMEM routines restore pSync to its original contents, multiple calls that -use the same pSync array do not require that pSync be reinitialized after the first call. -.PP -.SH SHMEM ENVIRONMENT VARIABLES - -This section lists the significant SHMEM environment variables. -.TP -.B * -\fBSMA_VERSION\fP print the library version at start\-up. -.TP -.B * -\fBSMA_INFO\fP print helpful text about all these environment variables. -.TP -.B * -\fBSMA_SYMMETRIC_SIZE\fP number of bytes to allocate for the symmetric heap. 
-.TP -.B * -\fBSMA_DEBUG\fP enable debugging messages. -.PP -The first call to SHMEM must be \fIstart_pes\fP(3)\&. -This routines initialize the SHMEM runtime. -.PP -Calling any other SHMEM routines beforehand has undefined behavior. Multiple calls -to this routine is not allowed. -.PP -.SH COMPILING AND RUNNING OPENSHMEM PROGRAMS - -The OpenSHMEM specification is silent regarding how OpenSHMEM programs are compiled, -linked and run. This section shows some examples of how wrapper programs could be utilized -to compile and launch applications. The commands are styled after wrapper programs -found in many MPI implementations. -.PP -The following sample command line demonstrates running an OpenSHMEM Program using a wrapper script (\fBoshrun\fP -in this case): -.PP -.TP -.B * -C/C++: -.Vb -oshcc c_program.c -.Ve -.TP -.B * -FORTRAN: -.Vb -oshfort fortran_program.f -.Ve -.PP -The following sample command line demonstrates running an OpenSHMEM Program assuming that the library provides a wrapper script for such purpose -(named \fBoshrun\fP -for this example): -.PP -.Vb -oshrun \-np 32 ./a.out -.Ve -.PP -.SH EXAMPLES - -\fBExample 1\fP: -The following Fortran OpenSHMEM program directs all PEs to sum -simultaneously the numbers in the VALUES variable across all PEs: -.Vb -PROGRAM REDUCTION - REAL VALUES, SUM - COMMON /C/ VALUES - REAL WORK - - CALL START_PES(0) - VALUES = MY_PE() - CALL SHMEM_BARRIER_ALL ! Synchronize all PEs - SUM = 0.0 - DO I = 0, NUM_PES()\-1 - CALL SHMEM_REAL_GET(WORK, VALUES, 1, I) ! Get next value - SUM = SUM + WORK ! 
Sum it - ENDDO - PRINT *, 'PE ', MY_PE(), ' COMPUTED SUM=', SUM - CALL SHMEM_BARRIER_ALL -END -.Ve -\fBExample 2\fP: -The following C OpenSHMEM program transfers an array of 10 longs from -PE 0 to PE 1: -.Vb -#include - -main() { - long source[10] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 }; - static long target[10]; - - shmem_init(); - if (shmem_my_pe() == 0) { - /* put 10 elements into target on PE 1 */ - shmem_long_put(target, source, 10, 1); - } - shmem_barrier_all(); /* sync sender and receiver */ - if (shmem_my_pe() == 1) - printf("target[0] on PE %d is %d\\n", shmem_my_pe(), target[0]); -} -.Ve -.SH SEE ALSO - -The following man pages also contain information on OpenSHMEM routines. See the -specific man pages for implementation information. -.PP -\fIshmem_add\fP(3), -\fIshmem_and\fP(3), -\fIshmem_barrier\fP(3), -\fIshmem_barrier_all\fP(3), -\fIshmem_broadcast\fP(3), -\fIshmem_cache\fP(3), -\fIshmem_collect\fP(3), -\fIshmem_cswap\fP(3), -\fIshmem_fadd\fP(3), -\fIshmem_fence\fP(3), -\fIshmem_finc\fP(3), -\fIshmem_get\fP(3), -\fIshmem_iget\fP(3), -\fIshmem_inc\fP(3), -\fIshmem_iput\fP(3), -\fIshmem_lock\fP(3), -\fIshmem_max\fP(3), -\fIshmem_min\fP(3), -\fIshmem_my_pe\fP(3), -\fIshmem_or\fP(3), -\fIshmem_prod\fP(3), -\fIshmem_put\fP(3), -\fIshmem_quiet\fP(3), -\fIshmem_short_g\fP(3), -\fIshmem_short_p\fP(3), -\fIshmem_sum\fP(3), -\fIshmem_swap\fP(3), -\fIshmem_wait\fP(3), -\fIshmem_xor\fP(3), -\fIshmem_pe_accessible\fP(3), -\fIshmem_addr_accessible\fP(3), -\fIshmem_init\fP(3), -\fIshmem_malloc\fP(3C), -\fIshmem_my_pe\fP(3I), -\fIshmem_n_pes\fP(3I) diff --git a/oshmem/shmem/man/man3/shfree.3in b/oshmem/shmem/man/man3/shfree.3in deleted file mode 100644 index 6cdb8014e9e..00000000000 --- a/oshmem/shmem/man/man3/shfree.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_malloc.3 diff --git a/oshmem/shmem/man/man3/shmalloc.3in b/oshmem/shmem/man/man3/shmalloc.3in deleted file mode 100644 index 5255598fa98..00000000000 --- a/oshmem/shmem/man/man3/shmalloc.3in +++ /dev/null @@ 
-1,105 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2015 University of Houston. All rights reserved. -.\" Copyright (c) 2015 Mellanox Technologies, Inc. -.\" $COPYRIGHT$ -.de Vb -.ft CW -.nf -.. -.de Ve -.ft R - -.fi -.. -.TH "SHMALLOC" "3" "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME - -\fIshmalloc\fP(3), -\fIshfree\fP(3), -\fIshmemalign\fP(3), -\fIshrealloc\fP(3) -\- Symmetric heap memory management functions. -.SH SYNOPSIS - -C or C++: -.Vb -#include - -void *shmalloc(size_t size); - -void shfree(void *ptr); - -void *shrealloc(void *ptr, size_t size); - -void *shmemalign(size_t alignment, size_t size); - -extern long malloc_error; -.Ve -.SH DESCRIPTION - -The \fBshmalloc\fP -function returns a pointer to a block of at least size bytes -suitably aligned for any use. This space is allocated from the symmetric heap (in contrast -to \fImalloc\fP(3C), -which allocates from the private heap). -.PP -The \fBshmemalign\fP -function allocates a block in the symmetric heap that has a -byte alignment specified by the alignment argument. -.PP -The \fBshfree\fP -function causes the block to which ptr points to, to be deallocated, -that is, made available for further allocation. If ptr is a null pointer, no action -occurs; otherwise, if the argument does not match a pointer earlier returned by a symmetric -heap function, or if the space has already been deallocated, malloc_error is set to indicate the -error, and shfree returns. -.PP -The \fBshrealloc\fP -function changes the size of the block to which ptr points to, to the -size (in bytes) specified by size. -.PP -The contents of the block are unchanged up to the lesser of the new and old sizes. If the new -size is larger, the value of the newly allocated portion of the block is indeterminate. If ptr is a -null pointer, the shrealloc function behaves like the shmalloc function for the specified size. If -size is 0 and ptr is not a null pointer, the block to which it points to is freed. 
Otherwise, if ptr -does not match a pointer earlier returned by a symmetric heap function, or if the space has -already been deallocated, the malloc_error variable is set to indicate the error, and shrealloc -returns a null pointer. If the space cannot be allocated, the block to which ptr points to is -unchanged. -.PP -The shmalloc, shfree, and shrealloc functions are provided so that multiple PEs in an -application can allocate symmetric, remotely accessible memory blocks. These memory -blocks can then be used with (shmem) communication routines. Each of these functions call -the \fIshmem_barrier_all\fP(3) -function before returning; this ensures that all PEs -participate in the memory allocation, and that the memory on other PEs can be used as soon -as the local PE returns. -.PP -The user is responsible for calling these functions with identical argument(s) on all PEs; if -differing size arguments are used, subsequent calls may not return the same symmetric heap -address on all PEs. -.PP -.SH NOTES - -The total size of the symmetric heap is determined at job startup. One can adjust the size of -the heap using the SHMEM_SYMMETRIC_HEAP_SIZE environment variable. See the -\fIintro_shmem\fP(3) -man page for futher details. -The shmalloc, shfree, and shrealloc functions differ from the private heap allocation functions -in that all PEs in an application must call them (a barrier is used to ensure this). -.PP -.SH RETURN VALUES - -The \fBshmalloc\fP -function returns a pointer to the allocated space (which should -be identical on all PEs); otherwise, it returns a null pointer (with malloc_error set). -The \fBshfree\fP -function returns no value. -The \fBshrealloc\fP -function returns a pointer to the allocated space (which -may have moved); otherwise, it returns a null pointer (with malloc_error set). 
-.SH SEE ALSO - -\fIintro_shmem\fP(3), -\fImy_pe\fP(3I), -\fIstart_pes\fP(3) diff --git a/oshmem/shmem/man/man3/shmem_addr_accessible.3in b/oshmem/shmem/man/man3/shmem_addr_accessible.3in deleted file mode 100644 index d04b6d5cea1..00000000000 --- a/oshmem/shmem/man/man3/shmem_addr_accessible.3in +++ /dev/null @@ -1,56 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2015 University of Houston. All rights reserved. -.\" Copyright (c) 2015 Mellanox Technologies, Inc. -.\" $COPYRIGHT$ -.de Vb -.ft CW -.nf -.. -.de Ve -.ft R - -.fi -.. -.TH "SHMEM\\_ADDR\\_ACCESSIBLE" "3" "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME - -shmem_addr_accessible \- Indicates if an address is accessible via OpenSHMEM operations -from the specified remote PE. -.SH SYNOPSIS - -C or C++: -.Vb -#include - -int shmem_addr_accessible(const void *addr, int pe); -.Ve -Fortran: -.Vb -INCLUDE "mpp/shmem.fh" - -LOGICAL LOG, SHMEM_ADDR_ACCESSIBLE -INTEGER pe - -LOG = SHMEM_ADDR_ACCESSIBLE(addr, pe) -.Ve -.SH DESCRIPTION - -shmem_addr_accessible is a query function that indicates whether a local address is -accessible via SHMEM operations from the specified remote PE. -.PP -This function verifies that the remote PE is accessible via SHMEM data transfer functions from -the local PE, and that the specified address is in a symmetric data segment with respect to the -remote PE. -.PP -.SH RETURN VALUES - -C: The return value is 1 if addr is a symmetric data object and accessible via SHMEM -operations from the specified remote PE; otherwise, it is 0. -.PP -Fortran: The return value is \&.TRUE. if addr is a symmetric data object and accessible via -SHMEM operations from the specified remote PE; otherwise, it is \&.FALSE.. 
-.PP -.SH SEE ALSO - -\fIintro_shmem\fP(3), -\fIshmem_pe_accessible\fP(3) diff --git a/oshmem/shmem/man/man3/shmem_align.3in b/oshmem/shmem/man/man3/shmem_align.3in deleted file mode 100644 index 6cdb8014e9e..00000000000 --- a/oshmem/shmem/man/man3/shmem_align.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_malloc.3 diff --git a/oshmem/shmem/man/man3/shmem_alltoall32.3in b/oshmem/shmem/man/man3/shmem_alltoall32.3in deleted file mode 100644 index c2f08d65847..00000000000 --- a/oshmem/shmem/man/man3/shmem_alltoall32.3in +++ /dev/null @@ -1,226 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2016 Mellanox Technologies, Inc. -.\" $COPYRIGHT$ -.de Vb -.ft CW -.nf -.. -.de Ve -.ft R - -.fi -.. -.TH "SHMEM\\_ALLTOALL" "3" "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME - -\fIshmem_alltoall32\fP(3), -\fIshmem_alltoall64\fP(3), -\fIshmem_alltoalls32\fP(3), -\fIshmem_alltoalls64\fP(3) -\- collective routine where each PE exchanges a fixed amount of data with all -other PEs in the Active set -.SH SYNOPSIS - -C or C++: -.Vb -#include - -void shmem_alltoall32(void *target, const void *source, - size_t nelems, int PE_start, int logPE_stride, - int PE_size, long *pSync); - -void shmem_alltoall64(void *target, const void *source, - size_t nelems, int PE_start, int logPE_stride, - int PE_size, long *pSync); - -void shmem_alltoalls32(void *target, const void *source, - ptrdiff_t dst, ptrdiff_t sst, - size_t nelems, int PE_start, int logPE_stride, - int PE_size, long *pSync); - -void shmem_alltoalls64(void *target, const void *source, - ptrdiff_t dst, ptrdiff_t sst, - size_t nelems, int PE_start, int logPE_stride, - int PE_size, long *pSync); -.Ve -Fortran: -.Vb -INCLUDE "mpp/shmem.fh" - -INTEGER dst, sst, nelems, PE_root, PE_start, logPE_stride, PE_size -INTEGER pSync(SHMEM_ALLTOALL_SYNC_SIZE) - -CALL SHMEM_ALLTOALL32(target, source, nelems, -& PE_start, logPE_stride, PE_size, fIpSync) - -CALL SHMEM_ALLTOALL64(target, source, nelems, -& PE_start, logPE_stride, PE_size, 
pSync) - -CALL SHMEM_ALLTOALLS32(target, source, dst, sst, nelems, -& PE_start, logPE_stride, PE_size, pSync) - -CALL SHMEM_ALLTOALLS64(target, source, dst, sst, nelems, -& PE_start, logPE_stride, PE_size, pSync) -.Ve -.SH DESCRIPTION - -.PP -The shmem_alltoalls routines are collective routines. Each PE in the Active set exchanges nelems strided -data elements of size 32 bits (for shmem_alltoalls32) or 64 bits (for shmem_alltoalls64) with all other PEs -in the set. Both strides, dst and sst, must be greater than or equal to 1. The sst*jth block sent from PE i to -PE j is placed in the dst*ith block of the dest data object on PE j. -As with all OpenSHMEM collective routines, these routines assume that only PEs in the Active set call the -routine. If a PE not in the Active set calls an OpenSHMEM collective routine, undefined behavior results. -The values of arguments dst, sst, nelems, PE_start, logPE_stride, and PE_size must be equal on all PEs in -the Active set. The same dest and source data objects, and the same pSync work array must be passed to all -PEs in the Active set. -Before any PE calls to a shmem_alltoalls routine, the following conditions must exist (synchronization via -a barrier or some other method is often needed to ensure this): The pSync array on all PEs in the Active set -is not still in use from a prior call to a shmem_alltoalls routine. The dest data object on all PEs in the -Active set is ready to accept the shmem_alltoalls data. -Upon return from a shmem_alltoalls routine, the following is true for the local PE: Its dest symmetric data -object is completely updated and the data has been copied out of the source data object. The values in the -pSync array are restored to the original values. -.PP -The arguments are as follows: -.TP -A symmetric data object with one of the following data types: -.RS -.TP -\fBshmem_alltoall32\fP: Any noncharacter type that -has an element size of 32 bits. No Fortran derived types or C/C++ structures are allowed. 
-.TP -\fBshmem_alltoall64\fP: Any noncharacter type that has an element size -of 64 bits. No Fortran derived types or C/C++ structures are allowed. -.RE -.RS -.PP -.RE -target -A symmetric data object large enough to receive the combined total of -nelems elements from each PE in the Active set. -.TP -source -A symmetric data object that contains nelems elements of data for each -PE in the Active set, ordered according to destination PE. -.TP -dst -The stride between consecutive elements of the dest data object. The -stride is scaled by the element size. A value of 1 indicates contiguous -data. dst must be of type ptrdiff_t. If you are using Fortran, it must be -a default integer value. -.TP -sst -The stride between consecutive elements of the source data object. The -stride is scaled by the element size. A value of 1 indicates contiguous -data. sst must be of type ptrdiff_t. If you are using Fortran, it must be a -default integer value. -.TP -nelems -The number of elements to exchange for each PE. nelems must be of -type size_t for C/C++. If you are using Fortran, it must be a default -integer value -.TP -PE_start -The lowest virtual PE number of the active set of PEs. PE_start must be of -type integer. If you are using Fortran, it must be a default integer value. -.TP -logPE_stride -The log (base 2) of the stride between consecutive virtual PE numbers in -the active set. log_PE_stride must be of type integer. If you are using Fortran, it must be a -default integer value. -.TP -PE_size -The number of PEs in the active set. PE_size must be of type integer. If you -are using Fortran, it must be a default integer value. -.PP -.TP -pSync -A symmetric work array. In C/C++, pSync must be of type long and size -_SHMEM_ALLTOALL_SYNC_SIZE. -In Fortran, pSync must be of type integer and size SHMEM_ALLTOALL_SYNC_SIZE. 
Every -element of this array must be initialized with the value _SHMEM_SYNC_VALUE (in C/C++) -or SHMEM_SYNC_VALUE (in Fortran) before any of the PEs in the active set enter -shmem_barrier(). -.PP -Upon return from a shmem_alltoalls routine, the following is true for the local PE: Its dest -symmetric data object is completely updated and the data has been copied out of the source -data object. The values in the pSync array are restored to the original values. -.PP -The values of arguments PE_root, PE_start, logPE_stride, and PE_size must be equal on -all PEs in the active set. The same target and source data objects and the same pSync work -array must be passed to all PEs in the active set. -.PP -Before any PE calls a alltoall routine, you must ensure that the following conditions exist -(synchronization via a barrier or some other method is often needed to ensure this): The -pSync array on all PEs in the active set is not still in use from a prior call to a alltoall -routine. The target array on all PEs in the active set is ready to accept the alltoall data. -.SH NOTES - -The terms collective and symmetric are defined in \fIintro_shmem\fP(3)\&. -.PP -All SHMEM alltoall routines restore pSync to its original contents. Multiple calls to SHMEM -routines that use the same pSync array do not require that pSync be reinitialized after the -first call. -.PP -You must ensure the that the pSync array is not being updated by any PE in the active set -while any of the PEs participates in processing of a SHMEM broadcast routine. Be careful to -avoid these situations: If the pSync array is initialized at run time, some type of -synchronization is needed to ensure that all PEs in the working set have initialized pSync -before any of them enter a SHMEM routine called with the pSync synchronization array. 
A -pSync array may be reused on a subsequent SHMEM broadcast routine only if none of the PEs -in the active set are still processing a prior SHMEM alltoall routine call that used the same -pSync array. In general, this can be ensured only by doing some type of synchronization. -However, in the special case of SHMEM routines being called with the same active set, you -can allocate two pSync arrays and alternate between them on successive calls. -.PP -.SH EXAMPLES - -.PP -C/C++ example: -.Vb -#include -#include -long pSync[SHMEM_ALLTOALL_SYNC_SIZE]; -int main(void) -{ -int64_t *source, *dest; -int i, count, pe; -shmem_init(); -count = 2; -dest = (int64_t*) shmem_malloc(count * shmem_n_pes() * sizeof(int64_t)); -source = (int64_t*) shmem_malloc(count * shmem_n_pes() * sizeof(int64_t)); -/* assign source values */ -for (pe=0; pe - -void shmem_barrier(int PE_start, int logPE_stride, int PE_size, - long *pSync); -.Ve -Fortran: -.Vb -INCLUDE "mpp/shmem.fh" - -INTEGER PE_start, logPE_stride, PE_size -INTEGER pSync(SHMEM_BARRIER_SYNC_SIZE) - -CALL SHMEM_BARRIER(PE_start, logPE_stride, PE_size, pSync) -.Ve -.SH DESCRIPTION - -The shmem_barrier routine does not return until the subset of PEs specified by -\fBPE_start\fP, -\fBlogPE_stride\fP -and \fBPE_size\fP, -has entered this routine at the -same point of the execution path. -.PP -As with all SHMEM collective routines, each of these routines assumes that only PEs in the -active set call the routine. If a PE not in the active set calls a SHMEM collective routine, -undefined behavior results. -.PP -The arguments are as follows: -.TP -PE_start -The lowest virtual PE number of the active set of PEs. PE_start must be of -type integer. If you are using Fortran, it must be a default integer value. -.TP -logPE_stride -The log (base 2) of the stride between consecutive virtual PE numbers in -the active set. logPE_stride must be of type integer. If you are using Fortran, it must be a -default integer value. 
-.TP -PE_size -The number of PEs in the active set. PE_size must be of type integer. If you -are using Fortran, it must be a default integer value. -.TP -pSync -A symmetric work array. In C/C++, pSync must be of type int and size -_SHMEM_BARRIER_SYNC_SIZE. In Fortran, pSync must be of type integer and size -SHMEM_BARRIER_SYNC_SIZE. If you are using Fortran, it must be a default integer type. -Every element of this array must be initialized to 0 before any of the PEs in the active set enter -shmem_barrier the first time. -.PP -The values of arguments PE_start, logPE_stride, and PE_size must be equal on all PEs in the -active set. The same work array must be passed in pSync to all PEs in the active set. -.PP -shmem_barrier ensures that all previously issued local stores and previously issued remote -memory updates done by any of the PEs in the active set (by using SHMEM calls, for -example \fIshmem_put\fP(3)) -are complete before returning. -.PP -The same pSync array may be reused on consecutive calls to shmem_barrier if the same -active PE set is used. -.PP -.SH NOTES - -The term symmetric is defined in \fIintro_shmem\fP(3)\&. -.PP -If the pSync array is initialized at run time, be sure to use some type of synchronization, for -example, a call to \fIshmem_barrier_all\fP(3), -before calling shmem_barrier for the first -time. -.PP -If the active set does not change, shmem_barrier can be called repeatedly with the same -pSync array. No additional synchronization beyond that implied by shmem_barrier itself is -necessary in this case. 
-.PP -.SH EXAMPLES - -C/C++ example: -.Vb -shmem_barrier(PE_start, logPE_stride, size, pSync); -.Ve -Fortran example: -.Vb -INTEGER PSYNC(SHMEM_BARRIER_SYNC_SIZE) -INTEGER PE_START, LOGPE_STRIDE, PE_SIZE, PSYNC -DATA PSYNC /SHMEM_BARRIER_SYNC_SIZE*0/ - -CALL SHMEM_BARRIER(PE_START, LOGPE_STRIDE, PE_SIZE, PSYNC) -.Ve -.SH SEE ALSO - -\fIintro_shmem\fP(3), -\fIshmem_barrier_all\fP(3) diff --git a/oshmem/shmem/man/man3/shmem_barrier_all.3in b/oshmem/shmem/man/man3/shmem_barrier_all.3in deleted file mode 100644 index a530b63e7ae..00000000000 --- a/oshmem/shmem/man/man3/shmem_barrier_all.3in +++ /dev/null @@ -1,59 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2015 University of Houston. All rights reserved. -.\" Copyright (c) 2015 Mellanox Technologies, Inc. -.\" $COPYRIGHT$ -.de Vb -.ft CW -.nf -.. -.de Ve -.ft R - -.fi -.. -.TH "SHMEM\\_BARRIER\\_ALL" "3" "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME - -shmem_barrier_all \- Suspends the execution of the calling PE until all other PEs issue a call -to this particular shmem_barrier_all() statement. -.SH SYNOPSIS - -C or C++: -.Vb -#include - -void shmem_barrier_all(void); -.Ve -Fortran: -.Vb -include 'mpp/shmem.h' - -CALL SHMEM_BARRIER_ALL -.Ve -.SH DESCRIPTION - -The shmem_barrier_all routine does not return until all other PEs have entered this routine -at the same point of the execution path. -.PP -Prior to synchronizing with other PEs, shmem_barrier_all ensures completion of all -previously issued local memory stores and remote memory updates issued via SHMEM -functions such as \fIshmem_put32\fP(3)\&. -.PP -.SH EXAMPLES - -.Vb -setup_data() -{ - if (shmem_my_pe() == 0) { - setup(); - } - - /* All PEs wait for PE 0 to complete setup(). 
*/ - shmem_barrier_all(); -} -.Ve -.PP -.SH SEE ALSO - -\fIshmem_barrier\fP(3), -\fIshmem_init\fP(3) diff --git a/oshmem/shmem/man/man3/shmem_broadcast32.3in b/oshmem/shmem/man/man3/shmem_broadcast32.3in deleted file mode 100644 index abb38e7952e..00000000000 --- a/oshmem/shmem/man/man3/shmem_broadcast32.3in +++ /dev/null @@ -1,186 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2015 University of Houston. All rights reserved. -.\" Copyright (c) 2015 Mellanox Technologies, Inc. -.\" $COPYRIGHT$ -.de Vb -.ft CW -.nf -.. -.de Ve -.ft R - -.fi -.. -.TH "SHMEM\\_BROADCAST" "3" "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME - -\fIshmem_broadcast4\fP(3), -\fIshmem_broadcast8\fP(3), -\fIshmem_broadcast32\fP(3), -\fIshmem_broadcast64\fP(3) -\- Copy a data object from a designated PE to a target -location on all other PEs of the active set. -.SH SYNOPSIS - -C or C++: -.Vb -#include - -void shmem_broadcast32(void *target, const void *source, - size_t nelems, int PE_root, int PE_start, int logPE_stride, - int PE_size, long *pSync); - -void shmem_broadcast64(void *target, const void *source, - size_t nelems, int PE_root, int PE_start, int logPE_stride, - int PE_size, long *pSync); -.Ve -Fortran: -.Vb -INCLUDE "mpp/shmem.fh" - -INTEGER nelems, PE_root, PE_start, logPE_stride, PE_size -INTEGER pSync(SHMEM_BCAST_SYNC_SIZE) - -CALL SHMEM_BROADCAST4(target, source, nelems, PE_root, -& PE_start, logPE_stride, PE_size, fIpSync) - -CALL SHMEM_BROADCAST8(target, source, nelems, PE_root, -& PE_start, logPE_stride, PE_size, pSync) - -CALL SHMEM_BROADCAST32(target, source, nelems, -& PE_root, PE_start, logPE_stride, PE_size, pSync) - -CALL SHMEM_BROADCAST64(target, source, nelems, -& PE_root, PE_start, logPE_stride, PE_size, pSync) -.Ve -.SH DESCRIPTION - -The broadcast routines write the data at address source of the PE specified by -\fBPE_root\fP -to address \fBtarget\fP -on all other PEs in the active set. 
The active set of -PEs is defined by the triplet \fBPE_start\fP, -\fBlogPE_stride\fP -and \fBPE_size\fP\&. -The data is not copied to the target address on the PE specified by \fBPE_root\fP\&. -Before returning, the broadcast routines ensure that the elements of the pSync array are -restored to their initial values. -.PP -As with all SHMEM collective routines, each of these routines assumes that only PEs in the -active set call the routine. If a PE not in the active set calls a SHMEM collective routine, -undefined behavior results. -.PP -The arguments are as follows: -.TP -target -A symmetric data object with one of the following data types: -.RS -.TP -\fBshmem_broadcast8, shmem_broadcast64\fP: Any noncharacter type that -has an element size of 64 bits. No Fortran derived types or C/C++ structures are allowed. -.TP -\fBshmem_broadcast32\fP: Any noncharacter type that has an element size -of 32 bits. No Fortran derived types or C/C++ structures are allowed. -.TP -\fBshmem_broadcast4\fP: Any noncharacter type that has an element size -of 32 bits. -.RE -.RS -.PP -.RE -.TP -source -A symmetric data object that can be of any data type that is permissible for the -target argument. -.TP -nelems -The number of elements in source. For shmem_broadcast32 and -shmem_broadcast4, this is the number of 32\-bit halfwords. nelems must be of type integer. -If you are using Fortran, it must be a default integer value. -.TP -PE_root -Zero\-based ordinal of the PE, with respect to the active set, from which the -data is copied. Must be greater than or equal to 0 and less than PE_size. PE_root must be of -type integer. If you are using Fortran, it must be a default integer value. -.TP -PE_start -The lowest virtual PE number of the active set of PEs. PE_start must be of -type integer. If you are using Fortran, it must be a default integer value. -.TP -logPE_stride -The log (base 2) of the stride between consecutive virtual PE numbers in -the active set. 
log_PE_stride must be of type integer. If you are using Fortran, it must be a -default integer value. -.TP -PE_size -The number of PEs in the active set. PE_size must be of type integer. If you -are using Fortran, it must be a default integer value. -.PP -.TP -pSync -A symmetric work array. In C/C++, pSync must be of type long and size -_SHMEM_BCAST_SYNC_SIZE. -In Fortran, pSync must be of type integer and size SHMEM_BCAST_SYNC_SIZE. Every -element of this array must be initialized with the value _SHMEM_SYNC_VALUE (in C/C++) -or SHMEM_SYNC_VALUE (in Fortran) before any of the PEs in the active set enter -shmem_barrier(). -.PP -The values of arguments PE_root, PE_start, logPE_stride, and PE_size must be equal on -all PEs in the active set. The same target and source data objects and the same pSync work -array must be passed to all PEs in the active set. -.PP -Before any PE calls a broadcast routine, you must ensure that the following conditions exist -(synchronization via a barrier or some other method is often needed to ensure this): The -pSync array on all PEs in the active set is not still in use from a prior call to a broadcast -routine. The target array on all PEs in the active set is ready to accept the broadcast data. -.PP -Upon return from a broadcast routine, the following are true for the local PE: If the current PE -is not the root PE, the target data object is updated. The values in the pSync array are -restored to the original values. -.SH NOTES - -The terms collective and symmetric are defined in \fIintro_shmem\fP(3)\&. -.PP -All SHMEM broadcast routines restore pSync to its original contents. Multiple calls to SHMEM -routines that use the same pSync array do not require that pSync be reinitialized after the -first call. -.PP -You must ensure the that the pSync array is not being updated by any PE in the active set -while any of the PEs participates in processing of a SHMEM broadcast routine. 
Be careful to -avoid these situations: If the pSync array is initialized at run time, some type of -synchronization is needed to ensure that all PEs in the working set have initialized pSync -before any of them enter a SHMEM routine called with the pSync synchronization array. A -pSync array may be reused on a subsequent SHMEM broadcast routine only if none of the PEs -in the active set are still processing a prior SHMEM broadcast routine call that used the same -pSync array. In general, this can be ensured only by doing some type of synchronization. -However, in the special case of SHMEM routines being called with the same active set, you -can allocate two pSync arrays and alternate between them on successive calls. -.PP -.SH EXAMPLES - -In the following examples, the call to shmem_broadcast64 copies source on PE 4 to target -on PEs 5, 6, and 7. -.PP -C/C++ example: -.Vb -for (i=0; i < _SHMEM_BCAST_SYNC_SIZE; i++) { - pSync[i] = _SHMEM_SYNC_VALUE; -} -shmem_barrier_all(); /* Wait for all PEs to initialize pSync */ -shmem_broadcast64(target, source, nelems, 0, 4, 0, 4, pSync); -.Ve -Fortran example: -.Vb -INTEGER PSYNC(SHMEM_BCAST_SYNC_SIZE) -INTEGER TARGET, SOURCE, NELEMS, PE_ROOT, PE_START, -& LOGPE_STRIDE, PE_SIZE, PSYNC -COMMON /COM/ TARGET, SOURCE -DATA PSYNC /SHMEM_BCAST_SYNC_SIZE*SHMEM_SYNC_VALUE/ - -CALL SHMEM_BROADCAST64(TARGET, SOURCE, NELEMS, 0, 4, 0, 4, -& PSYNC) -.Ve -.PP -.SH SEE ALSO - -\fIintro_shmem\fP(3) diff --git a/oshmem/shmem/man/man3/shmem_broadcast64.3in b/oshmem/shmem/man/man3/shmem_broadcast64.3in deleted file mode 100644 index c9b171b335c..00000000000 --- a/oshmem/shmem/man/man3/shmem_broadcast64.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_broadcast32.3 diff --git a/oshmem/shmem/man/man3/shmem_char_g.3in b/oshmem/shmem/man/man3/shmem_char_g.3in deleted file mode 100644 index 512444a1638..00000000000 --- a/oshmem/shmem/man/man3/shmem_char_g.3in +++ /dev/null @@ -1,64 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2015 University of 
Houston. All rights reserved. -.\" Copyright (c) 2015 Mellanox Technologies, Inc. -.\" $COPYRIGHT$ -.de Vb -.ft CW -.nf -.. -.de Ve -.ft R - -.fi -.. -.TH "SHMEM\\_CHAR\\_G" "3" "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME - -\fIshmem_char_g\fP(3), -\fIshmem_float_g\fP(3), -\fIshmem_int_g\fP(3), -\fIshmem_long_g\fP(3), -\fIshmem_short_g\fP(3), -\fIshmem_longlong_g\fP(3), -\fIshmem_longdouble_g\fP(3) -\- These routines provide a low latency mechanism to read basic types (char, short, int, float, double, long, long long, long double) from symmetric data objects on remote PEs. -.SH SYNOPSIS - -C or C++: -.Vb -#include - - -char shmem_char_g(const char *addr, int pe); - -short shmem_short_g(const short *addr, int pe); - -int shmem_int_g(const int *addr, int pe); - -long shmem_long_g(const long *addr, int pe); - -long shmem_longlong_g(const long long *addr, int pe); - -float shmem_float_g(const float *addr, int pe); - -double shmem_double_g(const double *addr, int pe); - -long shmem_longdouble_g(const long double *addr, int pe); - -.Ve -.SH DESCRIPTION - -These routines provide a very low latency get capability for single elements of most basic types. -.PP -The arguments are as follows: -.TP -addr -The remotely accessible array element or scalar data object which will receive the -data on the remote PE. -.TP -pe -The number of the remote PE. -.SH SEE ALSO - -\fIintro_shmem\fP(3), -\fIshmem_get\fP(3) diff --git a/oshmem/shmem/man/man3/shmem_char_get.3in b/oshmem/shmem/man/man3/shmem_char_get.3in deleted file mode 100644 index 80910049200..00000000000 --- a/oshmem/shmem/man/man3/shmem_char_get.3in +++ /dev/null @@ -1,207 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2015 University of Houston. All rights reserved. -.\" Copyright (c) 2015 Mellanox Technologies, Inc. -.\" $COPYRIGHT$ -.de Vb -.ft CW -.nf -.. -.de Ve -.ft R - -.fi -.. 
-.TH "SHMEM\\_GET" "3" "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME - -\fIshmem_character_get\fP(3), -\fIshmem_complex_get\fP(3), -\fIshmem_double_get\fP(3), -\fIshmem_float_get\fP(3), -\fIshmem_get4\fP(3), -\fIshmem_get8\fP(3), -\fIshmem_get32\fP(3), -\fIshmem_get64\fP(3), -\fIshmem_get128\fP(3), -\fIshmem_getmem\fP(3), -\fIshmem_int_get\fP(3), -\fIshmem_integer_get\fP(3), -\fIshmem_logical_get\fP(3), -\fIshmem_long_get\fP(3), -\fIshmem_longdouble_get\fP(3), -\fIshmem_longlong_get\fP(3), -\fIshmem_real_get\fP(3), -\fIshmem_short_get\fP(3) -\- Transfers data from a specified processing element (PE). -.SH SYNOPSIS - -C or C++: -.Vb -#include - -void shmem_get32(void *target, const void *source, - size_t len, int pe); - -void shmem_get64(void *target, const void *source, - size_t len, int pe); - -void shmem_get128(void *target, const void *source, - size_t len, int pe); - -void shmem_getmem(void *target, const void *source, - size_t len, int pe); - -void shmem_int_get(int *target, const int *source, - size_t len, int pe); - -void shmem_double_get(double *target, const double *source, - size_t len, int pe); - -void shmem_float_get(float *target, const float *source, - size_t len, int pe); - -void shmem_long_get(long *target, const long *source, - size_t len, int pe); - -void shmem_longdouble_get(long double *target, - const long double *source, size_t len, int pe); - -void shmem_longlong_get(long long *target, - const long long *source, size_t len, int pe); - -void shmem_short_get(short *target, - const short *source, size_t len, int pe); -.Ve -Fortran: -.Vb -INCLUDE "mpp/shmem.fh" - -INTEGER len, pe - -CALL SHMEM_CHARACTER_GET(target, source, len, pe) - -CALL SHMEM_COMPLEX_GET(target, source, len, pe) - -CALL SHMEM_DOUBLE_GET(target, source, len, pe) - -CALL SHMEM_GET4(target, source, len, pe) - -CALL SHMEM_GET8(target, source, len, pe) - -CALL SHMEM_GET32(target, source, len, pe) - -CALL SHMEM_GET64(target, source, len, pe) - -CALL 
SHMEM_GET128(target, source, len, pe) - -CALL SHMEM_GETMEM(target, source, len, pe) - -CALL SHMEM_INTEGER_GET(target, source, len, pe) - -CALL SHMEM_LOGICAL_GET(target, source, len, pe) - -CALL SHMEM_REAL_GET(target, source, len, pe) -.Ve -.SH DESCRIPTION - -The shmem_get routines transfer \fBnelems\fP -elements of the data object at address \fBsource\fP -on the remote PE \fBpe\fP, -to the data object at address \fBtarget\fP -on the local PE. These routines -return after the data has been copied to address \fBtarget\fP -on the local PE. -.PP -The arguments are as follows: -.TP -target -Local data object to be updated. -.TP -source -Data object on the PE identified by pe that contains the data to be copied. This -data object must be remotely accessible. -.TP -len -Number of elements in the target and source arrays. len must be of type integer. If -you are using Fortran, it must be a constant, variable, or array element of default -integer type. -.TP -pe -PE number of the remote PE. pe must be of type integer. If you are using Fortran, it -must be a constant, variable, or array element of default integer type. -.PP -The target and source data objects must conform to typing constraints, which are as follows: -.TP -\fBshmem_getmem\fP: Fortran: Any noncharacter type. C: Any data type. len is -scaled in bytes. -.TP -\fBshmem_get4, shmem_get32\fP: Any noncharacter type that has a storage size -equal to 32 bits. -.TP -{shmem_get8, shmem_get64}: Any noncharacter type that has a storage size equal to -64 bits. -.TP -\fBshmem_get128\fP: Any noncharacter type that has a storage size equal to 128 -bits. -.TP -\fBshmem_short_get\fP: Elements of type short. -.TP -\fBshmem_int_get\fP: Elements of type int. -.TP -\fBshmem_long_get\fP: Elements of type long. -.TP -\fBshmem_longlong_get\fP: Elements of type long long. -.TP -\fBshmem_float_get\fP: Elements of type float. -.TP -\fBshmem_double_get\fP: Elements of type double. 
-.TP -\fBshmem_longdouble_get\fP: Elements of type long double. -.TP -\fBSHMEM_CHARACTER_GET\fP: Elements of type character. len is the number of -characters to transfer. The actual character lengths of the source and target variables are -ignored. -.TP -\fBSHMEM_COMPLEX_GET\fP: Elements of type complex of default size. -.TP -\fBSHMEM_DOUBLE_GET\fP: (Fortran) Elements of type double precision. -.TP -\fBSHMEM_INTEGER_GET\fP: Elements of type integer. -.TP -\fBSHMEM_LOGICAL_GET\fP: Elements of type logical. -.TP -\fBSHMEM_REAL_GET\fP: Elements of type real. -.PP -If you are using Fortran, data types must be of default size. For example, a real variable must -be declared as REAL, REAL*4, or REAL(KIND=4). -.SH NOTES - -See \fIintro_shmem\fP(3) -for a definition of the term remotely accessible. -.SH EXAMPLES - -Consider this simple example for Fortran. -.Vb -PROGRAM REDUCTION - REAL VALUES, SUM - COMMON /C/ VALUES - REAL WORK - - CALL START_PES(0) ! ALLOW ANY NUMBER OF PES - VALUES = MY_PE() ! INITIALIZE IT TO SOMETHING - CALL SHMEM_BARRIER_ALL - SUM = 0.0 - DO I = 0,NUM_PES()\-1 - CALL SHMEM_REAL_GET(WORK, VALUES, 1, I) - SUM = SUM + WORK - ENDDO - PRINT *, 'PE ', MY_PE(), ' COMPUTED SUM=', SUM - CALL SHMEM_BARRIER_ALL -END -.Ve -.SH SEE ALSO - -\fIintro_shmem\fP(3), -\fIshmem_put\fP(3), -\fIshmem_iget\fP(3), -\fIshmem_quiet\fP(3) diff --git a/oshmem/shmem/man/man3/shmem_char_get_nbi.3in b/oshmem/shmem/man/man3/shmem_char_get_nbi.3in deleted file mode 100644 index 50449e90a6d..00000000000 --- a/oshmem/shmem/man/man3/shmem_char_get_nbi.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_getmem_nbi.3 diff --git a/oshmem/shmem/man/man3/shmem_char_p.3in b/oshmem/shmem/man/man3/shmem_char_p.3in deleted file mode 100644 index 3d122b2d9cc..00000000000 --- a/oshmem/shmem/man/man3/shmem_char_p.3in +++ /dev/null @@ -1,73 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2015 University of Houston. All rights reserved. -.\" Copyright (c) 2015 Mellanox Technologies, Inc. 
-.\" $COPYRIGHT$ -.de Vb -.ft CW -.nf -.. -.de Ve -.ft R - -.fi -.. -.TH "SHMEM\\_CHAR\\_P" "3" "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME - -\fIshmem_char_p\fP(3), -\fIshmem_float_p\fP(3), -\fIshmem_int_p\fP(3), -\fIshmem_long_p\fP(3), -\fIshmem_short_p\fP(3), -\fIshmem_longlong_p\fP(3), -\fIshmem_longdouble_p\fP(3) -\- These routines provide a low latency mechanism to write basic types (char, short, int, float, double, long, long long, long double) to symmetric data objects on remote PEs. -.SH SYNOPSIS - -C or C++: -.Vb -#include - - -void shmem_char_p(char *addr, char value, int pe); - -void shmem_short_p(short *addr, short value, int pe); - -void shmem_int_p(int *addr, int value, int pe); - -void shmem_long_p(long *addr, long value, int pe); - -void shmem_longlong_p(long long *addr, long long value, int pe); - -void shmem_float_p(float *addr, float value, int pe); - -void shmem_double_p(double *addr, double value, int pe); - -void shmem_longdouble_p(long double *addr, long double value, int pe); - -.Ve -.SH DESCRIPTION - -These routines provide a very low latency put capability for single elements of most basic types. -.PP -The arguments are as follows: -.TP -addr -The remotely accessible array element or scalar data object which will receive the -data on the remote PE. -.TP -value -The value to be transferred to addr on the remote PE. -.TP -pe -The number of the remote PE. -.PP -As with \fIshmem_put\fP(3), -these functions start the remote transfer and may return before -the data is delivered to the remote PE. Use \fIshmem_quiet\fP(3) -to force completion of all -remote PUT transfers. -.SH SEE ALSO - -\fIintro_shmem\fP(3), -\fIshmem_put\fP(3) diff --git a/oshmem/shmem/man/man3/shmem_char_put.3in b/oshmem/shmem/man/man3/shmem_char_put.3in deleted file mode 100644 index fca93b5ff90..00000000000 --- a/oshmem/shmem/man/man3/shmem_char_put.3in +++ /dev/null @@ -1,214 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2015 University of Houston. 
All rights reserved. -.\" Copyright (c) 2015 Mellanox Technologies, Inc. -.\" $COPYRIGHT$ -.de Vb -.ft CW -.nf -.. -.de Ve -.ft R - -.fi -.. -.TH "SHMEM\\_PUT" "3" "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME - -\fIshmem_character_put\fP(3), -\fIshmem_complex_put\fP(3), -\fIshmem_double_put\fP(3), -\fIshmem_float_put\fP(3), -\fIshmem_int_put\fP(3), -\fIshmem_integer_put\fP(3), -\fIshmem_logical_put\fP(3), -\fIshmem_long_put\fP(3), -\fIshmem_longdouble_put\fP(3), -\fIshmem_longlong_put\fP(3), -\fIshmem_put4\fP(3), -\fIshmem_put8\fP(3), -\fIshmem_put32\fP(3), -\fIshmem_put64\fP(3), -\fIshmem_put128\fP(3), -\fIshmem_putmem\fP(3), -\fIshmem_real_put\fP(3), -\fIshmem_short_put\fP(3) -\- Transfers data to a specified -processing element (PE) -.SH SYNOPSIS - -C or C++: -.Vb -#include - -void shmem_double_put(double *target, const double *source, - size_t len, int pe); - -void shmem_float_put(float *target, const float *source, - size_t len, int pe); - -void shmem_int_put(int *target, const int *source, size_t len, - int pe); - -void shmem_long_put(long *target, const long *source, - size_t len, int pe); - -void shmem_longdouble_put(long double *target, - const long double *source, size_t len, int pe); - -void shmem_longlong_put(long long *target, - const long long *source, size_t len, int pe); - -void shmem_put32(void *target, const void *source, size_t len, - int pe); - -void shmem_put64(void *target, const void *source, size_t len, - int pe); - -void shmem_put128(void *target, const void *source, size_t len, - int pe); - -void shmem_putmem(void *target, const void *source, size_t len, - int pe); - -void shmem_short_put(short *target, const short *source, - size_t len, int pe); -.Ve -Fortran: -.Vb -INCLUDE "mpp/shmem.fh" - -INTEGER len, pe - -CALL SHMEM_CHARACTER_PUT(target, source, len, pe) - -CALL SHMEM_COMPLEX_PUT(target, source, len, pe) - -CALL SHMEM_DOUBLE_PUT(target, source, len, pe) - -CALL SHMEM_INTEGER_PUT(target, source, len, pe) - -CALL 
SHMEM_LOGICAL_PUT(target, source, len, pe) - -CALL SHMEM_PUT(target, source, len, pe) - -CALL SHMEM_PUT4(target, source, len, pe) - -CALL SHMEM_PUT8(target, source, len, pe) - -CALL SHMEM_PUT32(target, source, len, pe) - -CALL SHMEM_PUT64(target, source, len, pe) - -CALL SHMEM_PUT128(target, source, len, pe) - -CALL SHMEM_PUTMEM(target, source, len, pe) - -CALL SHMEM_REAL_PUT(target, source, len, pe) -.Ve -.SH DESCRIPTION - -These routines transfer \fBnelems\fP -elements of the data object at address -\fBsource\fP -on the calling PE, to the data object at address \fBtarget\fP -on the remote -PE \fBpe\fP\&. -These routines start the remote transfer and may return before the data is -delivered to the remote PE. -.PP -The delivery of data into the data object on the destination PE from different put calls may -occur in any order. Because of this, two successive put operations may deliver data out of -order unless a call to \fIshmem_fence\fP(3) -is introduced between the two calls. -.PP -The arguments are as follows: -.TP -target -Data object to be updated on the remote PE. This data object must be remotely -accessible. -.TP -source -Data object containing the data to be copied. -.TP -len -Number of elements in the target and source arrays. len must be of type integer. If -you are using Fortran, it must be a constant, variable, or array element of default integer -type. -.TP -pe -PE number of the remote PE. pe must be of type integer. If you are using Fortran, it -must be a constant, variable, or array element of default integer type. -.PP -The target and source data objects must conform to certain typing constraints, which are as -follows: -.TP -\fBshmem_putmem\fP: Fortran: Any noncharacter type. C: Any data type. len is scaled in -bytes. -.TP -\fBshmem_put4, shmem_put32:\fP Any noncharacter type that has a storage size -equal to 32 bits. -.TP -\fBshmem_put8, shmem_put64:\fP Any noncharacter type that has a storage size -equal to 64 bits. 
-.TP -\fBshmem_put128:\fP Any noncharacter type that has a storage size equal to 128 -bits. -.TP -\fBshmem_short_put:\fP Elements of type short. -.TP -\fBshmem_int_put:\fP Elements of type int. -.TP -\fBshmem_long_put:\fP Elements of type long. -.TP -\fBshmem_longlong_put:\fP Elements of type long long. -.TP -\fBshmem_float_put:\fP Elements of type float. -.TP -\fBshmem_double_put:\fP Elements of type double. -.TP -\fBshmem_longdouble_put:\fP Elements of type long double. -.TP -\fBSHMEM_CHARACTER_PUT:\fP Elements of type character. len is the number of -characters to transfer. The actual character lengths of the source and target variables are -ignored. -.TP -\fBSHMEM_COMPLEX_PUT:\fP Elements of type complex of default size. -.TP -\fBSHMEM_DOUBLE_PUT:\fP (Fortran) Elements of type double precision. -.TP -\fBSHMEM_INTEGER_PUT:\fP Elements of type integer. -.TP -\fBSHMEM_LOGICAL_PUT:\fP Elements of type logical. -.TP -\fBSHMEM_REAL_PUT:\fP Elements of type real. -If you are using Fortran, data types must be of default size. For example, a real variable must -be declared as REAL, REAL*4, or REAL(KIND=4). 
-.PP -.SH EXAMPLES - -The following shmem_put example is for C/C++ programs: -.Vb -#include -#include - -main() -{ - long source[10] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 }; - static long target[10]; - shmem_init(); - - if (shmem_my_pe() == 0) { - /* put 10 words into target on PE 1 */ - shmem_long_put(target, source, 10, 1); - } - shmem_barrier_all(); /* sync sender and receiver */ - if (shmem_my_pe() == 1) - shmem_udcflush(); /* not required on Altix systems */ - printf("target[0] on PE %d is %d\\n", shmem_my_pe(), target[0]); -} -.Ve -.SH SEE ALSO - -\fIintro_shmem\fP(3), -\fIshmem_iput\fP(3), -\fIshmem_quiet\fP(3) diff --git a/oshmem/shmem/man/man3/shmem_char_put_nbi.3in b/oshmem/shmem/man/man3/shmem_char_put_nbi.3in deleted file mode 100644 index fb4ad1413b0..00000000000 --- a/oshmem/shmem/man/man3/shmem_char_put_nbi.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_putmem_nbi.3 diff --git a/oshmem/shmem/man/man3/shmem_clear_cache_inv.3in b/oshmem/shmem/man/man3/shmem_clear_cache_inv.3in deleted file mode 100644 index 4a6a361ef97..00000000000 --- a/oshmem/shmem/man/man3/shmem_clear_cache_inv.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_udcflush.3 diff --git a/oshmem/shmem/man/man3/shmem_clear_cache_line_inv.3in b/oshmem/shmem/man/man3/shmem_clear_cache_line_inv.3in deleted file mode 100644 index 4a6a361ef97..00000000000 --- a/oshmem/shmem/man/man3/shmem_clear_cache_line_inv.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_udcflush.3 diff --git a/oshmem/shmem/man/man3/shmem_clear_lock.3in b/oshmem/shmem/man/man3/shmem_clear_lock.3in deleted file mode 100644 index 9a1ee8ffc2e..00000000000 --- a/oshmem/shmem/man/man3/shmem_clear_lock.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_set_lock.3 diff --git a/oshmem/shmem/man/man3/shmem_collect32.3in b/oshmem/shmem/man/man3/shmem_collect32.3in deleted file mode 100644 index bce6dc5aa00..00000000000 --- a/oshmem/shmem/man/man3/shmem_collect32.3in +++ /dev/null @@ -1,197 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2015 
University of Houston. All rights reserved. -.\" Copyright (c) 2015 Mellanox Technologies, Inc. -.\" $COPYRIGHT$ -.de Vb -.ft CW -.nf -.. -.de Ve -.ft R - -.fi -.. -.TH "SHMEM\\_COLLECT" "3" "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME - -\fIshmem_collect4\fP(3), -\fIshmem_collect8\fP(3), -\fIshmem_collect32\fP(3), -\fIshmem_collect64\fP(3), -\fIshmem_fcollect\fP(3), -\fIshmem_fcollect4\fP(3), -\fIshmem_fcollect8\fP(3), -\fIshmem_fcollect32\fP(3), -\fIshmem_fcollect64\fP(3) -\- Concatenates blocks of data from multiple processing elements (PEs) to an array in every PE -.SH SYNOPSIS - -C or C++: -.Vb -#include - -void shmem_collect32(void *target, const void *source, - size_t nelems, int PE_start, int logPE_stride, int PE_size, - long *pSync); - -void shmem_collect64(void *target, const void *source, - size_t nelems, int PE_start, int logPE_stride, int PE_size, - long *pSync); - -void shmem_fcollect32(void *target, const void *source, - size_t nelems, int PE_start, int logPE_stride, int PE_size, - long *pSync); - -void shmem_fcollect64(void *target, const void *source, - size_t nelems, int PE_start, int logPE_stride, int PE_size, - long *pSync); -.Ve -Fortran: -.Vb -INCLUDE "mpp/shmem.fh" - -INTEGER nelems -INTEGER PE_start, logPE_stride, PE_size -INTEGER pSync(SHMEM_COLLECT_SYNC_SIZE) - -CALL SHMEM_COLLECT4(target, source, nelems, PE_start, -& logPE_stride, PE_size, pSync) - -CALL SHMEM_COLLECT8(target, source, nelems, PE_start, -& logPE_stride, PE_size, pSync) - -CALL SHMEM_FCOLLECT4(target, source, nelems, PE_start, -& logPE_stride, PE_size, pSync) - -CALL SHMEM_FCOLLECT8(target, source, nelems, PE_start, -& logPE_stride, PE_size, pSync) -.Ve -.SH DESCRIPTION - -The shared memory (SHMEM) collect and fcollect routines concatenate nelems 64\-bit or 32\-bit -data items from the source array into the target array, over the set of PEs defined by -PE_start, log2PE_stride, and PE_size, in processor number order. 
The resultant target array -contains the contribution from PE PE_start first, then the contribution from PE PE_start + -PE_stride second, and so on. The collected result is written to the target array for all PEs in -the active set. -.PP -The fcollect routines require that nelems be the same value in all participating PEs, while the -collect routines allow nelems to vary from PE to PE. -.PP -The resulting target array is as follows: -.Vb -\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\- - source(1..nelems) - from PE (PE_start + 0 * (2**logPE_stride)) -\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\- - source(1..nelems) - from PE (PE_start + 1 * (2**logPE_stride)) -\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\- - ... -\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\- - source(1..nelems) from - PE (PE_start + (PE_size \- 1) * (2**logPE_stride)) -\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\- -.Ve -.PP -As with all SHMEM collective routines, each of these routines assumes that only PEs in -the active set call the routine. If a PE not in the active set calls a SHMEM collective routine, -undefined behavior results. -.PP -The arguments are as follows: -.TP -target -A symmetric array. The target argument must be large enough to accept the concatenation of the source arrays on all PEs. The data types are -as follows: -.RS -.TP -[shmem_collect8, shmem_collect64, shmem_fcollect8, and -shmem_fcollect64] any data type with an element size of 64 bits. Fortran derived types, -Fortran character type, and C/C++ structures are not permitted. 
-.TP -[shmem_collect4, shmem_collect32, shmem_fcollect4, and -shmem_fcollect32] any data type with an element size of 32 bits. Fortran derived types, -Fortran character type, and C/C++ structures are not permitted. -.RE -.RS -.PP -.RE -.TP -source -A symmetric data object that can be of any type permissible for the target -argument. -.TP -nelems -The number of elements in the source array. nelems must be of type integer. If -you are using Fortran, it must be a default integer value. -.TP -PE_start -The lowest virtual PE number of the active set of PEs. PE_start must be of -type integer. If you are using Fortran, it must be a default integer value. -.TP -logPE_stride -The log (base 2) of the stride between consecutive virtual PE numbers in -the active set. logPE_stride must be of type integer. If you are using Fortran, it must be a -default integer value. -.TP -PE_size -The number of PEs in the active set. PE_size must be of type integer. If you -are using Fortran, it must be a default integer value. -.TP -pSync -A symmetric work array. In C/C++, pSync must be of type int and size -_SHMEM_COLLECT_SYNC_SIZE. In Fortran, pSync must be of type integer and size -SHMEM_COLLECT_SYNC_SIZE. If you are using Fortran, it must be a default integer value. -Every element of this array must be initialized with the value _SHMEM_SYNC_VALUE in -C/C++ or SHMEM_SYNC_VALUE in Fortran before any of the PEs in the active set enter -shmem_barrier(). -.PP -The values of arguments PE_start, logPE_stride, and PE_size must be equal on all PEs in -the active set. The same target and source arrays and the same pSync work array must be -passed to all PEs in the active set. -.PP -Upon return from a collective routine, the following are true for the local PE: The target array -is updated. The values in the pSync array are restored to the original values. -.SH NOTES - -The terms collective and symmetric are defined in \fIintro_shmem\fP(3)\&. 
-All SHMEM collective routines reset the values in pSync before they return, so a particular -pSync buffer need only be initialized the first time it is used. -.PP -You must ensure that the pSync array is not being updated on any PE in the active set while -any of the PEs participate in processing of a SHMEM collective routine. Be careful to -avoid these situations: If the pSync array is initialized at run time, some type of -synchronization is needed to ensure that all PEs in the working set have initialized -pSync before any of them enter a SHMEM routine called with the pSync synchronization array. -A pSync array can be reused on a subsequent SHMEM collective routine only if none -of the PEs in the active set are still processing a prior SHMEM collective routine call that used -the same pSync array. In general, this may be ensured only by doing some type of -synchronization. However, in the special case of SHMEM routines being called with the same -active set, you can allocate two pSync arrays and alternate between them on -successive calls. -.PP -The collective routines operate on active PE sets that have a non\-power\-of\-two PE_size -with some performance degradation. They operate with no performance degradation -when nelems is a non\-power\-of\-two value. 
-.SH EXAMPLES - -C/C++: -.Vb -for (i=0; i < _SHMEM_COLLECT_SYNC_SIZE; i++) { - pSync[i] = _SHMEM_SYNC_VALUE; -} -shmem_barrier_all(); /* Wait for all PEs to initialize pSync */ -shmem_collect32(target, source, 64, pe_start, logPE_stride, - pe_size, pSync); -.Ve -Fortran: -.Vb -INTEGER PSYNC(SHMEM_COLLECT_SYNC_SIZE) -DATA PSYNC /SHMEM_COLLECT_SYNC_SIZE*SHMEM_SYNC_VALUE/ - -CALL SHMEM_COLLECT4(TARGET, SOURCE, 64, PE_START, -& LOGPE_STRIDE, PE_SIZE, PSYNC) -.Ve -.SH SEE ALSO - -\fIintro_shmem\fP(3) diff --git a/oshmem/shmem/man/man3/shmem_collect64.3in b/oshmem/shmem/man/man3/shmem_collect64.3in deleted file mode 100644 index 42d0ca9595c..00000000000 --- a/oshmem/shmem/man/man3/shmem_collect64.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_collect32.3 diff --git a/oshmem/shmem/man/man3/shmem_complexd_prod_to_all.3in b/oshmem/shmem/man/man3/shmem_complexd_prod_to_all.3in deleted file mode 100644 index 39b196d0820..00000000000 --- a/oshmem/shmem/man/man3/shmem_complexd_prod_to_all.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_prod_to_all.3 diff --git a/oshmem/shmem/man/man3/shmem_complexd_sum_to_all.3in b/oshmem/shmem/man/man3/shmem_complexd_sum_to_all.3in deleted file mode 100644 index f75a4948417..00000000000 --- a/oshmem/shmem/man/man3/shmem_complexd_sum_to_all.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_sum_to_all.3 diff --git a/oshmem/shmem/man/man3/shmem_complexf_prod_to_all.3in b/oshmem/shmem/man/man3/shmem_complexf_prod_to_all.3in deleted file mode 100644 index 39b196d0820..00000000000 --- a/oshmem/shmem/man/man3/shmem_complexf_prod_to_all.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_prod_to_all.3 diff --git a/oshmem/shmem/man/man3/shmem_complexf_sum_to_all.3in b/oshmem/shmem/man/man3/shmem_complexf_sum_to_all.3in deleted file mode 100644 index f75a4948417..00000000000 --- a/oshmem/shmem/man/man3/shmem_complexf_sum_to_all.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_sum_to_all.3 diff --git 
a/oshmem/shmem/man/man3/shmem_double_fetch.3in b/oshmem/shmem/man/man3/shmem_double_fetch.3in deleted file mode 100644 index 7213c75e538..00000000000 --- a/oshmem/shmem/man/man3/shmem_double_fetch.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_int_fetch.3 diff --git a/oshmem/shmem/man/man3/shmem_double_g.3in b/oshmem/shmem/man/man3/shmem_double_g.3in deleted file mode 100644 index cd9ba27b1bc..00000000000 --- a/oshmem/shmem/man/man3/shmem_double_g.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_char_g.3 diff --git a/oshmem/shmem/man/man3/shmem_double_get.3in b/oshmem/shmem/man/man3/shmem_double_get.3in deleted file mode 100644 index 7cecf74d7ad..00000000000 --- a/oshmem/shmem/man/man3/shmem_double_get.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_char_get.3 diff --git a/oshmem/shmem/man/man3/shmem_double_get_nbi.3in b/oshmem/shmem/man/man3/shmem_double_get_nbi.3in deleted file mode 100644 index 50449e90a6d..00000000000 --- a/oshmem/shmem/man/man3/shmem_double_get_nbi.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_getmem_nbi.3 diff --git a/oshmem/shmem/man/man3/shmem_double_iget.3in b/oshmem/shmem/man/man3/shmem_double_iget.3in deleted file mode 100644 index df8770591af..00000000000 --- a/oshmem/shmem/man/man3/shmem_double_iget.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_iget.3 diff --git a/oshmem/shmem/man/man3/shmem_double_iput.3in b/oshmem/shmem/man/man3/shmem_double_iput.3in deleted file mode 100644 index 2fdf7a97e3d..00000000000 --- a/oshmem/shmem/man/man3/shmem_double_iput.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_iput.3 diff --git a/oshmem/shmem/man/man3/shmem_double_max_to_all.3in b/oshmem/shmem/man/man3/shmem_double_max_to_all.3in deleted file mode 100644 index 3e41c814889..00000000000 --- a/oshmem/shmem/man/man3/shmem_double_max_to_all.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_max_to_all.3 diff --git a/oshmem/shmem/man/man3/shmem_double_min_to_all.3in b/oshmem/shmem/man/man3/shmem_double_min_to_all.3in deleted file 
mode 100644 index d688221529b..00000000000 --- a/oshmem/shmem/man/man3/shmem_double_min_to_all.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_min_to_all.3 diff --git a/oshmem/shmem/man/man3/shmem_double_p.3in b/oshmem/shmem/man/man3/shmem_double_p.3in deleted file mode 100644 index 42b9fd0e81f..00000000000 --- a/oshmem/shmem/man/man3/shmem_double_p.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_char_p.3 diff --git a/oshmem/shmem/man/man3/shmem_double_prod_to_all.3in b/oshmem/shmem/man/man3/shmem_double_prod_to_all.3in deleted file mode 100644 index 39b196d0820..00000000000 --- a/oshmem/shmem/man/man3/shmem_double_prod_to_all.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_prod_to_all.3 diff --git a/oshmem/shmem/man/man3/shmem_double_put.3in b/oshmem/shmem/man/man3/shmem_double_put.3in deleted file mode 100644 index 9c7b2e25452..00000000000 --- a/oshmem/shmem/man/man3/shmem_double_put.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_char_put.3 diff --git a/oshmem/shmem/man/man3/shmem_double_put_nbi.3in b/oshmem/shmem/man/man3/shmem_double_put_nbi.3in deleted file mode 100644 index fb4ad1413b0..00000000000 --- a/oshmem/shmem/man/man3/shmem_double_put_nbi.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_putmem_nbi.3 diff --git a/oshmem/shmem/man/man3/shmem_double_set.3in b/oshmem/shmem/man/man3/shmem_double_set.3in deleted file mode 100644 index a02bb7ea5ea..00000000000 --- a/oshmem/shmem/man/man3/shmem_double_set.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_int_set.3 diff --git a/oshmem/shmem/man/man3/shmem_double_sum_to_all.3in b/oshmem/shmem/man/man3/shmem_double_sum_to_all.3in deleted file mode 100644 index f75a4948417..00000000000 --- a/oshmem/shmem/man/man3/shmem_double_sum_to_all.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_sum_to_all.3 diff --git a/oshmem/shmem/man/man3/shmem_double_swap.3in b/oshmem/shmem/man/man3/shmem_double_swap.3in deleted file mode 100644 index 31324f2c2e3..00000000000 --- 
a/oshmem/shmem/man/man3/shmem_double_swap.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_swap.3 diff --git a/oshmem/shmem/man/man3/shmem_fcollect32.3in b/oshmem/shmem/man/man3/shmem_fcollect32.3in deleted file mode 100644 index 42d0ca9595c..00000000000 --- a/oshmem/shmem/man/man3/shmem_fcollect32.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_collect32.3 diff --git a/oshmem/shmem/man/man3/shmem_fcollect64.3in b/oshmem/shmem/man/man3/shmem_fcollect64.3in deleted file mode 100644 index 42d0ca9595c..00000000000 --- a/oshmem/shmem/man/man3/shmem_fcollect64.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_collect32.3 diff --git a/oshmem/shmem/man/man3/shmem_fence.3in b/oshmem/shmem/man/man3/shmem_fence.3in deleted file mode 100644 index 12e97a55dc7..00000000000 --- a/oshmem/shmem/man/man3/shmem_fence.3in +++ /dev/null @@ -1,54 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2015 University of Houston. All rights reserved. -.\" Copyright (c) 2015 Mellanox Technologies, Inc. -.\" $COPYRIGHT$ -.de Vb -.ft CW -.nf -.. -.de Ve -.ft R - -.fi -.. -.TH "SHMEM\\_FENCE" "3" "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME - -shmem_fence \- Provides a separate ordering on the sequence of puts issued by this PE to each destination -PE. -.SH SYNOPSIS - -C or C++: -.Vb -#include - -void shmem_fence(void); -.Ve -Fortran: -.Vb -INCLUDE "mpp/shmem.fh" - -CALL SHMEM_FENCE -.Ve -.SH DESCRIPTION - -The \fBshmem_fence()\fP -routine provides an ordering on the put operations issued by the calling -PE prior to the call to \fBshmem_fence()\fP -relative to the put operations issued by the -calling PE following the call to \fBshmem_fence()\fP\&. -It guarantees that all such prior put operations -issued to a particular destination PE are fully written to the symmetric memory of -that destination PE, before any such following put operations to that same destination PE -are written to the symmetric memory of that destination PE. 
-Note that the ordering is provided separately on the sequences of puts from the calling PE to -each distinct destination PE. The \fBshmem_quiet()\fP -routine should be used instead if ordering -of puts is required when multiple destination PEs are involved. -.SH NOTES - -The shmem_quiet function should be called if ordering of puts is desired when multiple remote -PEs are involved. -.SH SEE ALSO - -\fIintro_shmem\fP(3) diff --git a/oshmem/shmem/man/man3/shmem_finalize.3in b/oshmem/shmem/man/man3/shmem_finalize.3in deleted file mode 100644 index d0acafdf6ea..00000000000 --- a/oshmem/shmem/man/man3/shmem_finalize.3in +++ /dev/null @@ -1,47 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2015 University of Houston. All rights reserved. -.\" Copyright (c) 2015 Mellanox Technologies, Inc. -.\" $COPYRIGHT$ -.de Vb -.ft CW -.nf -.. -.de Ve -.ft R - -.fi -.. -.TH "SHMEM\\_FINALIZE" "3" "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME - -shmem_finalize -\- A collective operation that releases resources used by the OpenSHMEM library. -This only terminates the Open-SHMEM portion of a program, not the entire program. -.SH SYNOPSIS - -C or C++: -.Vb -#include -void shmem_finalize(void); -.Ve -Fortran: -.Vb -include 'mpp/shmem.fh' -CALL SHMEM_FINALIZE -.Ve -.SH DESCRIPTION - -shmem_finalize -is a collective operation that ends the OpenSHMEM portion of a program previously initialized -by shmem_init and releases resources used by the OpenSHMEM library. This collective operation requires -all PEs to participate in the call. There is an implicit global barrier in shmem_finalize so that pending -communication is completed, and no resources can be released until all PEs have entered shmem_finalize. -shmem_finalize must be the last OpenSHMEM library call encountered in the OpenSHMEM portion of -a program. A call to shmem_finalize will release any resources initialized by a corresponding call to -shmem_init. 
All processes and threads that represent the PEs will still exist after the call to shmem_finalize -returns, but they will no longer have access to any resources that have been released. -.SH SEE ALSO - -\fIintro_shmem\fP(3), -\fIshmem_my_pe\fP(3), -\fIshmem_init\fP(3) diff --git a/oshmem/shmem/man/man3/shmem_float_fetch.3in b/oshmem/shmem/man/man3/shmem_float_fetch.3in deleted file mode 100644 index 7213c75e538..00000000000 --- a/oshmem/shmem/man/man3/shmem_float_fetch.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_int_fetch.3 diff --git a/oshmem/shmem/man/man3/shmem_float_g.3in b/oshmem/shmem/man/man3/shmem_float_g.3in deleted file mode 100644 index cd9ba27b1bc..00000000000 --- a/oshmem/shmem/man/man3/shmem_float_g.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_char_g.3 diff --git a/oshmem/shmem/man/man3/shmem_float_get.3in b/oshmem/shmem/man/man3/shmem_float_get.3in deleted file mode 100644 index 7cecf74d7ad..00000000000 --- a/oshmem/shmem/man/man3/shmem_float_get.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_char_get.3 diff --git a/oshmem/shmem/man/man3/shmem_float_get_nbi.3in b/oshmem/shmem/man/man3/shmem_float_get_nbi.3in deleted file mode 100644 index 50449e90a6d..00000000000 --- a/oshmem/shmem/man/man3/shmem_float_get_nbi.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_getmem_nbi.3 diff --git a/oshmem/shmem/man/man3/shmem_float_iget.3in b/oshmem/shmem/man/man3/shmem_float_iget.3in deleted file mode 100644 index df8770591af..00000000000 --- a/oshmem/shmem/man/man3/shmem_float_iget.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_iget.3 diff --git a/oshmem/shmem/man/man3/shmem_float_iput.3in b/oshmem/shmem/man/man3/shmem_float_iput.3in deleted file mode 100644 index 2fdf7a97e3d..00000000000 --- a/oshmem/shmem/man/man3/shmem_float_iput.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_iput.3 diff --git a/oshmem/shmem/man/man3/shmem_float_max_to_all.3in b/oshmem/shmem/man/man3/shmem_float_max_to_all.3in deleted file mode 100644 index 
3e41c814889..00000000000 --- a/oshmem/shmem/man/man3/shmem_float_max_to_all.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_max_to_all.3 diff --git a/oshmem/shmem/man/man3/shmem_float_min_to_all.3in b/oshmem/shmem/man/man3/shmem_float_min_to_all.3in deleted file mode 100644 index d688221529b..00000000000 --- a/oshmem/shmem/man/man3/shmem_float_min_to_all.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_min_to_all.3 diff --git a/oshmem/shmem/man/man3/shmem_float_p.3in b/oshmem/shmem/man/man3/shmem_float_p.3in deleted file mode 100644 index 42b9fd0e81f..00000000000 --- a/oshmem/shmem/man/man3/shmem_float_p.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_char_p.3 diff --git a/oshmem/shmem/man/man3/shmem_float_prod_to_all.3in b/oshmem/shmem/man/man3/shmem_float_prod_to_all.3in deleted file mode 100644 index 39b196d0820..00000000000 --- a/oshmem/shmem/man/man3/shmem_float_prod_to_all.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_prod_to_all.3 diff --git a/oshmem/shmem/man/man3/shmem_float_put.3in b/oshmem/shmem/man/man3/shmem_float_put.3in deleted file mode 100644 index 9c7b2e25452..00000000000 --- a/oshmem/shmem/man/man3/shmem_float_put.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_char_put.3 diff --git a/oshmem/shmem/man/man3/shmem_float_put_nbi.3in b/oshmem/shmem/man/man3/shmem_float_put_nbi.3in deleted file mode 100644 index fb4ad1413b0..00000000000 --- a/oshmem/shmem/man/man3/shmem_float_put_nbi.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_putmem_nbi.3 diff --git a/oshmem/shmem/man/man3/shmem_float_set.3in b/oshmem/shmem/man/man3/shmem_float_set.3in deleted file mode 100644 index a02bb7ea5ea..00000000000 --- a/oshmem/shmem/man/man3/shmem_float_set.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_int_set.3 diff --git a/oshmem/shmem/man/man3/shmem_float_sum_to_all.3in b/oshmem/shmem/man/man3/shmem_float_sum_to_all.3in deleted file mode 100644 index f75a4948417..00000000000 --- a/oshmem/shmem/man/man3/shmem_float_sum_to_all.3in +++ /dev/null @@ 
-1 +0,0 @@ -.so man3/shmem_short_sum_to_all.3 diff --git a/oshmem/shmem/man/man3/shmem_float_swap.3in b/oshmem/shmem/man/man3/shmem_float_swap.3in deleted file mode 100644 index 31324f2c2e3..00000000000 --- a/oshmem/shmem/man/man3/shmem_float_swap.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_swap.3 diff --git a/oshmem/shmem/man/man3/shmem_free.3in b/oshmem/shmem/man/man3/shmem_free.3in deleted file mode 100644 index 6cdb8014e9e..00000000000 --- a/oshmem/shmem/man/man3/shmem_free.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_malloc.3 diff --git a/oshmem/shmem/man/man3/shmem_get128.3in b/oshmem/shmem/man/man3/shmem_get128.3in deleted file mode 100644 index 7cecf74d7ad..00000000000 --- a/oshmem/shmem/man/man3/shmem_get128.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_char_get.3 diff --git a/oshmem/shmem/man/man3/shmem_get128_nbi.3in b/oshmem/shmem/man/man3/shmem_get128_nbi.3in deleted file mode 100644 index 50449e90a6d..00000000000 --- a/oshmem/shmem/man/man3/shmem_get128_nbi.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_getmem_nbi.3 diff --git a/oshmem/shmem/man/man3/shmem_get16_nbi.3in b/oshmem/shmem/man/man3/shmem_get16_nbi.3in deleted file mode 100644 index 50449e90a6d..00000000000 --- a/oshmem/shmem/man/man3/shmem_get16_nbi.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_getmem_nbi.3 diff --git a/oshmem/shmem/man/man3/shmem_get32.3in b/oshmem/shmem/man/man3/shmem_get32.3in deleted file mode 100644 index 7cecf74d7ad..00000000000 --- a/oshmem/shmem/man/man3/shmem_get32.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_char_get.3 diff --git a/oshmem/shmem/man/man3/shmem_get32_nbi.3in b/oshmem/shmem/man/man3/shmem_get32_nbi.3in deleted file mode 100644 index 50449e90a6d..00000000000 --- a/oshmem/shmem/man/man3/shmem_get32_nbi.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_getmem_nbi.3 diff --git a/oshmem/shmem/man/man3/shmem_get64.3in b/oshmem/shmem/man/man3/shmem_get64.3in deleted file mode 100644 index 7cecf74d7ad..00000000000 --- 
a/oshmem/shmem/man/man3/shmem_get64.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_char_get.3 diff --git a/oshmem/shmem/man/man3/shmem_get64_nbi.3in b/oshmem/shmem/man/man3/shmem_get64_nbi.3in deleted file mode 100644 index 50449e90a6d..00000000000 --- a/oshmem/shmem/man/man3/shmem_get64_nbi.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_getmem_nbi.3 diff --git a/oshmem/shmem/man/man3/shmem_get8_nbi.3in b/oshmem/shmem/man/man3/shmem_get8_nbi.3in deleted file mode 100644 index 50449e90a6d..00000000000 --- a/oshmem/shmem/man/man3/shmem_get8_nbi.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_getmem_nbi.3 diff --git a/oshmem/shmem/man/man3/shmem_getmem.3in b/oshmem/shmem/man/man3/shmem_getmem.3in deleted file mode 100644 index 7cecf74d7ad..00000000000 --- a/oshmem/shmem/man/man3/shmem_getmem.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_char_get.3 diff --git a/oshmem/shmem/man/man3/shmem_getmem_nbi.3in b/oshmem/shmem/man/man3/shmem_getmem_nbi.3in deleted file mode 100644 index 702e1db3de6..00000000000 --- a/oshmem/shmem/man/man3/shmem_getmem_nbi.3in +++ /dev/null @@ -1,168 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2016 Mellanox Technologies, Inc. -.\" $COPYRIGHT$ -.de Vb -.ft CW -.nf -.. -.de Ve -.ft R - -.fi -.. -.TH "SHMEM\\_GET\\_NBI" "3" "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME - -\fIshmem_getmem_nbi\fP(3), -\fIshmem_char_get_nbi\fP(3), -\fIshmem_short_get_nbi\fP(3), -\fIshmem_int_get_nbi\fP(3), -\fIshmem_long_get_nbi\fP(3), -\fIshmem_longlong_get_nbi\fP(3), -\fIshmem_float_get_nbi\fP(3), -\fIshmem_double_get_nbi\fP(3), -\fIshmem_longdouble_get_nbi\fP(3), -\fIshmem_get8_nbi\fP(3), -\fIshmem_get16_nbi\fP(3), -\fIshmem_get32_nbi\fP(3), -\fIshmem_get64_nbi\fP(3), -\fIshmem_get128_nbi\fP(3), -\- The nonblocking get routines provide a method for copying data from a contiguous remote data object on the specified PE to the local data object. 
-.SH SYNOPSIS - -C or C++: -.Vb -#include - -void shmem_getmem_nbi(void *dest, const void *source, - size_t nelems, int pe); - -void shmem_char_get(char *dest, const char *source, - size_t nelems, int pe); - -void shmem_short_get(short *dest, const short *source, - size_t nelems, int pe); - -void shmem_int_get(int *dest, const int *source, - size_t nelems, int pe); - -void shmem_long_get(long *dest, const long *source, - size_t nelems, int pe); - -void shmem_longlong_get(long long *dest, const long long *source, - size_t nelems, int pe); - -void shmem_float_get(float *dest, const float *source, - size_t nelems, int pe); - -void shmem_double_get(double *dest, const double *source, - size_t nelems, int pe); - -void shmem_longdouble_get(long double *dest, const long double *source, - size_t nelems, int pe); - -void shmem_get8(void *dest, const void *source, - size_t nelems, int pe); - -void shmem_get16(void *dest, const void *source, - size_t nelems, int pe); - -void shmem_get32(void *dest, const void *source, - size_t nelems, int pe); - -void shmem_get64(void *dest, const void *source, - size_t nelems, int pe); - -void shmem_get128(void *dest, const void *source, - size_t nelems, int pe); - -.Ve -Fortran: -.Vb -INCLUDE "mpp/shmem.fh" - -INTEGER nelems, pe - -CALL SHMEM_GETMEM_NBI(dest, source, nelems, pe) - -CALL SHMEM_CHARACTER_GET_NBI(dest, source, nelems, pe) - -CALL SHMEM_COMPLEX_GET_NBI(dest, source, nelems, pe) - -CALL SHMEM_DOUBLE_GET_NBI(dest, source, nelems, pe) - -CALL SHMEM_INTEGER_GET_NBI(dest, source, nelems, pe) - -CALL SHMEM_LOGICAL_GET_NBI(dest, source, nelems, pe) - -CALL SHMEM_REAL_GET_NBI(dest, source, nelems, pe) - -CALL SHMEM_GET4_NBI(dest, source, nelems, pe) - -CALL SHMEM_GET8_NBI(dest, source, nelems, pe) - -CALL SHMEM_GET32_NBI(dest, source, nelems, pe) - -CALL SHMEM_GET64_NBI(dest, source, nelems, pe) - -CALL SHMEM_GET128_NBI(dest, source, nelems, pe) - -.Ve -.SH DESCRIPTION - -The get routines provide a method for copying a contiguous 
symmetric data -object from a different PE to a contiguous data object on the local PE. -The routines return after posting the operation. The operation is -considered complete after a subsequent call to shmem_quiet. At the completion -of shmem_quiet, the data has been delivered to the dest array on the local PE. -.PP -The arguments are as follows: -.TP -dest -Local data object to be updated. -.TP -source -Data object on the PE identified by pe that contains the data to be copied. This -data object must be remotely accessible. -.TP -nelems -Number of elements in the target and source arrays. len must be of type integer. If -you are using Fortran, it must be a constant, variable, or array element of default -integer type. -.TP -pe -PE number of the remote PE. pe must be of type integer. If you are using Fortran, it -must be a constant, variable, or array element of default integer type. -.PP -If you are using Fortran, data types must be of default size. For example, a real variable must -be declared as REAL, REAL*4, or REAL(KIND=4). -.SH NOTES - -See \fIintro_shmem\fP(3) -for a definition of the term remotely accessible. -.SH EXAMPLES - -Consider this simple example for Fortran. -.Vb -PROGRAM REDUCTION - REAL VALUES, SUM - COMMON /C/ VALUES - REAL WORK - - CALL START_PES(0) ! ALLOW ANY NUMBER OF PES - VALUES = MY_PE() ! INITIALIZE IT TO SOMETHING - CALL SHMEM_BARRIER_ALL - SUM = 0.0 - DO I = 0,NUM_PES()\-1 - CALL SHMEM_REAL_GET_NBI(WORK, VALUES, 1, I) - CALL SHMEM_QUIET ! wait for delivery - SUM = SUM + WORK - ENDDO - PRINT *, 'PE ', MY_PE(), ' COMPUTED SUM=', SUM - CALL SHMEM_BARRIER_ALL -END -.Ve -.SH SEE ALSO - -\fIintro_shmem\fP(3), -\fIshmem_quiet\fP(3) diff --git a/oshmem/shmem/man/man3/shmem_global_exit.3in b/oshmem/shmem/man/man3/shmem_global_exit.3in deleted file mode 100644 index 718fc63b9b5..00000000000 --- a/oshmem/shmem/man/man3/shmem_global_exit.3in +++ /dev/null @@ -1,47 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2015 University of Houston. 
All rights reserved. -.\" Copyright (c) 2015 Mellanox Technologies, Inc. -.\" $COPYRIGHT$ -.de Vb -.ft CW -.nf -.. -.de Ve -.ft R - -.fi -.. -.TH "SHMEM\\_GLOBAL\\_EXIT" "3" "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME - -shmem_global_exit -\- A routine that allows any PE to force termination of an entire program. -.SH SYNOPSIS - -C or C++: -.Vb -#include -void shmem_global_exit(int status); -.Ve -Fortran: -.Vb -include 'mpp/shmem.fh' -INTEGER STATUS -CALL SHMEM_GLOBAL_EXIT(status) -.Ve -.SH DESCRIPTION - -shmem_global_exit() -shmem_global_exit is a non-collective routine that allows any one PE to force termination of an Open- -SHMEM program for all PEs, passing an exit status to the execution environment. This routine terminates -the entire program, not just the OpenSHMEM portion. When any PE calls shmem_global_exit, it results in -the immediate notification to all PEs to terminate. shmem_global_exit flushes I/O and releases resources -in accordance with C/C++/Fortran language requirements for normal program termination. If more than -one PE calls shmem_global_exit, then the exit status returned to the environment shall be one of the values -passed to shmem_global_exit as the status argument. There is no return to the caller of shmem_global_exit; -control is returned from the OpenSHMEM program to the execution environment for all PEs. 
-.SH SEE ALSO - -\fIintro_shmem\fP(3), -\fIshmem_my_pe\fP(3), -\fIshmem_init\fP(3) diff --git a/oshmem/shmem/man/man3/shmem_iget128.3in b/oshmem/shmem/man/man3/shmem_iget128.3in deleted file mode 100644 index df8770591af..00000000000 --- a/oshmem/shmem/man/man3/shmem_iget128.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_iget.3 diff --git a/oshmem/shmem/man/man3/shmem_iget32.3in b/oshmem/shmem/man/man3/shmem_iget32.3in deleted file mode 100644 index df8770591af..00000000000 --- a/oshmem/shmem/man/man3/shmem_iget32.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_iget.3 diff --git a/oshmem/shmem/man/man3/shmem_iget64.3in b/oshmem/shmem/man/man3/shmem_iget64.3in deleted file mode 100644 index df8770591af..00000000000 --- a/oshmem/shmem/man/man3/shmem_iget64.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_iget.3 diff --git a/oshmem/shmem/man/man3/shmem_info_get_name.3in b/oshmem/shmem/man/man3/shmem_info_get_name.3in deleted file mode 100644 index 36fd1da352d..00000000000 --- a/oshmem/shmem/man/man3/shmem_info_get_name.3in +++ /dev/null @@ -1,48 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2015 University of Houston. All rights reserved. -.\" Copyright (c) 2015 Mellanox Technologies, Inc. -.\" $COPYRIGHT$ -.de Vb -.ft CW -.nf -.. -.de Ve -.ft R - -.fi -.. -.TH "SHMEM\\_INFO\\_GET\\_NAME" "3" "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME - -shmem_info_get_name -\- This routine returns the vendor defined character string. -.SH SYNOPSIS - -C or C++: -.Vb -#include -void shmem_info_get_name(char *name); -.Ve -Fortran: -.Vb -include 'mpp/shmem.fh' -SHMEM_INFO_GET_NAME(NAME) -CHARACTER *(*)NAME -.Ve -.SH DESCRIPTION - -shmem_info_get_name() -This routine returns the vendor defined character string of size defined by the constant SHMEM_MAX_NAME_LEN. -The program calling this function prepares the memory of size SHMEM_MAX_NAME_LEN, and the implementation -copies the string of size at most SHMEM_MAX_NAME_LEN. 
In C, the string is terminated -by a null character. In Fortran, the string of size less than SHMEM_MAX_NAME_LEN is padded with -blank characters up to size SHMEM_MAX_NAME_LEN. The implementation copying a string of size -greater than SHMEM_MAX_NAME_LEN results in an undefined behavior. Multiple invocations of the -routine in an OpenSHMEM program always return the same string. For a given library implementation, -the major and minor version returned by these calls is consistent with the compile-time constants defined -in its shmem.h. -.SH SEE ALSO - -\fIintro_shmem\fP(3), -\fIshmem_my_pe\fP(3), -\fIshmem_init\fP(3) diff --git a/oshmem/shmem/man/man3/shmem_info_get_version.3in b/oshmem/shmem/man/man3/shmem_info_get_version.3in deleted file mode 100644 index 7313c37567f..00000000000 --- a/oshmem/shmem/man/man3/shmem_info_get_version.3in +++ /dev/null @@ -1,43 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2015 University of Houston. All rights reserved. -.\" Copyright (c) 2015 Mellanox Technologies, Inc. -.\" $COPYRIGHT$ -.de Vb -.ft CW -.nf -.. -.de Ve -.ft R - -.fi -.. -.TH "SHMEM\\_INFO\\_GET\\_VERSION" "3" "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME - -shmem_info_get_version -\- Returns the major and minor version of the library implementation. -.SH SYNOPSIS - -C or C++: -.Vb -#include -void shmem_info_get_version(int *major, int *minor); -.Ve -Fortran: -.Vb -include 'mpp/shmem.fh' -SHMEM_INFO_GET_VERSION(MAJOR, MINOR) -INTEGER MAJOR, MINOR -.Ve -.SH DESCRIPTION - -shmem_info_get_version() -This routine returns the major and minor version of the OpenSHMEM standard in use. For a given library -implementation, the major and minor version returned by these calls is consistent with the compile-time -constants, SHMEM_MAJOR_VERSION and SHMEM_MINOR_VERSION, defined in its shmem.h. The -valid major version value is 1, and the valid minor version value is 2. 
-.SH SEE ALSO - -\fIintro_shmem\fP(3), -\fIshmem_my_pe\fP(3), -\fIshmem_init\fP(3) diff --git a/oshmem/shmem/man/man3/shmem_init.3in b/oshmem/shmem/man/man3/shmem_init.3in deleted file mode 100644 index 00d49c63182..00000000000 --- a/oshmem/shmem/man/man3/shmem_init.3in +++ /dev/null @@ -1,84 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2015 University of Houston. All rights reserved. -.\" Copyright (c) 2015 Mellanox Technologies, Inc. -.\" $COPYRIGHT$ -.de Vb -.ft CW -.nf -.. -.de Ve -.ft R - -.fi -.. -.TH "SHMEM\\_INIT" "3" "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME - -shmem_init, start_pes -\- Allocates a block of memory from the symmetric heap. -.SH SYNOPSIS - -C or C++: -.Vb -void shmem_init(void); -void start_pes(int npes); -.Ve -Fortran: -.Vb -CALL SHMEM_INIT() -CALL START_PES(npes) -.Ve -.SH DESCRIPTION - -The start_pes routine should be the first statement in a SHMEM parallel program. -.PP -The start_pes routine accepts the following argument: -.TP -npes -Unused. Should be set to 0. -.PP -This routine initializes the SHMEM API, therefore it must be called before calling any -other SHMEM routine. -This routine is responsible inter alia for setting up the symmetric heap on the calling PE, and -the creation of the virtual PE numbers. Upon successful return from this routine, the calling PE -will be able to communicate with and transfer data to other PEs. -.PP -Multiple calls to this function are not allowed. -.PP -For an overview of programming with SHMEM communication routines, example SHMEM -programs, and instructions for compiling SHMEM programs, see the \fIintro_shmem\fP(3) -man page. -.SH EXAMPLES - -This is a simple program that calls \fIshmem_integer_put\fP(3): -.Vb -PROGRAM PUT - INCLUDE "mpp/shmem.fh" - - INTEGER TARG, SRC, RECEIVER, BAR - COMMON /T/ TARG - PARAMETER (RECEIVER=1) - - CALL SHMEM_INIT() - IF (MY_PE() .EQ. 0) THEN - SRC = 33 - CALL SHMEM_INTEGER_PUT(TARG, SRC, 1, RECEIVER) - ENDIF - CALL SHMEM_BARRIER_ALL ! 
SYNCHRONIZES SENDER AND RECEIVER - IF (MY_PE() .EQ. RECEIVER) THEN - PRINT *,'PE ', MY_PE(),' TARG=',TARG,' (expect 33)' - ENDIF -END -.Ve -.SH NOTES - -If the start_pes call is not the first statement in a program, unexpected results may occur on -some architectures. -.SH SEE ALSO - -\fIintro_shmem\fP(3), -\fIshmem_barrier\fP(3), -\fIshmem_barrier_all\fP(3), -\fIshmem_put\fP(3), -\fImy_pe\fP(3I), -\fIshmem_n_pes\fP(3I) diff --git a/oshmem/shmem/man/man3/shmem_int_add.3in b/oshmem/shmem/man/man3/shmem_int_add.3in deleted file mode 100644 index ff4728492a3..00000000000 --- a/oshmem/shmem/man/man3/shmem_int_add.3in +++ /dev/null @@ -1,76 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2015 University of Houston. All rights reserved. -.\" Copyright (c) 2015 Mellanox Technologies, Inc. -.\" $COPYRIGHT$ -.de Vb -.ft CW -.nf -.. -.de Ve -.ft R - -.fi -.. -.TH "SHMEM\\_ADD" "3" "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME - -\fIshmem_int_add\fP(3), -\fIshmem_int4_add\fP(3), -\fIshmem_int8_add\fP(3), -\fIshmem_long_add\fP(3), -\fIshmem_longlong_add\fP(3) -\- Performs an atomic add -operation. -.SH SYNOPSIS - -C or C++: -.Vb -#include - -void shmem_int_add(int *target, int value, int pe); -void shmem_long_add(long *target, long value, int pe); -void shmem_longlong_add(long long *target, long long value, - int pe); -.Ve -Fortran: -.Vb -include 'mpp/shmem.h' - -INTEGER pe - -CALL SHMEM_INT4_ADD(target, value, pe) -CALL SHMEM_INT8_ADD(target, value, pe) -.Ve -.SH DESCRIPTION - -The atomic add routines add \fBvalue\fP -to the data at address \fBtarget\fP -on PE -\fBpe\fP\&. -The operation completes without the possibility of another process updating -target between the time of the fetch and the update. -.PP -The arguments are as follows: -.TP -target -The remotely accessible integer data object to be updated on the remote PE. If -you are using C/C++, the type of target should match that implied in the SYNOPSIS section. 
If -you are using the Fortran compiler, it must be of type integer with an element size of 4 bytes -for SHMEM_INT4_ADD and 8 bytes for SHMEM_INT8_ADD. -.TP -value -The value to be atomically added to target. If you are using C/C++, the type of -value should match that implied in the SYNOPSIS section. If you are using Fortran, it must be -of type integer with an element size of target. -.TP -pe -An integer that indicates the PE number upon which target is to be updated. If you -are using Fortran, it must be a default integer value. -.PP -.SH NOTES - -The term remotely accessible is defined in \fIintro_shmem\fP(3)\&. -.SH SEE ALSO - -\fIintro_shmem\fP(3), -\fIshmem_cache\fP(3) diff --git a/oshmem/shmem/man/man3/shmem_int_and_to_all.3in b/oshmem/shmem/man/man3/shmem_int_and_to_all.3in deleted file mode 100644 index 0a769b7c8a8..00000000000 --- a/oshmem/shmem/man/man3/shmem_int_and_to_all.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_and_to_all.3 diff --git a/oshmem/shmem/man/man3/shmem_int_cswap.3in b/oshmem/shmem/man/man3/shmem_int_cswap.3in deleted file mode 100644 index 0f28e7712e2..00000000000 --- a/oshmem/shmem/man/man3/shmem_int_cswap.3in +++ /dev/null @@ -1,127 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2015 University of Houston. All rights reserved. -.\" Copyright (c) 2015 Mellanox Technologies, Inc. -.\" $COPYRIGHT$ -.de Vb -.ft CW -.nf -.. -.de Ve -.ft R - -.fi -.. 
-.TH "SHMEM\\_CSWAP" "3" "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME - -\fIshmem_int_cswap\fP(3), -\fIshmem_int4_cswap\fP(3), -\fIshmem_int8_cswap\fP(3), -\fIshmem_long_cswap\fP(3), -\fIshmem_longlong_cswap\fP(3) -\- Performs an atomic conditional swap to a remote data object -.SH SYNOPSIS - -C or C++: -.Vb -#include - -int shmem_int_cswap(int *target, int cond, int value, int pe); - -long shmem_long_cswap(long *target, long cond, long value, - int pe); - -long long shmem_longlong_cswap(longlong *target, - longlong cond, longlong value, int pe); -.Ve -Fortran: -.Vb -INCLUDE "mpp/shmem.fh" - -INTEGER pe - -INTEGER(KIND=4) SHMEM_INT4_CSWAP -ires = SHMEM_INT4_CSWAP(target, cond, value, pe) - -INTEGER(KIND=8) SHMEM_INT8_CSWAP -ires = SHMEM_INT8_CSWAP(target, cond, value, pe) -.Ve -.SH DESCRIPTION - -The conditional swap routines conditionally update a target data object on an arbitrary -processing element (PE) and return the prior contents of the data object in one atomic -operation. -.PP -The arguments are as follows: -.TP -target -The remotely accessible integer data object to be updated on the remote PE. If -you are using C/C++, the type of target should match that implied in the SYNOPSIS section. If -you are using the Fortran compiler, it must be of type integer with an element size of 4 bytes -for SHMEM_INT4_ADD and 8 bytes for SHMEM_INT8_ADD. -.TP -value -The value to be atomically added to target. If you are using C/C++, the type of -value should match that implied in the SYNOPSIS section. If you are using Fortran, it must be -of type integer with an element size of target. -.TP -pe -An integer that indicates the PE number upon which target is to be updated. If you -are using Fortran, it must be a default integer value. -.TP -target -The remotely accessible integer data object to be updated on the remote PE. If -you are using C/C++, the data type of target should match that implied in the SYNOPSIS -section. 
If you are using Fortran, it must be of the following type: -.RS -.TP -\fBSHMEM_INT4_CSWAP\fP: 4\-byte integer -.TP -\fBSHMEM_INT8_CSWAP\fP: 8\-byte integer -.RE -.RS -.PP -.RE -.TP -cond -cond is compared to the remote target value. If cond and the remote target are -equal, then value is swapped into the remote target. Otherwise, the remote target is -unchanged. In either case, the old value of the remote target is returned as the function return -value. cond must be of the same data type as target. -.TP -value -The value to be atomically written to the remote PE. value must be the same data -type as target. -.TP -pe -An integer that indicates the PE number upon which target is to be updated. If you -are using Fortran, it must be a default integer value. -.PP -.SH NOTES - -The term remotely accessible is defined in \fIintro_shmem\fP(3)\&. -.SH RETURN VALUES - -The contents that had been in the target data object on the remote PE prior to the conditional -swap. -.SH EXAMPLES - -The following call ensures that the first PE to execute the conditional swap will successfully -write its PE number to race_winner on PE 0. -.Vb -main() -{ - static int race_winner = \-1; - int oldval; - - shmem_init(); - oldval = shmem_int_cswap(&race_winner, \-1, shmem_my_pe(), 0); - if (oldval == \-1) - printf("pe %d was first\\n",shmem_my_pe()); -} -.Ve -.SH SEE ALSO - -\fIintro_shmem\fP(3), -\fIshmem_cache\fP(3), -\fIshmem_swap\fP(3) diff --git a/oshmem/shmem/man/man3/shmem_int_fadd.3in b/oshmem/shmem/man/man3/shmem_int_fadd.3in deleted file mode 100644 index a1eb7269240..00000000000 --- a/oshmem/shmem/man/man3/shmem_int_fadd.3in +++ /dev/null @@ -1,79 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2015 University of Houston. All rights reserved. -.\" Copyright (c) 2015 Mellanox Technologies, Inc. -.\" $COPYRIGHT$ -.de Vb -.ft CW -.nf -.. -.de Ve -.ft R - -.fi -.. 
-.TH "SHMEM\\_FADD" "3" "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME - -\fIshmem_int4_fadd\fP(3), -\fIshmem_int8_fadd\fP(3), -\fIshmem_int_fadd\fP(3), -\fIshmem_long_fadd\fP(3), -\fIshmem_longlong_fadd\fP(3) -\- Performs an atomic fetch\-and\-add operation on a remote data object -.SH SYNOPSIS - -C or C++: -.Vb -#include - -int shmem_int_fadd(int *target, int value, int pe); - -long shmem_long_fadd(long *target, long value, int pe); - -long long shmem_longlong_fadd(long long *target, longlong value, - int pe); -.Ve -Fortran: -.Vb -INCLUDE "mpp/shmem.fh" - -INTEGER pe - -INTEGER(KIND=4) SHMEM_INT4_FADD, ires, target, value -ires = SHMEM_INT4_FADD(target, value, pe) - -INTEGER(KIND=8) SHMEM_INT8_FADD, ires, target, value -ires = SHMEM_INT8_FADD(target, value, pe) -.Ve -.SH DESCRIPTION - -shmem_fadd functions perform an atomic fetch\-and\-add operation. An atomic -fetch\-and\-add operation fetches the old target and adds value to target without the -possibility of another process updating target between the time of the fetch and the update. -These routines add value to target on Processing Element (PE) pe and return the previous -contents of target as an atomic operation. -.PP -The arguments are as follows: -.TP -target -The remotely accessible integer data object to be updated on the remote PE. The -type of target should match that implied in the SYNOPSIS section. -.TP -value -The value to be atomically added to target. The type of value should match that -implied in the SYNOPSIS section. -.TP -pe -An integer that indicates the PE number on which target is to be updated. If you are -using Fortran, it must be a default integer value. -.PP -.SH NOTES - -The term remotely accessible is defined in \fIintro_shmem\fP(3)\&. -.SH RETURN VALUES - -The contents that had been at the target address on the remote PE prior to the atomic addition -operation. 
-.SH SEE ALSO - -\fIintro_shmem\fP(3) diff --git a/oshmem/shmem/man/man3/shmem_int_fetch.3in b/oshmem/shmem/man/man3/shmem_int_fetch.3in deleted file mode 100644 index e11e131eb69..00000000000 --- a/oshmem/shmem/man/man3/shmem_int_fetch.3in +++ /dev/null @@ -1,83 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2016 Mellanox Technologies, Inc. -.\" $COPYRIGHT$ -.de Vb -.ft CW -.nf -.. -.de Ve -.ft R - -.fi -.. -.TH "SHMEM\\_FETCH" "3" "Unreleased developer copy" "gitclone" "Open MPI" -.SH NAME - -\fIshmem_int4_fetch\fP(3), -\fIshmem_int8_fetch\fP(3), -\fIshmem_int_fetch\fP(3), -\fIshmem_long_fetch\fP(3), -\fIshmem_longlong_fetch\fP(3) -\fIshmem_double_fetch\fP(3) -\fIshmem_float_fetch\fP(3) -\- Atomically fetches the value of a remote data object -.SH SYNOPSIS - -C or C++: -.Vb -#include - -int shmem_int_fetch(int *target, int pe); - -long shmem_long_fetch(long *target, int pe); - -long long shmem_longlong_fetch(long long *target, int pe); - -double shmem_double_fetch(long long *target, int pe); - -float shmem_float_fetch(float *target, int pe); -.Ve -Fortran: -.Vb -INCLUDE "mpp/shmem.fh" - -INTEGER pe - -INTEGER(KIND=4) SHMEM_INT4_FETCH, ires, target -ires = SHMEM_INT4_FETCH(target, pe) - -INTEGER(KIND=8) SHMEM_INT8_FETCH, ires, target -ires = SHMEM_INT8_FETCH(target, pe) - - -REAL(KIND=4) SHMEM_INT4_FETCH, ires, target -ires = SHMEM_REAL4_FETCH(target, pe) - -REAL(KIND=8) SHMEM_INT8_FETCH, ires, target -ires = SHMEM_REAL8_FETCH(target, pe) - -.Ve -.SH DESCRIPTION - -The shmem_fetch functions perform an atomic fetch operation. They return the contents of the -\fBtarget\fP as an atomic operation. - -.PP -The arguments are as follows: -.TP -target -The remotely accessible data object to be fetched from the remote PE. -.TP -pe -An integer that indicates the PE number from which \fItarget\fP is to be fetched. If you are -using Fortran, it must be a default integer value. -.PP - -.SH RETURN VALUES -The contents at the \fItarget\fP address on the remote PE. 
-The data type of the return value is the same as the the -type of the remote data object. - -.SH SEE ALSO - -\fIintro_shmem\fP(3) diff --git a/oshmem/shmem/man/man3/shmem_int_finc.3in b/oshmem/shmem/man/man3/shmem_int_finc.3in deleted file mode 100644 index 0f1808693e9..00000000000 --- a/oshmem/shmem/man/man3/shmem_int_finc.3in +++ /dev/null @@ -1,76 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2015 University of Houston. All rights reserved. -.\" Copyright (c) 2015 Mellanox Technologies, Inc. -.\" $COPYRIGHT$ -.de Vb -.ft CW -.nf -.. -.de Ve -.ft R - -.fi -.. -.TH "SHMEM\\_FINC" "3" "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME - -\fIshmem_int4_finc\fP(3), -\fIshmem_int8_finc\fP(3), -\fIshmem_int_finc\fP(3), -\fIshmem_long_finc\fP(3), -\fIshmem_longlong_finc\fP(3) -\- Performs an atomic fetch\-and\-increment operation on a remote data object -.SH SYNOPSIS - -C or C++: -.Vb -#include - -int shmem_int_finc(int *target, int pe); - -long shmem_long_finc(long *target, int pe); - -long long shmem_longlong_finc(long long *target, int pe); -.Ve -Fortran: -.Vb -INCLUDE "mpp/shmem.fh" - -INTEGER pe -INTEGER(KIND=4) SHMEM_INT4_FINC, target4 -INTEGER(KIND=8) SHMEM_INT8_FINC, target8 - -ires4 = SHMEM_INT4_FINC(target4, pe) - -ires8 = SHMEM_INT8_FINC(target8, pe) -.Ve -.SH DESCRIPTION - -The fetch and increment routines retrieve the value at address \fBtarget\fP -on PE -\fBpe\fP, -and update \fBtarget\fP -with the result of incrementing the retrieved value by -one. The operation must be completed without the possibility of another process updating -\fBtarget\fP -between the time of the fetch and the update. -.PP -The arguments are as follows: -.TP -target -The remotely accessible integer data object to be updated on the remote PE. The -type of target should match that implied in the SYNOPSIS section. -.TP -pe -An integer that indicates the PE number upon which target is to be updated. If you -are using Fortran, it must be a default integer value. 
-.PP -.SH NOTES - -The term remotely accessible is defined in \fIintro_shmem\fP(3)\&. -.SH RETURN VALUES - -The contents that had been at the target address on the remote PE prior to the increment. -.SH SEE ALSO - -\fIintro_shmem\fP(3) diff --git a/oshmem/shmem/man/man3/shmem_int_g.3in b/oshmem/shmem/man/man3/shmem_int_g.3in deleted file mode 100644 index cd9ba27b1bc..00000000000 --- a/oshmem/shmem/man/man3/shmem_int_g.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_char_g.3 diff --git a/oshmem/shmem/man/man3/shmem_int_get.3in b/oshmem/shmem/man/man3/shmem_int_get.3in deleted file mode 100644 index 7cecf74d7ad..00000000000 --- a/oshmem/shmem/man/man3/shmem_int_get.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_char_get.3 diff --git a/oshmem/shmem/man/man3/shmem_int_get_nbi.3in b/oshmem/shmem/man/man3/shmem_int_get_nbi.3in deleted file mode 100644 index 50449e90a6d..00000000000 --- a/oshmem/shmem/man/man3/shmem_int_get_nbi.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_getmem_nbi.3 diff --git a/oshmem/shmem/man/man3/shmem_int_iget.3in b/oshmem/shmem/man/man3/shmem_int_iget.3in deleted file mode 100644 index df8770591af..00000000000 --- a/oshmem/shmem/man/man3/shmem_int_iget.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_iget.3 diff --git a/oshmem/shmem/man/man3/shmem_int_inc.3in b/oshmem/shmem/man/man3/shmem_int_inc.3in deleted file mode 100644 index 0a879d766d7..00000000000 --- a/oshmem/shmem/man/man3/shmem_int_inc.3in +++ /dev/null @@ -1,73 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2015 University of Houston. All rights reserved. -.\" Copyright (c) 2015 Mellanox Technologies, Inc. -.\" $COPYRIGHT$ -.de Vb -.ft CW -.nf -.. -.de Ve -.ft R - -.fi -.. -.TH "SHMEM\\_INC" "3" "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME - -\fIshmem_int4_inc\fP(3), -\fIshmem_int8_inc\fP(3), -\fIshmem_int_inc\fP(3), -\fIshmem_long_inc\fP(3), -\fIshmem_longlong_inc\fP(3) -\- These routines perform an atomic increment operation on a remote data object. 
-.SH SYNOPSIS - -C or C++: -.Vb -#include - -int shmem_int_inc(int *target, int pe); - -long shmem_long_inc(long *target, int pe); - -long long shmem_longlong_inc(long long *target, int pe); -.Ve -Fortran: -.Vb -INCLUDE "mpp/shmem.fh" - -INTEGER pe -INTEGER(KIND=4) SHMEM_INT4_INC, target4 -INTEGER(KIND=8) SHMEM_INT8_INC, target8 - -ires4 = SHMEM_INT4_INC(target4, pe) - -ires8 = SHMEM_INT8_INC(target8, pe) -.Ve -.SH DESCRIPTION - -The atomic increment routines replace the value of \fBtarget\fP -with its value incremented by -one. The operation must be completed without the possibility of another process updating -\fBtarget\fP -between the time of the fetch and the update. -.PP -The arguments are as follows: -.TP -target -The remotely accessible integer data object to be updated on the remote PE. The -type of target should match that implied in the SYNOPSIS section. -.TP -pe -An integer that indicates the PE number upon which target is to be updated. If you -are using Fortran, it must be a default integer value. -.PP -.SH NOTES - -The term remotely accessible is defined in \fIintro_shmem\fP(3)\&. -.SH RETURN VALUES - -None. 
-.SH SEE ALSO - -\fIintro_shmem\fP(3) diff --git a/oshmem/shmem/man/man3/shmem_int_iput.3in b/oshmem/shmem/man/man3/shmem_int_iput.3in deleted file mode 100644 index 2fdf7a97e3d..00000000000 --- a/oshmem/shmem/man/man3/shmem_int_iput.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_iput.3 diff --git a/oshmem/shmem/man/man3/shmem_int_max_to_all.3in b/oshmem/shmem/man/man3/shmem_int_max_to_all.3in deleted file mode 100644 index 3e41c814889..00000000000 --- a/oshmem/shmem/man/man3/shmem_int_max_to_all.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_max_to_all.3 diff --git a/oshmem/shmem/man/man3/shmem_int_min_to_all.3in b/oshmem/shmem/man/man3/shmem_int_min_to_all.3in deleted file mode 100644 index d688221529b..00000000000 --- a/oshmem/shmem/man/man3/shmem_int_min_to_all.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_min_to_all.3 diff --git a/oshmem/shmem/man/man3/shmem_int_or_to_all.3in b/oshmem/shmem/man/man3/shmem_int_or_to_all.3in deleted file mode 100644 index 17b3fe22e3c..00000000000 --- a/oshmem/shmem/man/man3/shmem_int_or_to_all.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_or_to_all.3 diff --git a/oshmem/shmem/man/man3/shmem_int_p.3in b/oshmem/shmem/man/man3/shmem_int_p.3in deleted file mode 100644 index 42b9fd0e81f..00000000000 --- a/oshmem/shmem/man/man3/shmem_int_p.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_char_p.3 diff --git a/oshmem/shmem/man/man3/shmem_int_prod_to_all.3in b/oshmem/shmem/man/man3/shmem_int_prod_to_all.3in deleted file mode 100644 index 39b196d0820..00000000000 --- a/oshmem/shmem/man/man3/shmem_int_prod_to_all.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_prod_to_all.3 diff --git a/oshmem/shmem/man/man3/shmem_int_put.3in b/oshmem/shmem/man/man3/shmem_int_put.3in deleted file mode 100644 index 9c7b2e25452..00000000000 --- a/oshmem/shmem/man/man3/shmem_int_put.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_char_put.3 diff --git a/oshmem/shmem/man/man3/shmem_int_put_nbi.3in 
b/oshmem/shmem/man/man3/shmem_int_put_nbi.3in deleted file mode 100644 index fb4ad1413b0..00000000000 --- a/oshmem/shmem/man/man3/shmem_int_put_nbi.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_putmem_nbi.3 diff --git a/oshmem/shmem/man/man3/shmem_int_set.3in b/oshmem/shmem/man/man3/shmem_int_set.3in deleted file mode 100644 index 7a68b2eee35..00000000000 --- a/oshmem/shmem/man/man3/shmem_int_set.3in +++ /dev/null @@ -1,77 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2016 Mellanox Technologies, Inc. -.\" $COPYRIGHT$ -.de Vb -.ft CW -.nf -.. -.de Ve -.ft R - -.fi -.. -.TH "SHMEM\\_SET" "3" "Unreleased developer copy" "gitclone" "Open MPI" -.SH NAME - -\fIshmem_double_set\fP(3), -\fIshmem_float_set\fP(3), -\fIshmem_int_set\fP(3), -\fIshmem_long_set\fP(3), -\fIshmem_longlong_set\fP(3) -\fIshmem_int4_set\fP(3), -\fIshmem_int8_set\fP(3), -\fIshmem_real4_set\fP(3), -\fIshmem_real8_set\fP(3), -\- Atomically sets the value of a remote data object - -.SH SYNOPSIS - -C or C++: -.Vb -#include - -void shmem_double_set(double *target, double value, int pe); - -void shmem_float_set(float *target, float value, int pe); - -void shmem_int_set(int *target, int value, int pe); - -void shmem_long_set(long *target, long value, int pe); - -void shmem_longlong_set(long long *target, long long value, int pe); -.Ve -Fortran: -.Vb -INCLUDE "mpp/shmem.fh" - -INTEGER pe - -CALL SHMEM_INT4_SET(target, value, pe) -CALL SHMEM_INT8_SET(target, value, pe) -CALL SHMEM_REAL4_SET(target, value, pe) -CALL SHMEM_REAL8_SET(target, value, pe) - -.Ve -.SH DESCRIPTION - -The set routines write the \fBvalue\fP into the address \fBtarget\fP on \fBpe\fP as an atomic operation. -.PP -The arguments are as follows: -.TP -target -The remotely accessible data object to be set on the remote PE. -.TP -value -The value to be atomically written to the remote PE. -.TP -pe -An integer that indicates the PE number upon which target is to be updated. If you -are using Fortran, it must be a default integer value. 
-.PP -.SH RETURN VALUES -NONE - -.SH SEE ALSO - -\fIintro_shmem\fP(3) - diff --git a/oshmem/shmem/man/man3/shmem_int_sum_to_all.3in b/oshmem/shmem/man/man3/shmem_int_sum_to_all.3in deleted file mode 100644 index f75a4948417..00000000000 --- a/oshmem/shmem/man/man3/shmem_int_sum_to_all.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_sum_to_all.3 diff --git a/oshmem/shmem/man/man3/shmem_int_swap.3in b/oshmem/shmem/man/man3/shmem_int_swap.3in deleted file mode 100644 index 31324f2c2e3..00000000000 --- a/oshmem/shmem/man/man3/shmem_int_swap.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_swap.3 diff --git a/oshmem/shmem/man/man3/shmem_int_wait.3in b/oshmem/shmem/man/man3/shmem_int_wait.3in deleted file mode 100644 index 03267ffbc55..00000000000 --- a/oshmem/shmem/man/man3/shmem_int_wait.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_wait.3 diff --git a/oshmem/shmem/man/man3/shmem_int_wait_until.3in b/oshmem/shmem/man/man3/shmem_int_wait_until.3in deleted file mode 100644 index 03267ffbc55..00000000000 --- a/oshmem/shmem/man/man3/shmem_int_wait_until.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_wait.3 diff --git a/oshmem/shmem/man/man3/shmem_int_xor_to_all.3in b/oshmem/shmem/man/man3/shmem_int_xor_to_all.3in deleted file mode 100644 index 3a0855e2f47..00000000000 --- a/oshmem/shmem/man/man3/shmem_int_xor_to_all.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_xor_to_all.3 diff --git a/oshmem/shmem/man/man3/shmem_iput128.3in b/oshmem/shmem/man/man3/shmem_iput128.3in deleted file mode 100644 index 2fdf7a97e3d..00000000000 --- a/oshmem/shmem/man/man3/shmem_iput128.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_iput.3 diff --git a/oshmem/shmem/man/man3/shmem_iput32.3in b/oshmem/shmem/man/man3/shmem_iput32.3in deleted file mode 100644 index 2fdf7a97e3d..00000000000 --- a/oshmem/shmem/man/man3/shmem_iput32.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_iput.3 diff --git a/oshmem/shmem/man/man3/shmem_iput64.3in 
b/oshmem/shmem/man/man3/shmem_iput64.3in deleted file mode 100644 index 2fdf7a97e3d..00000000000 --- a/oshmem/shmem/man/man3/shmem_iput64.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_iput.3 diff --git a/oshmem/shmem/man/man3/shmem_long_add.3in b/oshmem/shmem/man/man3/shmem_long_add.3in deleted file mode 100644 index 8606d1aa608..00000000000 --- a/oshmem/shmem/man/man3/shmem_long_add.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_int_add.3 diff --git a/oshmem/shmem/man/man3/shmem_long_and_to_all.3in b/oshmem/shmem/man/man3/shmem_long_and_to_all.3in deleted file mode 100644 index 0a769b7c8a8..00000000000 --- a/oshmem/shmem/man/man3/shmem_long_and_to_all.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_and_to_all.3 diff --git a/oshmem/shmem/man/man3/shmem_long_cswap.3in b/oshmem/shmem/man/man3/shmem_long_cswap.3in deleted file mode 100644 index cf2a78fe882..00000000000 --- a/oshmem/shmem/man/man3/shmem_long_cswap.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_int_cswap.3 diff --git a/oshmem/shmem/man/man3/shmem_long_fadd.3in b/oshmem/shmem/man/man3/shmem_long_fadd.3in deleted file mode 100644 index d3bc2141cad..00000000000 --- a/oshmem/shmem/man/man3/shmem_long_fadd.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_int_fadd.3 diff --git a/oshmem/shmem/man/man3/shmem_long_fetch.3in b/oshmem/shmem/man/man3/shmem_long_fetch.3in deleted file mode 100644 index 7213c75e538..00000000000 --- a/oshmem/shmem/man/man3/shmem_long_fetch.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_int_fetch.3 diff --git a/oshmem/shmem/man/man3/shmem_long_finc.3in b/oshmem/shmem/man/man3/shmem_long_finc.3in deleted file mode 100644 index fc800da8ff8..00000000000 --- a/oshmem/shmem/man/man3/shmem_long_finc.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_int_finc.3 diff --git a/oshmem/shmem/man/man3/shmem_long_g.3in b/oshmem/shmem/man/man3/shmem_long_g.3in deleted file mode 100644 index cd9ba27b1bc..00000000000 --- a/oshmem/shmem/man/man3/shmem_long_g.3in +++ /dev/null @@ -1 +0,0 @@ 
-.so man3/shmem_char_g.3 diff --git a/oshmem/shmem/man/man3/shmem_long_get.3in b/oshmem/shmem/man/man3/shmem_long_get.3in deleted file mode 100644 index 7cecf74d7ad..00000000000 --- a/oshmem/shmem/man/man3/shmem_long_get.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_char_get.3 diff --git a/oshmem/shmem/man/man3/shmem_long_get_nbi.3in b/oshmem/shmem/man/man3/shmem_long_get_nbi.3in deleted file mode 100644 index 50449e90a6d..00000000000 --- a/oshmem/shmem/man/man3/shmem_long_get_nbi.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_getmem_nbi.3 diff --git a/oshmem/shmem/man/man3/shmem_long_iget.3in b/oshmem/shmem/man/man3/shmem_long_iget.3in deleted file mode 100644 index df8770591af..00000000000 --- a/oshmem/shmem/man/man3/shmem_long_iget.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_iget.3 diff --git a/oshmem/shmem/man/man3/shmem_long_inc.3in b/oshmem/shmem/man/man3/shmem_long_inc.3in deleted file mode 100644 index 96ae7ddd34c..00000000000 --- a/oshmem/shmem/man/man3/shmem_long_inc.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_int_inc.3 diff --git a/oshmem/shmem/man/man3/shmem_long_iput.3in b/oshmem/shmem/man/man3/shmem_long_iput.3in deleted file mode 100644 index 2fdf7a97e3d..00000000000 --- a/oshmem/shmem/man/man3/shmem_long_iput.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_iput.3 diff --git a/oshmem/shmem/man/man3/shmem_long_max_to_all.3in b/oshmem/shmem/man/man3/shmem_long_max_to_all.3in deleted file mode 100644 index 3e41c814889..00000000000 --- a/oshmem/shmem/man/man3/shmem_long_max_to_all.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_max_to_all.3 diff --git a/oshmem/shmem/man/man3/shmem_long_min_to_all.3in b/oshmem/shmem/man/man3/shmem_long_min_to_all.3in deleted file mode 100644 index d688221529b..00000000000 --- a/oshmem/shmem/man/man3/shmem_long_min_to_all.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_min_to_all.3 diff --git a/oshmem/shmem/man/man3/shmem_long_or_to_all.3in b/oshmem/shmem/man/man3/shmem_long_or_to_all.3in 
deleted file mode 100644 index 17b3fe22e3c..00000000000 --- a/oshmem/shmem/man/man3/shmem_long_or_to_all.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_or_to_all.3 diff --git a/oshmem/shmem/man/man3/shmem_long_p.3in b/oshmem/shmem/man/man3/shmem_long_p.3in deleted file mode 100644 index 42b9fd0e81f..00000000000 --- a/oshmem/shmem/man/man3/shmem_long_p.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_char_p.3 diff --git a/oshmem/shmem/man/man3/shmem_long_prod_to_all.3in b/oshmem/shmem/man/man3/shmem_long_prod_to_all.3in deleted file mode 100644 index 39b196d0820..00000000000 --- a/oshmem/shmem/man/man3/shmem_long_prod_to_all.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_prod_to_all.3 diff --git a/oshmem/shmem/man/man3/shmem_long_put.3in b/oshmem/shmem/man/man3/shmem_long_put.3in deleted file mode 100644 index 9c7b2e25452..00000000000 --- a/oshmem/shmem/man/man3/shmem_long_put.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_char_put.3 diff --git a/oshmem/shmem/man/man3/shmem_long_put_nbi.3in b/oshmem/shmem/man/man3/shmem_long_put_nbi.3in deleted file mode 100644 index fb4ad1413b0..00000000000 --- a/oshmem/shmem/man/man3/shmem_long_put_nbi.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_putmem_nbi.3 diff --git a/oshmem/shmem/man/man3/shmem_long_set.3in b/oshmem/shmem/man/man3/shmem_long_set.3in deleted file mode 100644 index a02bb7ea5ea..00000000000 --- a/oshmem/shmem/man/man3/shmem_long_set.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_int_set.3 diff --git a/oshmem/shmem/man/man3/shmem_long_sum_to_all.3in b/oshmem/shmem/man/man3/shmem_long_sum_to_all.3in deleted file mode 100644 index f75a4948417..00000000000 --- a/oshmem/shmem/man/man3/shmem_long_sum_to_all.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_sum_to_all.3 diff --git a/oshmem/shmem/man/man3/shmem_long_swap.3in b/oshmem/shmem/man/man3/shmem_long_swap.3in deleted file mode 100644 index 31324f2c2e3..00000000000 --- a/oshmem/shmem/man/man3/shmem_long_swap.3in +++ /dev/null @@ -1 +0,0 @@ 
-.so man3/shmem_swap.3 diff --git a/oshmem/shmem/man/man3/shmem_long_wait.3in b/oshmem/shmem/man/man3/shmem_long_wait.3in deleted file mode 100644 index 03267ffbc55..00000000000 --- a/oshmem/shmem/man/man3/shmem_long_wait.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_wait.3 diff --git a/oshmem/shmem/man/man3/shmem_long_wait_until.3in b/oshmem/shmem/man/man3/shmem_long_wait_until.3in deleted file mode 100644 index 03267ffbc55..00000000000 --- a/oshmem/shmem/man/man3/shmem_long_wait_until.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_wait.3 diff --git a/oshmem/shmem/man/man3/shmem_long_xor_to_all.3in b/oshmem/shmem/man/man3/shmem_long_xor_to_all.3in deleted file mode 100644 index 3a0855e2f47..00000000000 --- a/oshmem/shmem/man/man3/shmem_long_xor_to_all.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_xor_to_all.3 diff --git a/oshmem/shmem/man/man3/shmem_longdouble_g.3in b/oshmem/shmem/man/man3/shmem_longdouble_g.3in deleted file mode 100644 index cd9ba27b1bc..00000000000 --- a/oshmem/shmem/man/man3/shmem_longdouble_g.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_char_g.3 diff --git a/oshmem/shmem/man/man3/shmem_longdouble_get.3in b/oshmem/shmem/man/man3/shmem_longdouble_get.3in deleted file mode 100644 index 7cecf74d7ad..00000000000 --- a/oshmem/shmem/man/man3/shmem_longdouble_get.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_char_get.3 diff --git a/oshmem/shmem/man/man3/shmem_longdouble_get_nbi.3in b/oshmem/shmem/man/man3/shmem_longdouble_get_nbi.3in deleted file mode 100644 index 50449e90a6d..00000000000 --- a/oshmem/shmem/man/man3/shmem_longdouble_get_nbi.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_getmem_nbi.3 diff --git a/oshmem/shmem/man/man3/shmem_longdouble_iget.3in b/oshmem/shmem/man/man3/shmem_longdouble_iget.3in deleted file mode 100644 index df8770591af..00000000000 --- a/oshmem/shmem/man/man3/shmem_longdouble_iget.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_iget.3 diff --git a/oshmem/shmem/man/man3/shmem_longdouble_iput.3in 
b/oshmem/shmem/man/man3/shmem_longdouble_iput.3in deleted file mode 100644 index 2fdf7a97e3d..00000000000 --- a/oshmem/shmem/man/man3/shmem_longdouble_iput.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_iput.3 diff --git a/oshmem/shmem/man/man3/shmem_longdouble_max_to_all.3in b/oshmem/shmem/man/man3/shmem_longdouble_max_to_all.3in deleted file mode 100644 index 3e41c814889..00000000000 --- a/oshmem/shmem/man/man3/shmem_longdouble_max_to_all.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_max_to_all.3 diff --git a/oshmem/shmem/man/man3/shmem_longdouble_min_to_all.3in b/oshmem/shmem/man/man3/shmem_longdouble_min_to_all.3in deleted file mode 100644 index d688221529b..00000000000 --- a/oshmem/shmem/man/man3/shmem_longdouble_min_to_all.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_min_to_all.3 diff --git a/oshmem/shmem/man/man3/shmem_longdouble_p.3in b/oshmem/shmem/man/man3/shmem_longdouble_p.3in deleted file mode 100644 index 42b9fd0e81f..00000000000 --- a/oshmem/shmem/man/man3/shmem_longdouble_p.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_char_p.3 diff --git a/oshmem/shmem/man/man3/shmem_longdouble_prod_to_all.3in b/oshmem/shmem/man/man3/shmem_longdouble_prod_to_all.3in deleted file mode 100644 index 39b196d0820..00000000000 --- a/oshmem/shmem/man/man3/shmem_longdouble_prod_to_all.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_prod_to_all.3 diff --git a/oshmem/shmem/man/man3/shmem_longdouble_put.3in b/oshmem/shmem/man/man3/shmem_longdouble_put.3in deleted file mode 100644 index 9c7b2e25452..00000000000 --- a/oshmem/shmem/man/man3/shmem_longdouble_put.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_char_put.3 diff --git a/oshmem/shmem/man/man3/shmem_longdouble_put_nbi.3in b/oshmem/shmem/man/man3/shmem_longdouble_put_nbi.3in deleted file mode 100644 index fb4ad1413b0..00000000000 --- a/oshmem/shmem/man/man3/shmem_longdouble_put_nbi.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_putmem_nbi.3 diff --git 
a/oshmem/shmem/man/man3/shmem_longdouble_sum_to_all.3in b/oshmem/shmem/man/man3/shmem_longdouble_sum_to_all.3in deleted file mode 100644 index f75a4948417..00000000000 --- a/oshmem/shmem/man/man3/shmem_longdouble_sum_to_all.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_sum_to_all.3 diff --git a/oshmem/shmem/man/man3/shmem_longlong_add.3in b/oshmem/shmem/man/man3/shmem_longlong_add.3in deleted file mode 100644 index 8606d1aa608..00000000000 --- a/oshmem/shmem/man/man3/shmem_longlong_add.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_int_add.3 diff --git a/oshmem/shmem/man/man3/shmem_longlong_and_to_all.3in b/oshmem/shmem/man/man3/shmem_longlong_and_to_all.3in deleted file mode 100644 index 0a769b7c8a8..00000000000 --- a/oshmem/shmem/man/man3/shmem_longlong_and_to_all.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_and_to_all.3 diff --git a/oshmem/shmem/man/man3/shmem_longlong_cswap.3in b/oshmem/shmem/man/man3/shmem_longlong_cswap.3in deleted file mode 100644 index cf2a78fe882..00000000000 --- a/oshmem/shmem/man/man3/shmem_longlong_cswap.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_int_cswap.3 diff --git a/oshmem/shmem/man/man3/shmem_longlong_fadd.3in b/oshmem/shmem/man/man3/shmem_longlong_fadd.3in deleted file mode 100644 index d3bc2141cad..00000000000 --- a/oshmem/shmem/man/man3/shmem_longlong_fadd.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_int_fadd.3 diff --git a/oshmem/shmem/man/man3/shmem_longlong_fetch.3in b/oshmem/shmem/man/man3/shmem_longlong_fetch.3in deleted file mode 100644 index 7213c75e538..00000000000 --- a/oshmem/shmem/man/man3/shmem_longlong_fetch.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_int_fetch.3 diff --git a/oshmem/shmem/man/man3/shmem_longlong_finc.3in b/oshmem/shmem/man/man3/shmem_longlong_finc.3in deleted file mode 100644 index fc800da8ff8..00000000000 --- a/oshmem/shmem/man/man3/shmem_longlong_finc.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_int_finc.3 diff --git a/oshmem/shmem/man/man3/shmem_longlong_g.3in 
b/oshmem/shmem/man/man3/shmem_longlong_g.3in deleted file mode 100644 index cd9ba27b1bc..00000000000 --- a/oshmem/shmem/man/man3/shmem_longlong_g.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_char_g.3 diff --git a/oshmem/shmem/man/man3/shmem_longlong_get.3in b/oshmem/shmem/man/man3/shmem_longlong_get.3in deleted file mode 100644 index 7cecf74d7ad..00000000000 --- a/oshmem/shmem/man/man3/shmem_longlong_get.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_char_get.3 diff --git a/oshmem/shmem/man/man3/shmem_longlong_get_nbi.3in b/oshmem/shmem/man/man3/shmem_longlong_get_nbi.3in deleted file mode 100644 index 50449e90a6d..00000000000 --- a/oshmem/shmem/man/man3/shmem_longlong_get_nbi.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_getmem_nbi.3 diff --git a/oshmem/shmem/man/man3/shmem_longlong_iget.3in b/oshmem/shmem/man/man3/shmem_longlong_iget.3in deleted file mode 100644 index df8770591af..00000000000 --- a/oshmem/shmem/man/man3/shmem_longlong_iget.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_iget.3 diff --git a/oshmem/shmem/man/man3/shmem_longlong_inc.3in b/oshmem/shmem/man/man3/shmem_longlong_inc.3in deleted file mode 100644 index 96ae7ddd34c..00000000000 --- a/oshmem/shmem/man/man3/shmem_longlong_inc.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_int_inc.3 diff --git a/oshmem/shmem/man/man3/shmem_longlong_iput.3in b/oshmem/shmem/man/man3/shmem_longlong_iput.3in deleted file mode 100644 index 2fdf7a97e3d..00000000000 --- a/oshmem/shmem/man/man3/shmem_longlong_iput.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_iput.3 diff --git a/oshmem/shmem/man/man3/shmem_longlong_max_to_all.3in b/oshmem/shmem/man/man3/shmem_longlong_max_to_all.3in deleted file mode 100644 index 3e41c814889..00000000000 --- a/oshmem/shmem/man/man3/shmem_longlong_max_to_all.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_max_to_all.3 diff --git a/oshmem/shmem/man/man3/shmem_longlong_min_to_all.3in b/oshmem/shmem/man/man3/shmem_longlong_min_to_all.3in deleted file mode 
100644 index d688221529b..00000000000 --- a/oshmem/shmem/man/man3/shmem_longlong_min_to_all.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_min_to_all.3 diff --git a/oshmem/shmem/man/man3/shmem_longlong_or_to_all.3in b/oshmem/shmem/man/man3/shmem_longlong_or_to_all.3in deleted file mode 100644 index 17b3fe22e3c..00000000000 --- a/oshmem/shmem/man/man3/shmem_longlong_or_to_all.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_or_to_all.3 diff --git a/oshmem/shmem/man/man3/shmem_longlong_p.3in b/oshmem/shmem/man/man3/shmem_longlong_p.3in deleted file mode 100644 index 42b9fd0e81f..00000000000 --- a/oshmem/shmem/man/man3/shmem_longlong_p.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_char_p.3 diff --git a/oshmem/shmem/man/man3/shmem_longlong_prod_to_all.3in b/oshmem/shmem/man/man3/shmem_longlong_prod_to_all.3in deleted file mode 100644 index 39b196d0820..00000000000 --- a/oshmem/shmem/man/man3/shmem_longlong_prod_to_all.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_prod_to_all.3 diff --git a/oshmem/shmem/man/man3/shmem_longlong_put.3in b/oshmem/shmem/man/man3/shmem_longlong_put.3in deleted file mode 100644 index 9c7b2e25452..00000000000 --- a/oshmem/shmem/man/man3/shmem_longlong_put.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_char_put.3 diff --git a/oshmem/shmem/man/man3/shmem_longlong_put_nbi.3in b/oshmem/shmem/man/man3/shmem_longlong_put_nbi.3in deleted file mode 100644 index fb4ad1413b0..00000000000 --- a/oshmem/shmem/man/man3/shmem_longlong_put_nbi.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_putmem_nbi.3 diff --git a/oshmem/shmem/man/man3/shmem_longlong_set.3in b/oshmem/shmem/man/man3/shmem_longlong_set.3in deleted file mode 100644 index a02bb7ea5ea..00000000000 --- a/oshmem/shmem/man/man3/shmem_longlong_set.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_int_set.3 diff --git a/oshmem/shmem/man/man3/shmem_longlong_sum_to_all.3in b/oshmem/shmem/man/man3/shmem_longlong_sum_to_all.3in deleted file mode 100644 index f75a4948417..00000000000 
--- a/oshmem/shmem/man/man3/shmem_longlong_sum_to_all.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_sum_to_all.3 diff --git a/oshmem/shmem/man/man3/shmem_longlong_swap.3in b/oshmem/shmem/man/man3/shmem_longlong_swap.3in deleted file mode 100644 index 31324f2c2e3..00000000000 --- a/oshmem/shmem/man/man3/shmem_longlong_swap.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_swap.3 diff --git a/oshmem/shmem/man/man3/shmem_longlong_wait.3in b/oshmem/shmem/man/man3/shmem_longlong_wait.3in deleted file mode 100644 index 03267ffbc55..00000000000 --- a/oshmem/shmem/man/man3/shmem_longlong_wait.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_wait.3 diff --git a/oshmem/shmem/man/man3/shmem_longlong_wait_until.3in b/oshmem/shmem/man/man3/shmem_longlong_wait_until.3in deleted file mode 100644 index 03267ffbc55..00000000000 --- a/oshmem/shmem/man/man3/shmem_longlong_wait_until.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_wait.3 diff --git a/oshmem/shmem/man/man3/shmem_longlong_xor_to_all.3in b/oshmem/shmem/man/man3/shmem_longlong_xor_to_all.3in deleted file mode 100644 index 3a0855e2f47..00000000000 --- a/oshmem/shmem/man/man3/shmem_longlong_xor_to_all.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_short_xor_to_all.3 diff --git a/oshmem/shmem/man/man3/shmem_malloc.3in b/oshmem/shmem/man/man3/shmem_malloc.3in deleted file mode 100644 index c7491e563e9..00000000000 --- a/oshmem/shmem/man/man3/shmem_malloc.3in +++ /dev/null @@ -1,113 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2015 University of Houston. All rights reserved. -.\" Copyright (c) 2015 Mellanox Technologies, Inc. -.\" $COPYRIGHT$ -.de Vb -.ft CW -.nf -.. -.de Ve -.ft R - -.fi -.. -.TH "SHMALLOC" "3" "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME - -\fIshmem_malloc\fP(3), -\fIshmem_free\fP(3), -\fIshmem_align\fP(3), -\fIshmem_realloc\fP(3) -\fIshmalloc\fP(3), -\fIshfree\fP(3), -\fIshmemalign\fP(3), -\fIshrealloc\fP(3) -\- Symmetric heap memory management functions. 
-.SH SYNOPSIS - -C or C++: -.Vb -#include - -void *shmem_malloc(size_t size); -void *shmalloc(size_t size); - -void shmem_free(void *ptr); -void shfree(void *ptr); - -void *shmem_realloc(void *ptr, size_t size); -void *shrealloc(void *ptr, size_t size); - -void *shmem_align(size_t alignment, size_t size); -void *shmemalign(size_t alignment, size_t size); - -extern long malloc_error; -.Ve -.SH DESCRIPTION - -The \fBshmem_malloc\fP -function returns a pointer to a block of at least size bytes -suitably aligned for any use. This space is allocated from the symmetric heap (in contrast -to \fImalloc\fP(3C), -which allocates from the private heap). -.PP -The \fBshmem_align\fP -function allocates a block in the symmetric heap that has a -byte alignment specified by the alignment argument. -.PP -The \fBshmem_free\fP -function causes the block to which ptr points to, to be deallocated, -that is, made available for further allocation. If ptr is a null pointer, no action -occurs; otherwise, if the argument does not match a pointer earlier returned by a symmetric -heap function, or if the space has already been deallocated, malloc_error is set to indicate the -error, and shfree returns. -.PP -The \fBshmem_realloc\fP -function changes the size of the block to which ptr points to, to the -size (in bytes) specified by size. -.PP -The contents of the block are unchanged up to the lesser of the new and old sizes. If the new -size is larger, the value of the newly allocated portion of the block is indeterminate. If ptr is a -null pointer, the shrealloc function behaves like the shmalloc function for the specified size. If -size is 0 and ptr is not a null pointer, the block to which it points to is freed. Otherwise, if ptr -does not match a pointer earlier returned by a symmetric heap function, or if the space has -already been deallocated, the malloc_error variable is set to indicate the error, and shrealloc -returns a null pointer. 
If the space cannot be allocated, the block to which ptr points to is -unchanged. -.PP -The shmem_malloc, shmem_free, and shmem_realloc functions are provided so that multiple PEs in an -application can allocate symmetric, remotely accessible memory blocks. These memory -blocks can then be used with (shmem) communication routines. Each of these functions call -the \fIshmem_barrier_all\fP(3) -function before returning; this ensures that all PEs -participate in the memory allocation, and that the memory on other PEs can be used as soon -as the local PE returns. -.PP -The user is responsible for calling these functions with identical argument(s) on all PEs; if -differing size arguments are used, subsequent calls may not return the same symmetric heap -address on all PEs. -.PP -.SH NOTES - -The total size of the symmetric heap is determined at job startup. One can adjust the size of -the heap using the SHMEM_SYMMETRIC_HEAP_SIZE environment variable. See the -\fIintro_shmem\fP(3) -man page for futher details. -The shmem_malloc, shmem_free, and shmem_realloc functions differ from the private heap allocation functions -in that all PEs in an application must call them (a barrier is used to ensure this). -.PP -.SH RETURN VALUES - -The \fBshmem_malloc\fP -function returns a pointer to the allocated space (which should -be identical on all PEs); otherwise, it returns a null pointer (with malloc_error set). -The \fBshmem_free\fP -function returns no value. -The \fBshmem_realloc\fP -function returns a pointer to the allocated space (which -may have moved); otherwise, it returns a null pointer (with malloc_error set). 
-.SH SEE ALSO - -\fIintro_shmem\fP(3), -\fIshmem_my_pe\fP(3I), -\fIshmem_init\fP(3) diff --git a/oshmem/shmem/man/man3/shmem_my_pe.3in b/oshmem/shmem/man/man3/shmem_my_pe.3in deleted file mode 100644 index cd92e13a688..00000000000 --- a/oshmem/shmem/man/man3/shmem_my_pe.3in +++ /dev/null @@ -1,41 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2015 University of Houston. All rights reserved. -.\" Copyright (c) 2015 Mellanox Technologies, Inc. -.\" $COPYRIGHT$ -.de Vb -.ft CW -.nf -.. -.de Ve -.ft R - -.fi -.. -.TH "SHMEM\\_MY\\_PE" "3" "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME - -shmem_my_pe, my_pe, _my_pe \- Returns the virtual PE number of the calling PE. -.SH SYNOPSIS - -C or C++: -.Vb -#include -int shmem_my_pe (void); -int my_pe (void); -.Ve -Fortran: -.Vb -include 'mpp/shmem.fh' -I = SHMEM_MY_PE () -I = MY_PE () -.Ve -.SH DESCRIPTION - -my_pe() or shmem_my_pe() return the processing element (PE) number of the calling PE. It accepts no -arguments. The result is an integer between 0 and npes \- 1, where npes is the total -number of PEs executing the current program. -.SH SEE ALSO - -\fIintro_shmem\fP(3), -\fIshmem_n_pes\fP(3), -\fIshmem_init\fP(3) diff --git a/oshmem/shmem/man/man3/shmem_n_pes.3in b/oshmem/shmem/man/man3/shmem_n_pes.3in deleted file mode 100644 index 83d7c309125..00000000000 --- a/oshmem/shmem/man/man3/shmem_n_pes.3in +++ /dev/null @@ -1,39 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2015 University of Houston. All rights reserved. -.\" Copyright (c) 2015 Mellanox Technologies, Inc. -.\" $COPYRIGHT$ -.de Vb -.ft CW -.nf -.. -.de Ve -.ft R - -.fi -.. -.TH "SHMEM\\_N\\_PES" "3" "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME - -num_pes, _num_pes, shmem_n_pes \- Returns the number of processing elements (PEs) used to run the application. 
-.SH SYNOPSIS - -C or C++: -.Vb -#include -int _num_pes (void); -int shmem_n_pes (void); -.Ve -Fortran: -.Vb -include 'mpp/shmem.fh' -I = NUM_PES () -I = SHMEM_N_PES () -.Ve -.SH DESCRIPTION - -num_pes() or shmem_n_pes() return the total number of PEs running in an application. -.SH SEE ALSO - -\fIintro_shmem\fP(3), -\fIshmem_my_pe\fP(3), -\fIshmem_init\fP(3) diff --git a/oshmem/shmem/man/man3/shmem_pe_accessible.3in b/oshmem/shmem/man/man3/shmem_pe_accessible.3in deleted file mode 100644 index 66b38b9224b..00000000000 --- a/oshmem/shmem/man/man3/shmem_pe_accessible.3in +++ /dev/null @@ -1,54 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2015 University of Houston. All rights reserved. -.\" Copyright (c) 2015 Mellanox Technologies, Inc. -.\" $COPYRIGHT$ -.de Vb -.ft CW -.nf -.. -.de Ve -.ft R - -.fi -.. -.TH "SHMEM\\_PE\\_ACCESSIBLE" "3" "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME - -shmem_pe_accessible \- Determines whether a processing element (PE) is accessible via -SHMEM data transfer operations. -.SH SYNOPSIS - -C: -.Vb -#include - -int shmem_pe_accessible(int pe); -.Ve -Fortran: -.Vb -INCLUDE "mpp/shmem.fh" - -LOGICAL LOG, SHMEM_PE_ACCESSIBLE -INTEGER pe - -LOG = SHMEM_PE_ACCESSIBLE(pe) -.Ve -.SH DESCRIPTION - -shmem_pe_accessible returns a value that indicates whether the calling PE is able to perform -OpenSHMEM communication operations with the remote PE. -.SH RETURN VALUES - -.TP -C/C++ -The return value is 1 if the specified PE is a valid remote PE for SHMEM functions; -otherwise,it is 0. -.TP -Fortran -The return value is \&.TRUE. if the specified PE is a valid remote PE for SHMEM -functions; otherwise, it is \&.FALSE.. 
-.PP -.SH SEE ALSO - -\fIintro_shmem\fP(3), -\fIshmem_addr_accessible\fP(3) diff --git a/oshmem/shmem/man/man3/shmem_ptr.3in b/oshmem/shmem/man/man3/shmem_ptr.3in deleted file mode 100644 index 6c6240ef110..00000000000 --- a/oshmem/shmem/man/man3/shmem_ptr.3in +++ /dev/null @@ -1,129 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2015 University of Houston. All rights reserved. -.\" Copyright (c) 2015 Mellanox Technologies, Inc. -.\" $COPYRIGHT$ -.de Vb -.ft CW -.nf -.. -.de Ve -.ft R - -.fi -.. -.TH "SHMEM\\_PTR" "3" "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME - -\fIshmem_ptr\fP(3) -\- Returns a pointer to a data object on a specified processing element -(PE). -.SH SYNOPSIS - -C or C++: -.Vb -#include - -void *shmem_ptr(const void *target, int pe); -.Ve -Fortran: -.Vb -INCLUDE "mpp/shmem.fh" - -POINTER (PTR, POINTEE) -INTEGER pe - -PTR = SHMEM_PTR(target, pe) -.Ve -.SH DESCRIPTION - -The shmem_ptr routine returns an address that can be used to directly reference -\fBtarget\fP -on the remote PE \fBpe\fP\&. -With this address we can perform ordinary loads -and stores to the remote address. -.PP -When a sequence of loads (gets) and stores (puts) to a data object on a remote PE does not -match the access pattern provided in a SHMEM data transfer routine like -\fIshmem_put32\fP(3) -or \fIshmem_real_iget\fP(3), -the shmem_ptr function can -provide an efficient means to accomplish the communication. -.PP -The arguments are as follows: -.TP -target -The symmetric data object to be referenced. -.TP -pe -An integer that indicates the PE number on which target is to be accessed. If you -are using Fortran, it must be a default integer value. -.PP -.SH EXAMPLES - -This Fortran program calls shmem_ptr and then PE 0 writes to the BIGD array on PE 1: -.Vb -PROGRAM REMOTEWRITE - INCLUDE 'mpp/shmem.fh' - - INTEGER BIGD(100) - SAVE BIGD - INTEGER POINTEE(*) - - POINTER (PTR,POINTEE) - CALL START_PES(0) - IF (MY_PE() .EQ. 0) THEN - ! 
initialize PE 1's BIGD array - PTR = SHMEM_PTR(BIGD, 1) ! get address of PE 1's BIGD - ! array - DO I=1,100 - POINTEE(I) = I - ENDDO - ENDIF - CALL SHMEM_BARRIER_ALL - IF (MY_PE() .EQ. 1) THEN - PRINT *, 'BIGD on PE 1 is: ' - PRINT *, BIGD - ENDIF -END -.Ve -This is the equivalent program written in C: -.Vb -#include -main() -{ - static int bigd[100]; - int *ptr; - int i; - - shmem_init(); - if (shmem_my_pe() == 0) { - /* initialize PE 1's bigd array */ - ptr = shmem_ptr(bigd, 1); - for (i=0; i<100; i++) - *ptr++ = i+1; - } - shmem_barrier_all(); - if (shmem_my_pe() == 1) { - printf("bigd on PE 1 is:\\n"); - for (i=0; i<100; i++) - printf(" %d\\n",bigd[i]); - printf("\\n"); - } -} -.Ve -.SH NOTES - -The shmem_ptr function is available only on systems where ordinary memory loads and -stores are used to implement SHMEM put and get operations. -.PP -.SH RETURN VALUES - -shmem_ptr returns a pointer to the data object on the specified remote PE. If target is not -remotely accessible, a NULL pointer is returned. 
-.PP -.SH SEE ALSO - -\fIintro_shmem\fP(3), -\fIshmem_put\fP(3), -\fIshmem_get\fP(3) -.PP diff --git a/oshmem/shmem/man/man3/shmem_put128.3in b/oshmem/shmem/man/man3/shmem_put128.3in deleted file mode 100644 index 9c7b2e25452..00000000000 --- a/oshmem/shmem/man/man3/shmem_put128.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_char_put.3 diff --git a/oshmem/shmem/man/man3/shmem_put128_nbi.3in b/oshmem/shmem/man/man3/shmem_put128_nbi.3in deleted file mode 100644 index fb4ad1413b0..00000000000 --- a/oshmem/shmem/man/man3/shmem_put128_nbi.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_putmem_nbi.3 diff --git a/oshmem/shmem/man/man3/shmem_put16_nbi.3in b/oshmem/shmem/man/man3/shmem_put16_nbi.3in deleted file mode 100644 index fb4ad1413b0..00000000000 --- a/oshmem/shmem/man/man3/shmem_put16_nbi.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_putmem_nbi.3 diff --git a/oshmem/shmem/man/man3/shmem_put32.3in b/oshmem/shmem/man/man3/shmem_put32.3in deleted file mode 100644 index 9c7b2e25452..00000000000 --- a/oshmem/shmem/man/man3/shmem_put32.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_char_put.3 diff --git a/oshmem/shmem/man/man3/shmem_put32_nbi.3in b/oshmem/shmem/man/man3/shmem_put32_nbi.3in deleted file mode 100644 index fb4ad1413b0..00000000000 --- a/oshmem/shmem/man/man3/shmem_put32_nbi.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_putmem_nbi.3 diff --git a/oshmem/shmem/man/man3/shmem_put64.3in b/oshmem/shmem/man/man3/shmem_put64.3in deleted file mode 100644 index 9c7b2e25452..00000000000 --- a/oshmem/shmem/man/man3/shmem_put64.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_char_put.3 diff --git a/oshmem/shmem/man/man3/shmem_put64_nbi.3in b/oshmem/shmem/man/man3/shmem_put64_nbi.3in deleted file mode 100644 index fb4ad1413b0..00000000000 --- a/oshmem/shmem/man/man3/shmem_put64_nbi.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_putmem_nbi.3 diff --git a/oshmem/shmem/man/man3/shmem_put8_nbi.3in b/oshmem/shmem/man/man3/shmem_put8_nbi.3in deleted file mode 100644 
index fb4ad1413b0..00000000000 --- a/oshmem/shmem/man/man3/shmem_put8_nbi.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_putmem_nbi.3 diff --git a/oshmem/shmem/man/man3/shmem_putmem.3in b/oshmem/shmem/man/man3/shmem_putmem.3in deleted file mode 100644 index 9c7b2e25452..00000000000 --- a/oshmem/shmem/man/man3/shmem_putmem.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_char_put.3 diff --git a/oshmem/shmem/man/man3/shmem_putmem_nbi.3in b/oshmem/shmem/man/man3/shmem_putmem_nbi.3in deleted file mode 100644 index 4a2e7db3eef..00000000000 --- a/oshmem/shmem/man/man3/shmem_putmem_nbi.3in +++ /dev/null @@ -1,171 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2016 Mellanox Technologies, Inc. -.\" $COPYRIGHT$ -.de Vb -.ft CW -.nf -.. -.de Ve -.ft R - -.fi -.. -.TH "SHMEM\\_PUT\\_NBI" "3" "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME - -\fIshmem_putmem_nbi\fP(3), -\fIshmem_char_put_nbi\fP(3), -\fIshmem_short_put_nbi\fP(3), -\fIshmem_int_put_nbi\fP(3), -\fIshmem_long_put_nbi\fP(3), -\fIshmem_longlong_put_nbi\fP(3), -\fIshmem_float_put_nbi\fP(3), -\fIshmem_double_put_nbi\fP(3), -\fIshmem_longdouble_put_nbi\fP(3), -\fIshmem_put8_nbi\fP(3), -\fIshmem_put16_nbi\fP(3), -\fIshmem_put32_nbi\fP(3), -\fIshmem_put64_nbi\fP(3), -\fIshmem_put128_nbi\fP(3), -\- The nonblocking put routines provide a method for copying data from a contiguous local data object to a data object on a specified PE. 
-.SH SYNOPSIS - -C or C++: -.Vb -#include - -void shmem_putmem_nbi(void *dest, const void *source, - size_t nelems, int pe); - -void shmem_char_put(char *dest, const char *source, - size_t nelems, int pe); - -void shmem_short_put(short *dest, const short *source, - size_t nelems, int pe); - -void shmem_int_put(int *dest, const int *source, - size_t nelems, int pe); - -void shmem_long_put(long *dest, const long *source, - size_t nelems, int pe); - -void shmem_longlong_put(long long *dest, const long long *source, - size_t nelems, int pe); - -void shmem_float_put(float *dest, const float *source, - size_t nelems, int pe); - -void shmem_double_put(double *dest, const double *source, - size_t nelems, int pe); - -void shmem_longdouble_put(long double *dest, const long double *source, - size_t nelems, int pe); - -void shmem_put8(void *dest, const void *source, - size_t nelems, int pe); - -void shmem_put16(void *dest, const void *source, - size_t nelems, int pe); - -void shmem_put32(void *dest, const void *source, - size_t nelems, int pe); - -void shmem_put64(void *dest, const void *source, - size_t nelems, int pe); - -void shmem_put128(void *dest, const void *source, - size_t nelems, int pe); - -.Ve -Fortran: -.Vb -INCLUDE "mpp/shmem.fh" - -INTEGER nelems, pe - -CALL SHMEM_PUTMEM_NBI(dest, source, nelems, pe) - -CALL SHMEM_CHARACTER_PUT_NBI(dest, source, nelems, pe) - -CALL SHMEM_COMPLEX_PUT_NBI(dest, source, nelems, pe) - -CALL SHMEM_DOUBLE_PUT_NBI(dest, source, nelems, pe) - -CALL SHMEM_INTEGER_PUT_NBI(dest, source, nelems, pe) - -CALL SHMEM_LOGICAL_PUT_NBI(dest, source, nelems, pe) - -CALL SHMEM_REAL_PUT_NBI(dest, source, nelems, pe) - -CALL SHMEM_PUT4_NBI(dest, source, nelems, pe) - -CALL SHMEM_PUT8_NBI(dest, source, nelems, pe) - -CALL SHMEM_PUT32_NBI(dest, source, nelems, pe) - -CALL SHMEM_PUT64_NBI(dest, source, nelems, pe) - -CALL SHMEM_PUT128_NBI(dest, source, nelems, pe) - -.Ve -.SH DESCRIPTION - -The routines return after posting the operation. 
The operation is considered -complete after a subsequent call to shmem_quiet. At the completion of shmem_quiet, -the data has been copied into the dest array on the destination PE. -The delivery of data words into the data object on the destination PE may occur -in any order. Furthermore, two successive put routines may deliver data out of -order unless a call to shmem_fence is introduced between the two calls. -.PP -The arguments are as follows: -.TP -dest -Data object to be updated on the remote PE. This data object must be -remotely accessible. -.TP -source -Data object containing the data to be copied. -.TP -nelems -Number of elements in the dest and source arrays. nelems must be -of type size_t for C. If you are using Fortran, it must be a constant, -variable, or array element of default integer type. -.TP -pe -PE number of the remote PE. pe must be of type integer. If you are using Fortran, it -must be a constant, variable, or array element of default integer type. -.PP -If you are using Fortran, data types must be of default size. For example, a real variable must -be declared as REAL, REAL*4, or REAL(KIND=4). -.SH NOTES - -See \fIintro_shmem\fP(3) -for a definition of the term remotely accessible. -.SH EXAMPLES - -Consider this simple example for C. 
-.Vb -#include -#include - -main() -{ - long source[10] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 }; - static long target[10]; - shmem_init(); - - if (shmem_my_pe() == 0) { - /* put 10 words into target on PE 1 */ - shmem_long_put_nbi(target, source, 10, 1); - shmem_quiet(); - } - shmem_barrier_all(); /* sync sender and receiver */ - if (shmem_my_pe() == 1) - shmem_udcflush(); /* not required on Altix systems */ - printf("target[0] on PE %d is %d\\n", shmem_my_pe(), target[0]); -} -.Ve -.SH SEE ALSO - -\fIintro_shmem\fP(3), -\fIshmem_quiet\fP(3) diff --git a/oshmem/shmem/man/man3/shmem_quiet.3in b/oshmem/shmem/man/man3/shmem_quiet.3in deleted file mode 100644 index cd92ae17944..00000000000 --- a/oshmem/shmem/man/man3/shmem_quiet.3in +++ /dev/null @@ -1,84 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2015 University of Houston. All rights reserved. -.\" Copyright (c) 2015 Mellanox Technologies, Inc. -.\" $COPYRIGHT$ -.de Vb -.ft CW -.nf -.. -.de Ve -.ft R - -.fi -.. -.TH "SHMEM\\_QUIET" "3" "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME - -\fIshmem_quiet\fP(3) -\- Waits for completion of all outstanding remote writes issued by a -processing element (PE). -.SH SYNOPSIS - -C or C++: -.Vb -#include - -void shmem_quiet(void); -.Ve -Fortran: -.Vb -CALL SHMEM_QUIET -.Ve -.SH DESCRIPTION - -shmem_quiet ensures ordering of put (remote write) operations. All put operations issued to -any processing element (PE) prior to the call to shmem_quiet are guaranteed to be visible to -all other PEs no later than any subsequent memory load or store, remote put or get, or -synchronization operations that follow the call to shmem_quiet. -.SH NOTES - -shmem_quiet is most useful as a way of ensuring ordering of delivery of several put -operations. For example, you might use shmem_quiet to await delivery of a block of data -before issuing another put, which sets a completion flag on another PE. 
-.br -shmem_quiet is not usually needed if \fIshmem_barrier_all\fP(3) -or -\fIshmem_barrier\fP(3) -are called. The barrier routines all wait for the completion of -outstanding remote writes (puts). -.SH EXAMPLES - -.Vb -PROGRAM COMPFLAG - INCLUDE "mpp/shmem.fh" - - INTEGER FLAG_VAR, ARRAY(100), RECEIVER, SENDER - COMMON/FLAG/FLAG_VAR - COMMON/DATA/ARRAY - INTRINSIC MY_PE - - FLAG_VAR = 0 - CALL SHMEM_BARRIER_ALL ! wait for FLAG_VAR to be initialized - SENDER = 0 ! PE 0 sends the data - RECEIVER = 1 ! PE 1 receives the data - - IF (MY_PE() .EQ. 0) THEN - ARRAY = 33 - CALL SHMEM_PUT(ARRAY, ARRAY, 100, RECEIVER) ! start sending data - CALL SHMEM_QUIET ! wait for delivery - CALL SHMEM_PUT(FLAG_VAR, 1, 1, RECEIVER) ! send completion flag - ELSE IF (MY_PE() .EQ. RECEIVER) THEN - CALL SHMEM_UDCFLUSH - CALL SHMEM_WAIT(FLAG_VAR, 0) - PRINT *,ARRAY ! ARRAY has been delivered - ENDIF -END -.Ve -.SH SEE ALSO - -\fIintro_shmem\fP(3), -\fIshmem_barrier\fP(3), -\fIshmem_barrier_all\fP(3), -\fIshmem_fence\fP(3), -\fIshmem_put\fP(3), -\fIshmem_wait\fP(3) diff --git a/oshmem/shmem/man/man3/shmem_realloc.3in b/oshmem/shmem/man/man3/shmem_realloc.3in deleted file mode 100644 index 6cdb8014e9e..00000000000 --- a/oshmem/shmem/man/man3/shmem_realloc.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_malloc.3 diff --git a/oshmem/shmem/man/man3/shmem_set_cache_inv.3in b/oshmem/shmem/man/man3/shmem_set_cache_inv.3in deleted file mode 100644 index 4a6a361ef97..00000000000 --- a/oshmem/shmem/man/man3/shmem_set_cache_inv.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_udcflush.3 diff --git a/oshmem/shmem/man/man3/shmem_set_cache_line_inv.3in b/oshmem/shmem/man/man3/shmem_set_cache_line_inv.3in deleted file mode 100644 index 4a6a361ef97..00000000000 --- a/oshmem/shmem/man/man3/shmem_set_cache_line_inv.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_udcflush.3 diff --git a/oshmem/shmem/man/man3/shmem_set_lock.3in b/oshmem/shmem/man/man3/shmem_set_lock.3in deleted file mode 100644 index 
d3b57361ea3..00000000000 --- a/oshmem/shmem/man/man3/shmem_set_lock.3in +++ /dev/null @@ -1,78 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2015 University of Houston. All rights reserved. -.\" Copyright (c) 2015 Mellanox Technologies, Inc. -.\" $COPYRIGHT$ -.de Vb -.ft CW -.nf -.. -.de Ve -.ft R - -.fi -.. -.TH "SHMEM\\_LOCK" "3" "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME - -\fIshmem_set_lock\fP(3), -\fIshmem_clear_lock\fP(3), -\fIshmem_test_lock\fP(3) -\- Releases, locks, and tests a mutual exclusion memory lock. -.SH SYNOPSIS - -C or C++: -.Vb -#include - -void shmem_clear_lock(volatile long *lock); - -void shmem_set_lock(volatile long *lock); - -int shmem_test_lock(volatile long *lock); -.Ve -Fortran: -.Vb -INCLUDE "mpp/shmem.fh" - -INTEGER lock, SHMEM_TEST_LOCK - -CALL SHMEM_CLEAR_LOCK(lock) - -CALL SHMEM_SET_LOCK(lock) - -I = SHMEM_TEST_LOCK(lock) -.Ve -.SH DESCRIPTION - -The shmem_set_lock routine sets a mutual exclusion lock after waiting for the lock to be -freed by any other PE currently holding the lock. Waiting PEs are assured of getting the lock -in a first\-come, first\-served manner. -.PP -The shmem_clear_lock routine releases a lock previously set by shmem_set_lock after -ensuring that all local and remote stores initiated in the critical region are complete. -.PP -The shmem_test_lock function sets a mutual exclusion lock only if it is currently cleared. -By using this function, a PE can avoid blocking on a set lock. If the lock is currently set, the -function returns without waiting. -These routines are appropriate for protecting a critical region from simultaneous update by -multiple PEs. -They accept the following arguments: -.TP -lock -A symmetric data object that is a scalar variable or an array of length 1. This -data object must be set to 0 on all processing elements (PEs) prior to the first use. lock must -be of type integer. If you are using Fortran, it must be of default kind. 
-.PP -.SH NOTES - -The term symmetric data object is defined on \fIintro_shmem\fP(3)\&. -.PP -.SH RETURN VALUES - -The shmem_test_lock function returns 0 if the lock was originally cleared and this call -was able to set the lock. A value of 1 is returned if the lock had been set and the call returned -without waiting to set the lock. -.PP -.SH SEE ALSO - -\fIintro_shmem\fP(3) diff --git a/oshmem/shmem/man/man3/shmem_short_and_to_all.3in b/oshmem/shmem/man/man3/shmem_short_and_to_all.3in deleted file mode 100644 index 065ee57fdc3..00000000000 --- a/oshmem/shmem/man/man3/shmem_short_and_to_all.3in +++ /dev/null @@ -1,206 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2015 University of Houston. All rights reserved. -.\" Copyright (c) 2014-2016 Mellanox Technologies, Inc. -.\" $COPYRIGHT$ -.de Vb -.ft CW -.nf -.. -.de Ve -.ft R - -.fi -.. -.TH "SHMEM\\_AND" "3" "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME - -\fIshmem_int_and_to_all\fP(3), -\fIshmem_int4_and_to_all\fP(3), -\fIshmem_int8_and_to_all\fP(3), -\fIshmem_long_and_to_all\fP(3), -\fIshmem_longlong_and_to_all\fP(3), -\fIshmem_short_and_to_all\fP(3) -\- Performs a bitwise AND operation on symmetric -arrays over the active set of PEs. 
-.SH SYNOPSIS - -C or C++: -.Vb -#include - -void shmem_int_and_to_all(int *target, const int *source, - int nreduce, int PE_start, int logPE_stride, int PE_size, - int *pWrk, long *pSync); - -void shmem_long_and_to_all(long *target, const long *source, - int nreduce, int PE_start, int logPE_stride, int PE_size, - long *pWrk, long *pSync); - -void shmem_longlong_and_to_all(long long *target, - const long long *source, int nreduce, int PE_start, int logPE_stride, - int PE_size, long long *pWrk, long *pSync); - -void shmem_short_and_to_all(short *target, const short *source, - int nreduce, int PE_start, int logPE_stride, int PE_size, - short *pWrk, long *pSync); - -.Ve -Fortran: -.Vb -INCLUDE "mpp/shmem.fh" - -INTEGER pSync(SHMEM_REDUCE_SYNC_SIZE) -INTEGER nreduce, PE_start, logPE_stride, PE_size - -CALL SHMEM_INT4_AND_TO_ALL(target, source, nreduce, -& PE_start, logPE_stride, PE_size, pWrk, pSync) - -CALL SHMEM_INT8_AND_TO_ALL(target, source, nreduce, -& PE_start, logPE_stride, PE_size, pWrk, pSync) -.Ve -.PP -.SH DESCRIPTION - -The shared memory (SHMEM) reduction routines compute one or more reductions across -symmetric arrays on multiple virtual PEs. A reduction performs an associative binary -operation across a set of values. For a list of other SHMEM reduction routines, see -\fIintro_shmem\fP(3)\&. -.PP -The nreduce argument determines the number of separate reductions to perform. The source -array on all PEs in the active set provides one element for each reduction. The results of the -reductions are placed in the target array on all PEs in the active set. The active set is defined -by the PE_start, logPE_stride, PE_size triplet. -.PP -The source and target arrays may be the same array, but they may not be overlapping arrays. -As with all SHMEM collective routines, each of these routines assumes that only PEs in the -active set call the routine. If a PE not in the active set calls a SHMEM collective routine, -undefined behavior results. 
-.PP -The arguments are as follows: -.TP -target -A symmetric array, of length nreduce elements, to receive the result of the -reduction operations. The data type of target varies with the version of the reduction routine -being called. When calling from C/C++, refer to the SYNOPSIS section for data type -information. When calling from Fortran, the target data types are as follows: -.RS -.TP -\fBshmem_int8_and_to_all\fP: Integer, with an element size of 8 bytes -.TP -\fBshmem_int4_and_to_all\fP: Integer, with an element size of 4 bytes -.RE -.RS -.PP -.RE -.TP -source -A symmetric array, of length nreduce elements, that contains one element for -each separate reduction operation. The source argument must have the same data type as -target. -.TP -nreduce -The number of elements in the target and source arrays. nreduce must be of -type integer. If you are using Fortran, it must be a default integer value. -.TP -PE_start -The lowest virtual PE number of the active set of PEs. PE_start must be of -type integer. If you are using Fortran, it must be a default integer value. -.TP -logPE_stride -The log (base 2) of the stride between consecutive virtual PE numbers in -the active set. logPE_stride must be of type integer. If you are using Fortran, it must be a -default integer value. -.TP -PE_size -The number of PEs in the active set. PE_size must be of type integer. If you -are using Fortran, it must be a default integer value. -.TP -pWrk -A symmetric work array. The pWrk argument must have the same data type as -target. In C/C++, this contains max(nreduce/2 + 1, -_SHMEM_REDUCE_MIN_WRKDATA_SIZE) elements. In Fortran, this contains -max(nreduce/2 + 1, SHMEM_REDUCE_MIN_WRKDATA_SIZE) elements. -.TP -pSync -A symmetric work array. In C/C++, pSync must be of type long and size -_SHMEM_REDUCE_SYNC_SIZE. In Fortran, pSync must be of type integer and size -SHMEM_REDUCE_SYNC_SIZE. If you are using Fortran, it must be a default integer value. 
-Every element of this array must be initialized with the value _SHMEM_SYNC_VALUE (in -C/C++) or SHMEM_SYNC_VALUE (in Fortran) before any of the PEs in the active set enter -the reduction routine. -.PP -The values of arguments nreduce, PE_start, logPE_stride, and PE_size must be equal on all -PEs in the active set. The same target and source arrays, and the same pWrk and pSync work -arrays, must be passed to all PEs in the active set. -.PP -Before any PE calls a reduction routine, you must ensure that the following conditions exist -(synchronization via a barrier or some other method is often needed to ensure this): The -pWrk and pSync arrays on all PEs in the active set are not still in use from a prior call to a -collective SHMEM routine. The target array on all PEs in the active set is ready to accept the -results of the reduction. -.PP -Upon return from a reduction routine, the following are true for the local PE: The target array -is updated. The values in the pSync array are restored to the original values. -.PP -.SH NOTES - -The terms collective, symmetric, and cache aligned are defined in \fIintro_shmem\fP(3)\&. -All SHMEM reduction routines reset the values in pSync before they return, so a particular -pSync buffer need only be initialized the first time it is used. -.PP -You must ensure that the pSync array is not being updated on any PE in the active set while -any of the PEs participate in processing of a SHMEM reduction routine. Be careful to avoid the -following situations: If the pSync array is initialized at run time, some type of -synchronization is needed to ensure that all PEs in the working set have initialized pSync -before any of them enter a SHMEM routine called with the pSync synchronization array. A -pSync or pWrk array can be reused in a subsequent reduction routine call only if none of the -PEs in the active set are still processing a prior reduction routine call that used the same -pSync or pWrk arrays. 
In general, this can be assured only by doing some type of -synchronization. However, in the special case of reduction routines being called with the -same active set, you can allocate two pSync and pWrk arrays and alternate between them on -successive calls. -.PP -.SH EXAMPLES - -\fBExample 1\fP: -This Fortran example statically initializes the pSync array and finds the logical AND of the integer variable FOO across all even PEs. -.Vb -INCLUDE "mpp/shmem.fh" - -INTEGER PSYNC(SHMEM_REDUCE_SYNC_SIZE) -DATA PSYNC /SHMEM_REDUCE_SYNC_SIZE*SHMEM_SYNC_VALUE/ -PARAMETER (NR=1) -REAL PWRK(MAX(NR/2+1, SHMEM_REDUCE_MIN_WRKDATA_SIZE)) -INTEGER FOO, FOOAND -COMMON /COM/ FOO, FOOAND, PWRK -INTRINSIC MY_PE - -IF ( MOD(MY_PE(),2) .EQ. 0) THEN - CALL SHMEM_INT8_AND_TO_ALL(FOOAND, FOO, NR, 0, 1, N$PES/2, - & PWRK, PSYNC) - PRINT *, 'Result on PE ', MY_PE(), ' is ', FOOAND -ENDIF -.Ve -\fBExample 2\fP: -Consider the following C call: -.Vb -shmem_int_and_to_all( target, source, 3, 0, 0, 8, pwrk, psync ); -.Ve -The preceding call is more efficient, but semantically equivalent to, the combination of the -following calls: -.Vb -shmem_int_and_to_all(&(target[0]), &(source[0]), 1, 0, 0, 8, - pwrk1, psync1); - -shmem_int_and_to_all(&(target[1]), &(source[1]), 1, 0, 0, 8, - pwrk2, psync2); - -shmem_int_and_to_all(&(target[2]), &(source[2]), 1, 0, 0, 8, - pwrk1, psync1); -.Ve -Note that two sets of pWrk and pSync arrays are used alternately because no -synchronization is done between calls. 
-.SH SEE ALSO - -\fIf90\fP(1), -\fIintro_shmem\fP(3) diff --git a/oshmem/shmem/man/man3/shmem_short_g.3in b/oshmem/shmem/man/man3/shmem_short_g.3in deleted file mode 100644 index cd9ba27b1bc..00000000000 --- a/oshmem/shmem/man/man3/shmem_short_g.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_char_g.3 diff --git a/oshmem/shmem/man/man3/shmem_short_get.3in b/oshmem/shmem/man/man3/shmem_short_get.3in deleted file mode 100644 index 7cecf74d7ad..00000000000 --- a/oshmem/shmem/man/man3/shmem_short_get.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_char_get.3 diff --git a/oshmem/shmem/man/man3/shmem_short_get_nbi.3in b/oshmem/shmem/man/man3/shmem_short_get_nbi.3in deleted file mode 100644 index 50449e90a6d..00000000000 --- a/oshmem/shmem/man/man3/shmem_short_get_nbi.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_getmem_nbi.3 diff --git a/oshmem/shmem/man/man3/shmem_short_iget.3in b/oshmem/shmem/man/man3/shmem_short_iget.3in deleted file mode 100644 index 5538cd707b7..00000000000 --- a/oshmem/shmem/man/man3/shmem_short_iget.3in +++ /dev/null @@ -1,217 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2015 University of Houston. All rights reserved. -.\" Copyright (c) 2015 Mellanox Technologies, Inc. -.\" $COPYRIGHT$ -.de Vb -.ft CW -.nf -.. -.de Ve -.ft R - -.fi -.. 
-.TH "SHMEM\\_IGET" "3" "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME - -\fIshmem_complex_iget\fP(3), -\fIshmem_double_iget\fP(3), -\fIshmem_float_iget\fP(3), -\fIshmem_iget4\fP(3), -\fIshmem_iget8\fP(3), -\fIshmem_iget32\fP(3), -\fIshmem_iget64\fP(3), -\fIshmem_iget128\fP(3), -\fIshmem_int_iget\fP(3), -\fIshmem_integer_iget\fP(3), -\fIshmem_logical_iget\fP(3), -\fIshmem_long_iget\fP(3), -\fIshmem_longdouble_iget\fP(3), -\fIshmem_longlong_iget\fP(3), -\fIshmem_real_iget\fP(3), -\fIshmem_short_iget\fP(3) -\- Transfers strided data from a specified processing element (PE) -.SH SYNOPSIS - -C or C++: -.Vb -#include -void shmem_iget32(void *target, const void *source, - ptrdiff_t tst, ptrdiff_t sst, size_t len, int pe); - -void shmem_iget64(void *target, const void *source, - ptrdiff_t tst, ptrdiff_t sst, size_t len, int pe); - -void shmem_iget128(void *target, const void *source, - ptrdiff_t tst, ptrdiff_t sst, size_t len, int pe); - -void shmem_int_iget(int *target, const int *source, - ptrdiff_t tst, ptrdiff_t sst, size_t len, int pe); - -void shmem_double_iget(double *target, const double *source, - ptrdiff_t tst, ptrdiff_t sst, size_t len, int pe); - -void shmem_float_iget(float *target, const float *source, - ptrdiff_t tst, ptrdiff_t sst, size_t len, int pe); - -void shmem_long_iget(long *target, const long *source, - ptrdiff_t tst, ptrdiff_t sst, size_t len, int pe); - -void shmem_longdouble_iget(long double *target, - const long double *source, ptrdiff_t tst, ptrdiff_t sst,size_t len, int pe); - -void shmem_longlong_iget(long long *target, - const long long *source, ptrdiff_t tst, ptrdiff_t sst, size_t len, int pe); - -void shmem_short_iget(short *target, - const short *source, ptrdiff_t tst, ptrdiff_t sst, size_t len, int pe); -.Ve -Fortran: -.Vb -INCLUDE "mpp/shmem.fh" - -INTEGER tst, sst, len, pe - -CALL SHMEM_COMPLEX_IGET(target, source, tst, sst, len, -& pe) - -CALL SHMEM_DOUBLE_IGET(target, source, tst, sst, len, -& pe) - -CALL 
SHMEM_IGET4(target, source, tst, sst, len, pe) - -CALL SHMEM_IGET8(target, source, tst, sst, len, pe) - -CALL SHMEM_IGET32(target, source, tst, sst, len, pe) - -CALL SHMEM_IGET64(target, source, tst, sst, len, pe) - -CALL SHMEM_IGET128(target, source, tst, sst, len, pe) - -CALL SHMEM_INTEGER_IGET(target, source, tst, sst, len, -& pe) - -CALL SHMEM_LOGICAL_IGET(target, source, tst, sst, len, -& pe) - -CALL SHMEM_REAL_IGET(target, source, tst, sst, len, pe) -.Ve -.SH DESCRIPTION - -The strided get routines retrieve array data available at address source on remote PE (pe). -The elements of the \fBsource\fP -array are separated by a stride \fBsst\fP\&. -Once the data is received, -it is stored at the local memory address \fBtarget\fP, -separated by stride \fBtst\fP\&. -The routines return -when the data has been copied into the local \fBtarget\fP -array. -.PP -The arguments are as follows: -.TP -target -Array to be updated on the local PE. -.TP -source -Array containing the data to be copied on the remote PE. -.TP -tst -The stride between consecutive elements of the target array. The stride is scaled by -the element size of the target array. A value of 1 indicates contiguous data. tst must be of -type integer. If you are calling from Fortran, it must be a default integer value. -.TP -sst -The stride between consecutive elements of the source array. The stride is scaled -by the element size of the source array. A value of 1 indicates contiguous data. sst must be -of type integer. If you are calling from Fortran, it must be a default integer value. -.TP -len -Number of elements in the target and source arrays. len must be of type integer. If -you are using Fortran, it must be a constant, variable, or array element of default integer -type. -.TP -pe -PE number of the remote PE. pe must be of type integer. If you are using Fortran, it -must be a constant, variable, or array element of default integer type. 
-.PP -The target and source data objects must conform to typing constraints, which are as -follows: -.TP -\fBshmem_iget32, shmem_iget4\fP: Any noncharacter type that has a storage size -equal to 32 bits. -.TP -\fBshmem_iget64, shmem_iget8\fP: Any noncharacter type that has a storage size -equal to 64 bits. -.TP -\fBshmem_iget128\fP: Any noncharacter type that has a storage size equal to -128 bits. -.TP -\fBshmem_short_iget\fP: Elements of type short. -.TP -\fBshmem_int_iget\fP: Elements of type int. -.TP -\fBshmem_long_iget\fP: Elements of type long. -.TP -\fBshmem_longlong_iget\fP: Elements of type long long. -.TP -\fBshmem_float_iget\fP: Elements of type float. -.TP -\fBshmem_double_iget\fP: Elements of type double. -.TP -\fBshmem_longdouble_iget\fP: Elements of type long double. -.TP -\fBSHMEM_COMPLEX_IGET\fP: Elements of type complex of default size. -.TP -\fBSHMEM_DOUBLE_IGET\fP: (Fortran) Elements of type double precision. -.TP -\fBSHMEM_INTEGER_IGET\fP: Elements of type integer. -.TP -\fBSHMEM_LOGICAL_IGET\fP: Elements of type logical. -.TP -\fBSHMEM_REAL_IGET\fP: Elements of type real. -.TP -\fBshmem_longdouble_iget\fP: Elements of type long double. -.TP -\fBSHMEM_COMPLEX_IGET\fP: Elements of type complex of default size. -.TP -\fBSHMEM_DOUBLE_IGET\fP: (Fortran) Elements of type double precision. -.TP -\fBSHMEM_INTEGER_IGET\fP: Elements of type integer. -.TP -\fBSHMEM_LOGICAL_IGET\fP: Elements of type logical. -.TP -\fBSHMEM_REAL_IGET\fP: Elements of type real. -.PP -If you are using Fortran, data types must be of default size. For example, a real variable must -be declared as REAL, REAL*4, or REAL(KIND=4). -.PP -.SH NOTES - -See \fIintro_shmem\fP(3) -for a definition of the term remotely accessible. -.PP -.SH EXAMPLES - -The following simple example uses shmem_logical_iget in a Fortran program. Compile -this example with the \-lsma compiler option. -.Vb -PROGRAM STRIDELOGICAL - LOGICAL SOURCE(10), TARGET(5) - SAVE SOURCE ! 
SAVE MAKES IT REMOTELY ACCESSIBLE - DATA SOURCE /.T.,.F.,.T.,.F.,.T.,.F.,.T.,.F.,.T.,.F./ - DATA TARGET / 5*.F. / - - CALL START_PES(2) - IF (MY_PE() .EQ. 0) THEN - CALL SHMEM_LOGICAL_IGET(TARGET, SOURCE, 1, 2, 5, 1) - PRINT*,'TARGET AFTER SHMEM_LOGICAL_IGET:',TARGET - ENDIF - CALL SHMEM_BARRIER_ALL -END -.Ve -.SH SEE ALSO - -\fIintro_shmem\fP(3), -\fIshmem_get\fP(3), -\fIshmem_quiet\fP(3) diff --git a/oshmem/shmem/man/man3/shmem_short_iput.3in b/oshmem/shmem/man/man3/shmem_short_iput.3in deleted file mode 100644 index 103bf2034a0..00000000000 --- a/oshmem/shmem/man/man3/shmem_short_iput.3in +++ /dev/null @@ -1,220 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2015 University of Houston. All rights reserved. -.\" Copyright (c) 2015 Mellanox Technologies, Inc. -.\" $COPYRIGHT$ -.de Vb -.ft CW -.nf -.. -.de Ve -.ft R - -.fi -.. -.TH "SHMEM\\_IPUT" "3" "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME - -\fIshmem_complex_iput\fP(3), -\fIshmem_double_iput\fP(3), -\fIshmem_float_iput\fP(3), -\fIshmem_int_iput\fP(3), -\fIshmem_integer_iput\fP(3), -\fIshmem_iput4\fP(3), -\fIshmem_iput8\fP(3), -\fIshmem_iput32\fP(3), -\fIshmem_iput64\fP(3), -\fIshmem_iput128\fP(3), -\fIshmem_logical_iput\fP(3), -\fIshmem_long_iput\fP(3), -\fIshmem_longdouble_iput\fP(3), -\fIshmem_longlong_iput\fP(3), -\fIshmem_real_iput\fP(3), -\fIshmem_short_iput\fP(3) -\- Transfer strided data to a specified processing element (PE). 
-.SH SYNOPSIS - -C or C++: -.Vb -#include - -void shmem_double_iput(double *target, const double *source, - ptrdiff_t tst, ptrdiff_t sst, size_t len, int pe); - -void shmem_float_iput(float *target, const float *source, - ptrdiff_t tst, ptrdiff_t sst, size_t len, int pe); - -void shmem_int_iput(int *target, const int *source, - ptrdiff_t tst, ptrdiff_t sst, size_t len, int pe); - -void shmem_iput32(void *target, const void *source, - ptrdiff_t tst, ptrdiff_t sst, size_t len, int pe); - -void shmem_iput64(void *target, const void *source, - ptrdiff_t tst, ptrdiff_t sst, size_t len, int pe); - -void shmem_iput128(void *target, const void *source, - ptrdiff_t tst, ptrdiff_t sst, size_t len, int pe); - -void shmem_long_iput(long *target, const long *source, - ptrdiff_t tst, ptrdiff_t sst, size_t len, int pe); - -void shmem_longdouble_iput(long double *target, - const long double *source, ptrdiff_t tst, ptrdiff_t sst, - size_t len, int pe); - -void shmem_longlong_iput(long long *target, - const long long *source, ptrdiff_t tst, ptrdiff_t sst, - size_t len, int pe); - -void shmem_short_iput(short *target, const short *source, - ptrdiff_t tst, ptrdiff_t sst, size_t len, int pe); -.Ve -Fortran: -.Vb -INCLUDE "mpp/shmem.fh" - -INTEGER tst, sst, len, pe - -CALL SHMEM_COMPLEX_IPUT(target, source, tst, sst, len, -& pe) - -CALL SHMEM_DOUBLE_IPUT(target, source, tst, sst, len, -& pe) - -CALL SHMEM_INTEGER_IPUT(target, source, tst, sst, len, -& pe) - -CALL SHMEM_IPUT4(target, source, tst, sst, len, pe) - -CALL SHMEM_IPUT8(target, source, tst, sst, len, pe) - -CALL SHMEM_IPUT32(target, source, tst, sst, len, pe) - -CALL SHMEM_IPUT64(target, source, tst, sst, len, pe) - -CALL SHMEM_IPUT128(target, source, tst, sst, len, pe) - -CALL SHMEM_LOGICAL_IPUT(target, source, tst, sst, len, -& pe) - -CALL SHMEM_REAL_IPUT(target, source, tst, sst, len, pe) -.Ve -.PP -.SH DESCRIPTION - -The shmem_iput routines read the elements of a local array (\fBsource\fP) -and write them -to a remote array 
(\fBtarget\fP) -on the PE indicated by \fBpe\fP\&. -These routines return -when the data has been copied out of the source array on the local PE but not necessarily -before the data has been delivered to the remote data object. -.PP -The arguments are as follows: -.TP -target -Array to be updated on the remote PE. This data object must be remotely -accessible. -.TP -source -Array containing the data to be copied. -.TP -tst -The stride between consecutive elements of the target array. The stride is scaled by -the element size of the target array. A value of 1 indicates contiguous data. tst must be of -type integer. If you are using Fortran, it must be a default integer value. -.TP -sst -The stride between consecutive elements of the source array. The stride is scaled -by the element size of the source array. A value of 1 indicates contiguous data. sst must be -of type integer. If you are using Fortran, it must be a default integer value. -.TP -len -Number of elements in the target and source arrays. len must be of type integer. If -you are using Fortran, it must be a constant, variable, or array element of default integer -type. -.TP -pe -PE number of the remote PE. pe must be of type integer. If you are using Fortran, it -must be a constant, variable, or array element of default integer type. -.PP -The target and source data objects must conform to typing constraints, which are as follows: -.PP -.TP -\fBshmem_iput32, shmem_iput4\fP: Any noncharacter type that has a storage size equal -to 32 bits. -.TP -\fBshmem_iput64, shmem_iput8\fP: Any noncharacter type that has a storage size equal -to 64 bits. -.TP -\fBshmem_iput128\fP: Any noncharacter type that has a storage size equal to 128 bits. -.TP -\fBshmem_short_iput\fP: Elements of type short. -.TP -\fBshmem_int_iput\fP: Elements of type int. -.TP -\fBshmem_long_iput\fP: Elements of type long. -.TP -\fBshmem_longlong_iput\fP: Elements of type long long. -.TP -\fBshmem_float_iput\fP: Elements of type float. 
-.TP -\fBshmem_double_iput\fP: Elements of type double. -.TP -\fBshmem_longdouble_iput\fP: Elements of type long double. -.TP -\fBSHMEM_COMPLEX_IPUT\fP: Elements of type complex of default size. -.TP -\fBSHMEM_DOUBLE_IPUT\fP: (Fortran) Elements of type double precision. -.TP -\fBSHMEM_INTEGER_IPUT\fP: Elements of type integer. -.TP -\fBSHMEM_LOGICAL_IPUT\fP: Elements of type logical. -.TP -\fBSHMEM_REAL_IPUT\fP: Elements of type real. -.TP -\fBSHMEM_LOGICAL_IPUT\fP: Elements of type logical. -.TP -\fBSHMEM_REAL_IPUT\fP: Elements of type real. -.PP -If you are using Fortran, data types must be of default size. For example, a real variable must -be declared as REAL, REAL*4 or REAL(KIND=4). -.PP -.SH NOTES - -See \fIintro_shmem\fP(3) -for a definition of the term remotely accessible. -.PP -.SH EXAMPLES - -Consider the following simple shmem_long_iput example for C/C++ programs. -.Vb -#include - -main() -{ - short source[10] = { 1, 2, 3, 4, 5, - 6, 7, 8, 9, 10 }; - static short target[10]; - - shmem_init(); - if (shmem_my_pe() == 0) { - /* put 10 words into target on PE 1 */ - shmem_short_iput(target, source, 1, 2, 5, 1); - } - shmem_barrier_all(); /* sync sender and receiver */ - if (shmem_my_pe() == 1) { - shmem_udcflush(); /* not required on IRIX systems */ - printf("target on PE %d is %d %d %d %d %d0, shmem_my_pe(), - (int)target[0], (int)target[1], (int)target[2], - (int)target[3], (int)target[4] ); - } - shmem_barrier_all(); /* sync before exiting */ -} -.Ve -.SH SEE ALSO - -\fIintro_shmem\fP(3), -\fIshmem_iget\fP(3), -\fIshmem_put\fP(3), -\fIshmem_quiet\fP(3) diff --git a/oshmem/shmem/man/man3/shmem_short_max_to_all.3in b/oshmem/shmem/man/man3/shmem_short_max_to_all.3in deleted file mode 100644 index a522c3e7c2e..00000000000 --- a/oshmem/shmem/man/man3/shmem_short_max_to_all.3in +++ /dev/null @@ -1,238 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2015 University of Houston. All rights reserved. -.\" Copyright (c) 2014-2016 Mellanox Technologies, Inc. 
-.\" $COPYRIGHT$ -.de Vb -.ft CW -.nf -.. -.de Ve -.ft R - -.fi -.. -.TH "SHMEM\\_MAX" "3" "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME - -\fIshmem_double_max_to_all\fP(3), -\fIshmem_float_max_to_all\fP(3), -\fIshmem_int_max_to_all\fP(3), -\fIshmem_int4_max_to_all\fP(3), -\fIshmem_int8_max_to_all\fP(3), -\fIshmem_long_max_to_all\fP(3), -\fIshmem_longdouble_max_to_all\fP(3), -\fIshmem_longlong_max_to_all\fP(3), -\fIshmem_real4_max_to_all\fP(3), -\fIshmem_real8_max_to_all\fP(3), -\fIshmem_real16_max_to_all\fP(3), -\fIshmem_short_max_to_all\fP(3) -\- Performs a maximum function reduction across a set of processing elements (PEs). -.SH SYNOPSIS - -C or C++: -.Vb -#include - -void shmem_double_max_to_all(double *target, const double *source, - int nreduce, int PE_start, int logPE_stride, int PE_size, - double *pWrk, long *pSync); - -void shmem_float_max_to_all(float *target, const float *source, - int nreduce, int PE_start, int logPE_stride, int PE_size, - float *pWrk, long *pSync); - -void shmem_int_max_to_all(int *target, const int *source, - int nreduce, int PE_start, int logPE_stride, int PE_size, - int *pWrk, long *pSync); - -void shmem_long_max_to_all(long *target, const long *source, - int nreduce, int PE_start, int logPE_stride, int PE_size, - long *pWrk, long *pSync); - -void shmem_longdouble_max_to_all(long double *target, - const long double *source, int nreduce, int PE_start, - int logPE_stride, int PE_size, long double *pWrk, long *pSync); - -void shmem_longlong_max_to_all(long long *target, - const long long *source, int nreduce, int PE_start, - int logPE_stride, int PE_size, long long *pWrk, long *pSync); - - void shmem_short_max_to_all(short *target, const short *source, - int nreduce, int PE_start, int logPE_stride, int PE_size, - short *pWrk, long *pSync); -.Ve -Fortran: -.Vb -INCLUDE "mpp/shmem.fh" - -INTEGER pSync(SHMEM_REDUCE_SYNC_SIZE) - -INTEGER nreduce, PE_start, logPE_stride, PE_size - -CALL SHMEM_INT4_MAX_TO_ALL(target, source, 
nreduce, -& PE_start, logPE_stride, PE_size, pWrk, pSync) - -CALL SHMEM_INT8_MAX_TO_ALL(target, source, nreduce, -& PE_start, logPE_stride, PE_size, pWrk, pSync) - -CALL SHMEM_REAL4_MAX_TO_ALL(target, source, nreduce, -& PE_start, logPE_stride, PE_size, pWrk, pSync) - -CALL SHMEM_REAL8_MAX_TO_ALL(target, source, nreduce, -& PE_start, logPE_stride, PE_size, pWrk, pSync) - -CALL SHMEM_REAL16_MAX_TO_ALL(target, source, nreduce, -& PE_start, logPE_stride, PE_size, pWrk, pSync) -.Ve -.SH DESCRIPTION - -The shared memory (SHMEM) reduction routines compute one or more reductions across -symmetric arrays on multiple virtual PEs. A reduction performs an associative binary -operation across a set of values. For a list of other SHMEM reduction routines, see -\fIintro_shmem\fP(3)\&. -.PP -As with all SHMEM collective routines, each of these routines assumes that only PEs in the -active set call the routine. If a PE not in the active set calls a SHMEM collective routine, -undefined behavior results. -.PP -The nreduce argument determines the number of separate reductions to perform. The source -array on all PEs in the active set provides one element for each reduction. The results of the -reductions are placed in the target array on all PEs in the active set. The active set is defined -by the PE_start, logPE_stride, PE_size triplet. -.PP -The source and target arrays may be the same array, but they may not be overlapping arrays. -.PP -The arguments are as follows: -.TP -target -A symmetric array of length nreduce elements to receive the results of the -reduction operations. The data type of target varies with the version of the reduction routine -being called. When calling from C, refer to the SYNOPSIS section for data type information. -.PP -When calling from Fortran, the target data types are as follows: -.RS -.TP -\fBshmem_comp8_max_to_all\fP: Complex, with an element size equal to two -8\-byte real values. 
-.TP -\fBshmem_int4_max_to_all\fP: Integer, with an element size of 4 bytes. -.TP -\fBshmem_int8_max_to_all\fP: Integer, with an element size of 8 bytes. -.TP -\fBshmem_real4_max_to_all\fP: Real, with an element size of 4 bytes. -.TP -\fBshmem_real16_max_to_all\fP: Real, with an element size of 16 bytes. -.RE -.RS -.PP -.RE -.TP -source -A symmetric array of length nreduce elements that contains one element for -each separate reduction operation. The source argument must have the same data type as -target. -.TP -nreduce -The number of elements in the target and source arrays. nreduce must be of -type integer. If you are using Fortran, it must be a default integer value. -.TP -PE_start -The lowest virtual PE number of the active set of PEs. PE_start must be of -type integer. If you are using Fortran, it must be a default integer value. -.TP -logPE_stride -The log (base 2) of the stride between consecutive virtual PE numbers in -the active set. logPE_stride must be of type integer. If you are using Fortran, it must be a -default integer value. -.TP -PE_size -The number of PEs in the active set. PE_size must be of type integer. If you -are using Fortran, it must be a default integer value. -.TP -pWrk -A symmetric work array. The pWrk argument must have the same data type as -target. In C/C++, this contains max(nreduce/2 + 1, -_SHMEM_REDUCE_MIN_WRKDATA_SIZE) elements. In Fortran, this contains -max(nreduce/2 + 1, SHMEM_REDUCE_MIN_WRKDATA_SIZE) elements. -.TP -pSync -A symmetric work array. In C/C++, pSync is of type long and size -_SHMEM_REDUCE_SYNC_SIZE. In Fortran, pSync is of type integer and size -SHMEM_REDUCE_SYNC_SIZE. If you are using Fortran, it must be a default integer value. -Every element of this array must be initialized with the value _SHMEM_SYNC_VALUE (in -C/C++) or SHMEM_SYNC_VALUE (in Fortran) before any of the PEs in the active set enter -the reduction routine. 
-.PP -The values of arguments nreduce, PE_start, logPE_stride, and PE_size must be equal on all -PEs in the active set. The same target and source arrays, and the same pWrk and pSync work -arrays, must be passed to all PEs in the active set. -.PP -Before any PE calls a reduction routine, you must ensure that the following conditions exist -(synchronization via a barrier or some other method is often needed to ensure this): The -pWrk and pSync arrays on all PEs in the active set are not still in use from a prior call to a -collective SHMEM routine. The target array on all PEs in the active set is ready to accept the -results of the reduction. -.PP -Upon return from a reduction routine, the following are true for the local PE: The target array -is updated. The values in the pSync array are restored to the original values. -.PP -.SH NOTES - -The terms collective, symmetric, and cache aligned are defined in \fIintro_shmem\fP(3)\&. -All SHMEM reduction routines reset the values in pSync before they return, so a particular -pSync buffer need only be initialized the first time it is used. -.PP -You must ensure that the pSync array is not being updated on any PE in the active set while -any of the PEs participate in processing of a SHMEM reduction routine. Be careful of the -following situations: If the pSync array is initialized at run time, some type of -synchronization is needed to ensure that all PEs in the working set have initialized pSync -before any of them enter a SHMEM routine called with the pSync synchronization array. A -pSync or pWrk array can be reused in a subsequent reduction routine call only if none -of the PEs in the active set are still processing a prior reduction routine call that used the -same pSync or pWrk arrays. -.PP -In general, this can be assured only by doing some type of synchronization. 
However, in the -special case of reduction routines being called with the same active set, you can allocate two -pSync and pWrk arrays and alternate between them on successive calls. -.PP -.SH EXAMPLES - -\fBExample 1:\fP -This Fortran example statically initializes the pSync array and finds the -maximum value of real variable FOO across all even PEs. -.Vb -INCLUDE "mpp/shmem.fh" - -INTEGER PSYNC(SHMEM_REDUCE_SYNC_SIZE) -DATA PSYNC /SHMEM_REDUCE_SYNC_SIZE*SHMEM_SYNC_VALUE/ -PARAMETER (NR=1) -REAL FOO, FOOMAX, PWRK(MAX(NR/2+1,SHMEM_REDUCE_MIN_WRKDATA_SIZE)) -COMMON /COM/ FOO, FOOMAX, PWRK -INTRINSIC MY_PE - -IF ( MOD(MY_PE(),2) .EQ. 0) THEN - CALL SHMEM_REAL8_MAX_TO_ALL(FOOMAX, FOO, NR, 0, 1, N$PES/2, - & PWRK, PSYNC) - PRINT *, 'Result on PE ', MY_PE(), ' is ', FOOMAX -ENDIF -.Ve -\fBExample 2:\fP -Consider the following C/C++ call: -.Vb -shmem_int_max_to_all( target, source, 3, 0, 0, 8, pwrk, psync ); -.Ve -The preceding call is more efficient, but semantically equivalent to, the combination of the -following calls: -.Vb -shmem_int_max_to_all(&(target[0]), &(source[0]), 1, 0, 0, 8, - pwrk1, psync1); -shmem_int_max_to_all(&(target[1]), &(source[1]), 1, 0, 0, 8, - pwrk2, psync2); -shmem_int_max_to_all(&(target[2]), &(source[2]), 1, 0, 0, 8, - pwrk1, psync1); -.Ve -Note that two sets of pWrk and pSync arrays are used alternately because no synchronization -is done between calls. -.SH SEE ALSO - -\fIintro_shmem\fP(3) diff --git a/oshmem/shmem/man/man3/shmem_short_min_to_all.3in b/oshmem/shmem/man/man3/shmem_short_min_to_all.3in deleted file mode 100644 index 6b0e3c4a0c5..00000000000 --- a/oshmem/shmem/man/man3/shmem_short_min_to_all.3in +++ /dev/null @@ -1,234 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2015 University of Houston. All rights reserved. -.\" Copyright (c) 2014-2016 Mellanox Technologies, Inc. -.\" $COPYRIGHT$ -.de Vb -.ft CW -.nf -.. -.de Ve -.ft R - -.fi -.. 
-.TH "SHMEM\\_MIN" "3" "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME - -\fIshmem_double_min_to_all\fP(3), -\fIshmem_float_min_to_all\fP(3), -\fIshmem_int_min_to_all\fP(3), -\fIshmem_int4_min_to_all\fP(3), -\fIshmem_int8_min_to_all\fP(3), -\fIshmem_long_min_to_all\fP(3), -\fIshmem_longdouble_min_to_all\fP(3), -\fIshmem_longlong_min_to_all\fP(3), -\fIshmem_real4_min_to_all\fP(3), -\fIshmem_real8_min_to_all\fP(3), -\fIshmem_real16_min_to_all\fP(3), -\fIshmem_short_min_to_all\fP(3) -\- Performs a minimum function reduction across a set of processing elements (PEs) -.SH SYNOPSIS - -C or C++: -.Vb -#include - -void shmem_double_min_to_all(double *target, const double *source, - int nreduce, int PE_start, int logPE_stride, int PE_size, - double *pWrk, long *pSync); - -void shmem_float_min_to_all(float *target, const float *source, - int nreduce, int PE_start, int logPE_stride, int PE_size, - float *pWrk, long *pSync); - -void shmem_int_min_to_all(int *target, const int *source, - int nreduce, int PE_start, int logPE_stride, int PE_size, - int *pWrk, long *pSync); - -void shmem_long_min_to_all(long *target, const long *source, - int nreduce, int PE_start, int logPE_stride, int PE_size, - long *pWrk, long *pSync); - -void shmem_longdouble_min_to_all(long double *target, - const long double *source, int nreduce, int PE_start, - int logPE_stride, int PE_size, long double *pWrk, - long *pSync); - -void shmem_longlong_min_to_all(long long *target, - const long long *source, int nreduce, int PE_start, int logPE_stride, - int PE_size, long long *pWrk, long *pSync); - -void shmem_short_min_to_all(short *target, const short *source, - int nreduce, int PE_start, int logPE_stride, int PE_size, - short *pWrk, long *pSync); -.Ve -Fortran: -.Vb -INCLUDE "mpp/shmem.fh" - -INTEGER pSync(SHMEM_REDUCE_SYNC_SIZE) -INTEGER nreduce, PE_start, logPE_stride, PE_size - -CALL SHMEM_INT4_MIN_TO_ALL(target, source, nreduce, PE_start, -& logPE_stride, PE_size, pWrk, pSync) - -CALL 
SHMEM_INT8_MIN_TO_ALL(target, source, nreduce, PE_start, -& logPE_stride, PE_size, pWrk, pSync) - -CALL SHMEM_REAL4_MIN_TO_ALL(target, source, nreduce, PE_start, -& logPE_stride, PE_size, pWrk, pSync) - -CALL SHMEM_REAL8_MIN_TO_ALL(target, source, nreduce, PE_start, -& logPE_stride, PE_size, pWrk, pSync) - -CALL SHMEM_REAL16_MIN_TO_ALL(target, source, nreduce, PE_start, -& logPE_stride, PE_size, pWrk, pSync) -.Ve -.SH DESCRIPTION - -The shared memory (SHMEM) reduction routines compute one or more reductions across -symmetric arrays on multiple virtual PEs. A reduction performs an associative binary -operation across a set of values. For a list of other SHMEM reduction routines, see -\fIintro_shmem\fP(3)\&. -.PP -As with all SHMEM collective routines, each of these routines assumes that only PEs in the -active set call the routine. If a PE not in the active set calls a SHMEM collective routine, -undefined behavior results. -.PP -The nreduce argument determines the number of separate reductions to perform. The source -array on all PEs in the active set provides one element for each reduction. The results of the -reductions are placed in the target array on all PEs in the active set. The active set is defined -by the PE_start, logPE_stride, PE_size triplet. -.PP -The source and target arrays may be the same array, but they may not be overlapping arrays. -.PP -The arguments are as follows: -.TP -target -A symmetric array of length nreduce elements to receive the results of the -reduction operations. The data type of target varies with the version of the reduction routine -being called. When calling from C/C++, refer to the SYNOPSIS section for data type -information. 
When calling from Fortran, the target data types are as follows: -.RS -.TP -\fBshmem_int4_min_to_all\fP: Integer, with an element size of 4 bytes -.TP -\fBshmem_int8_min_to_all\fP: Integer, with an element size of 8 bytes -.TP -\fBshmem_real4_min_to_all\fP: Real, with an element size of 4 bytes -.TP -\fBshmem_real8_min_to_all\fP: Real, with an element size of 8 bytes -.TP -\fBshmem_real16_min_to_all\fP: Real, with an element size of 16 bytes -.TP -\fBsource A symmetric array\fP: of length nreduce elements, that contains one -element for each separate reduction operation. The source argument must have the same -data type as target. -.RE -.RS -.PP -.RE -.TP -nreduce -The number of elements in the target and source arrays. nreduce must be of -type integer. If you are using Fortran, it must be a default integer value. -.TP -PE_start -The lowest virtual PE number of the active set of PEs. PE_start must be of -type integer. If you are using Fortran, it must be a default integer value. -.TP -logPE_stride -The log (base 2) of the stride between consecutive virtual PE numbers in -the active set. logPE_stride must be of type integer. If you are using Fortran, it must be a -default integer value. -.TP -PE_size -The number of PEs in the active set. PE_size must be of type integer. If you -are using Fortran, it must be a default integer value. -.TP -pWrk -A symmetric work array. The pWrk argument must have the same data type as -target. In C/C++, this contains max(nreduce/2 + 1, -_SHMEM_REDUCE_MIN_WRKDATA_SIZE) elements. In Fortran, this contains -max(nreduce/2 + 1, SHMEM_REDUCE_MIN_WRKDATA_SIZE) elements. -.TP -pSync -A symmetric work array. In C/C++, pSync is of type long and size -_SHMEM_REDUCE_SYNC_SIZE. In Fortran, pSync is of type integer and size -SHMEM_REDUCE_SYNC_SIZE. If you are using Fortran, it must be a default integer value. 
-Every element of this array must be initialized with the value _SHMEM_SYNC_VALUE (in -C/C++) or SHMEM_SYNC_VALUE (in Fortran) before any of the PEs in the active set enter -the reduction routine. -.PP -The values of arguments nreduce, PE_start, logPE_stride, and PE_size must be equal on all -PEs in the active set. The same target and source arrays, and the same pWrk and pSync work -arrays, must be passed to all PEs in the active set. -.PP -Before any PE calls a reduction routine, you must ensure that the following conditions exist -(synchronization via a barrier or some other method is often needed to ensure this): The -pWrk and pSync arrays on all PEs in the active set are not still in use from a prior call to a -collective SHMEM routine. The target array on all PEs in the active set is ready to accept the -results of the reduction. -.PP -Upon return from a reduction routine, the following are true for the local PE: The target array -is updated. The values in the pSync array are restored to the original values. -.PP -.SH NOTES - -The terms collective, symmetric, and cache aligned are defined in \fIintro_shmem\fP(3)\&. -All SHMEM reduction routines reset the values in pSync before they return, so a particular -pSync buffer need only be initialized the first time it is used. -.PP -You must ensure that the pSync array is not being updated on any PE in the active set while -any of the PEs participate in processing of a SHMEM reduction routine. Be careful of the -following situations: If the pSync array is initialized at run time, some type of -synchronization is needed to ensure that all PEs in the working set have initialized pSync -before any of them enter a SHMEM routine called with the pSync synchronization array. A -pSync or pWrk array can be reused in a subsequent reduction routine call only if none -of the PEs in the active set are still processing a prior reduction routine call that used the -same pSync or pWrk arrays. 
In general, this can be assured only by doing some type of -synchronization. However, in the special case of reduction routines being called with the -same active set, you can allocate two pSync and pWrk arrays and alternate between them on -successive calls. -.PP -.SH EXAMPLES - -\fBExample 1:\fP -This Fortran example statically initializes the pSync array and finds the -minimum value of real variable FOO across all the even PEs. -.Vb -INCLUDE "mpp/shmem.fh" - -INTEGER PSYNC(SHMEM_REDUCE_SYNC_SIZE) -DATA PSYNC /SHMEM_REDUCE_SYNC_SIZE*SHMEM_SYNC_VALUE/ -PARAMETER (NR=1) -REAL FOO, FOOMIN, PWRK(MAX(NR/2+1,SHMEM_REDUCE_MIN_WRKDATA_SIZE)) -COMMON /COM/ FOO, FOOMIN, PWRK -INTRINSIC MY_PE - -IF ( MOD(MY_PE(),2) .EQ. 0) THEN - CALL SHMEM_REAL8_MIN_TO_ALL(FOOMIN, FOO, NR, 0, 1, N$PES/2, - & PWRK, PSYNC) - PRINT *, 'Result on PE ', MY_PE(), ' is ', FOOMIN -ENDIF -.Ve -\fBExample 2:\fP -Consider the following C/C++ call: -.Vb -shmem_int_min_to_all( target, source, 3, 0, 0, 8, pwrk, psync ); -.Ve -The preceding call is more efficient, but semantically equivalent to, the combination of the -following calls: -.Vb -shmem_int_min_to_all(&(target[0]), &(source[0]), 1, 0, 0, 8, - pwrk1, psync1); -shmem_int_min_to_all(&(target[1]), &(source[1]), 1, 0, 0, 8, - pwrk2, psync2); -shmem_int_min_to_all(&(target[2]), &(source[2]), 1, 0, 0, 8, - pwrk1, psync1); -.Ve -Note that two sets of pWrk and pSync arrays are used alternately because no synchronization -is done between calls. -.SH SEE ALSO - -\fIintro_shmem\fP(3) diff --git a/oshmem/shmem/man/man3/shmem_short_or_to_all.3in b/oshmem/shmem/man/man3/shmem_short_or_to_all.3in deleted file mode 100644 index bcb5a2c0c53..00000000000 --- a/oshmem/shmem/man/man3/shmem_short_or_to_all.3in +++ /dev/null @@ -1,202 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2015 University of Houston. All rights reserved. -.\" Copyright (c) 2014-2016 Mellanox Technologies, Inc. -.\" $COPYRIGHT$ -.de Vb -.ft CW -.nf -.. -.de Ve -.ft R - -.fi -.. 
-.TH "SHMEM\\_OR" "3" "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME - -\fIshmem_int_or_to_all\fP(3), -\fIshmem_int4_or_to_all\fP(3), -\fIshmem_int8_or_to_all\fP(3), -\fIshmem_long_or_to_all\fP(3), -\fIshmem_longlong_or_to_all\fP(3), -\fIshmem_short_or_to_all\fP(3) -\- Performs a bitwise OR function reduction across a set of processing elements (PEs) -.SH SYNOPSIS - -C or C++: -.Vb -#include - -void shmem_int_or_to_all(int *target, const int *source, - int nreduce, int PE_start, int logPE_stride, int PE_size, - int *pWrk, long *pSync); - -void shmem_long_or_to_all(long *target, const long *source, - int nreduce, int PE_start, int logPE_stride, int PE_size, - long *pWrk, long *pSync); - -void shmem_longlong_or_to_all(long long *target, - const long long *source, int nreduce, int PE_start, int logPE_stride, - int PE_size, long long *pWrk, long *pSync); - -void shmem_short_or_to_all(short *target, const short *source, - int nreduce, int PE_start, int logPE_stride, int PE_size, - short *pWrk, long *pSync); -.Ve -Fortran: -.Vb -INCLUDE "mpp/shmem.fh" - -INTEGER pSync(SHMEM_REDUCE_SYNC_SIZE) -INTEGER nreduce, PE_start, logPE_stride, PE_size - -CALL SHMEM_INT4_OR_TO_ALL(target, source, nreduce, PE_start, -& logPE_stride, PE_size, pWrk, pSync) - -CALL SHMEM_INT8_OR_TO_ALL(target, source, nreduce, PE_start, -& logPE_stride, PE_size, pWrk, pSync) -.Ve -.SH DESCRIPTION - -The shared memory (SHMEM) reduction routines compute one or more reductions across -symmetric arrays on multiple virtual PEs. A reduction performs an associative binary -operation across a set of values. For a list of other SHMEM reduction routines, see -intro_shmem(3). -.PP -As with all SHMEM collective routines, each of these routines assumes that only PEs in the -active set call the routine. If a PE not in the active set calls a SHMEM collective routine, -undefined behavior results. -.PP -The nreduce argument determines the number of separate reductions to perform. 
The source -array on all PEs in the active set provides one element for each reduction. The results of the -reductions are placed in the target array on all PEs in the active set. The active set is defined -by the PE_start, logPE_stride, PE_size triplet. -.PP -The source and target arrays may be the same array, but they may not be overlapping arrays. -.PP -The arguments are as follows: -.TP -target -A symmetric array of length nreduce elements to receive the results of the -reduction operations. The data type of target varies with the version of the reduction routine -being called. When calling from C/C++, refer to the SYNOPSIS section for data type -information. When calling from Fortran, the target data types are as follows: -.RS -.TP -\fBshmem_int8_or_to_all\fP Integer, with an element size of 8 bytes. -.TP -\fBshmem_int4_or_to_all\fP Integer, with an element size of 4 bytes. -.RE -.RS -.PP -.RE -.TP -source -A symmetric array, of length nreduce elements, that contains one element for -each separate reduction operation. The source argument must have the same data type as -target. -.TP -nreduce -The number of elements in the target and source arrays. nreduce must be of -type integer. If you are using Fortran, it must be a default integer value. -.TP -PE_start -The lowest virtual PE number of the active set of PEs. PE_start must be of -type integer. If you are using Fortran, it must be a default integer value. -.TP -logPE_stride -The log (base 2) of the stride between consecutive virtual PE numbers in -the active set. logPE_stride must be of type integer. If you are using Fortran, it must be a -default integer value. -.TP -PE_size -The number of PEs in the active set. PE_size must be of type integer. If you -are using Fortran, it must be a default integer value. -.TP -pWrk -A symmetric work array. The pWrk argument must have the same data type as -target. In C/C++, this contains max(nreduce/2 + 1, -_SHMEM_REDUCE_MIN_WRKDATA_SIZE) elements. 
In Fortran, this contains -max(nreduce/2 + 1, SHMEM_REDUCE_MIN_WRKDATA_SIZE) elements. -.TP -pSync -A symmetric work array. In C/C++, pSync is of type long and size -_SHMEM_REDUCE_SYNC_SIZE. In Fortran, pSync is of type integer and size -SHMEM_REDUCE_SYNC_SIZE. If you are using Fortran, it must be a default integer value. -Every element of this array must be initialized with the value _SHMEM_SYNC_VALUE (in -C/C++) or SHMEM_SYNC_VALUE (in Fortran) before any of the PEs in the active set enter -the reduction routine. -.PP -The values of arguments nreduce, PE_start, logPE_stride, and PE_size must be equal on -all PEs in the active set. The same target and source arrays, and the same pWrk and pSync -work arrays, must be passed to all PEs in the active set. -.PP -Before any PE calls a reduction routine, you must ensure that the following conditions exist -(synchronization via a barrier or some other method is often needed to ensure this): The -pWrk and pSync arrays on all PEs in the active set are not still in use from a prior call to a -collective SHMEM routine. The target array on all PEs in the active set is ready to accept the -results of the reduction. -.PP -Upon return from a reduction routine, the following are true: The target array is updated. The -values in the pSync array are restored to the original values. -.PP -.SH NOTES - -The terms collective, symmetric, and cache aligned are defined in \fIintro_shmem\fP(3)\&. -All SHMEM reduction routines reset the values in pSync before they return, so a particular -pSync buffer need only be initialized the first time it is used. -.PP -You must ensure that the pSync array is not being updated on any PE in the active set while -any of the PEs participate in processing of a SHMEM reduction routine. 
Be careful to avoid -these situations: If the pSync array is initialized at run time, some type of synchronization is -needed to ensure that all PEs in the working set have initialized pSync before any of them -enter a SHMEM routine called with the pSync synchronization array. A pSync or pWrk array -can be reused in a subsequent reduction routine call only if none of the PEs in the active set -are still processing a prior reduction routine call that used the same pSync or pWrk arrays. In -general, this can be assured only by doing some type of synchronization. However, in the -special case of reduction routines being called with the same active set, you can allocate two -pSync and pWrk arrays and alternate between them on successive calls. -.PP -.SH EXAMPLES - -\fBExample 1:\fP -This Fortran example statically initializes the pSync array and finds the -logical OR of the integer variable FOO across all even PEs. -.Vb -INCLUDE "mpp/shmem.fh" - -INTEGER PSYNC(SHMEM_REDUCE_SYNC_SIZE) -DATA PSYNC /SHMEM_REDUCE_SYNC_SIZE*SHMEM_SYNC_VALUE/ -PARAMETER (NR=1) -REAL PWRK(MAX(NR/2+1,SHMEM_REDUCE_MIN_WRKDATA_SIZE)) -INTEGER FOO, FOOOR -COMMON /COM/ FOO, FOOOR, PWRK -INTRINSIC MY_PE - -IF ( MOD(MY_PE(),2) .EQ. 0) THEN - CALL SHMEM_INT8_OR_TO_ALL(FOOOR, FOO, NR, 0, 1, N$PES/2, - & PWRK, PSYNC) - PRINT *,'Result on PE ',MY_PE(),' is ',FOOOR -ENDIF -.Ve -\fBExample 2:\fP -Consider the following C/C++ call: -.Vb -shmem_int_or_to_all( target, source, 3, 0, 0, 8, pwrk, psync ); -.Ve -The preceding call is more efficient, but semantically equivalent to, the combination of the -following calls: -.Vb -shmem_int_or_to_all(&(target[0]), &(source[0]), 1, 0, 0, 8, - pwrk1, psync1); -shmem_int_or_to_all(&(target[1]), &(source[1]), 1, 0, 0, 8, - pwrk2, psync2); -shmem_int_or_to_all(&(target[2]), &(source[2]), 1, 0, 0, 8, - pwrk1, psync1); -.Ve -Note that two sets of pWrk and pSync arrays are used alternately because no synchronization -is done between calls. 
-.PP -.SH SEE ALSO - -\fIintro_shmem\fP(3) diff --git a/oshmem/shmem/man/man3/shmem_short_p.3in b/oshmem/shmem/man/man3/shmem_short_p.3in deleted file mode 100644 index 42b9fd0e81f..00000000000 --- a/oshmem/shmem/man/man3/shmem_short_p.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_char_p.3 diff --git a/oshmem/shmem/man/man3/shmem_short_prod_to_all.3in b/oshmem/shmem/man/man3/shmem_short_prod_to_all.3in deleted file mode 100644 index b03b3dc18e1..00000000000 --- a/oshmem/shmem/man/man3/shmem_short_prod_to_all.3in +++ /dev/null @@ -1,259 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2015 University of Houston. All rights reserved. -.\" Copyright (c) 2014-2016 Mellanox Technologies, Inc. -.\" $COPYRIGHT$ -.de Vb -.ft CW -.nf -.. -.de Ve -.ft R - -.fi -.. -.TH "SHMEM\\_PROD" "3" "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME - -\fIshmem_comp4_prod_to_all\fP(3), -\fIshmem_comp8_prod_to_all\fP(3), -\fIshmem_complexd_prod_to_all\fP(3), -\fIshmem_complexf_prod_to_all\fP(3), -\fIshmem_double_prod_to_all\fP(3), -\fIshmem_float_prod_to_all\fP(3), -\fIshmem_int_prod_to_all\fP(3), -\fIshmem_int4_prod_to_all\fP(3), -\fIshmem_int8_prod_to_all\fP(3), -\fIshmem_long_prod_to_all\fP(3), -\fIshmem_longdouble_prod_to_all\fP(3), -\fIshmem_longlong_prod_to_all\fP(3), -\fIshmem_real8_prod_to_all\fP(3), -\fIshmem_real16_prod_to_all\fP(3), -\fIshmem_real4_prod_to_all\fP(3), -\fIshmem_short_prod_to_all\fP(3) -\- Performs -a product reduction across a set of processing elements (PEs) -.SH SYNOPSIS - -C or C++: -.Vb -#include - -void shmem_complexd_prod_to_all(double complex *target, - const double complex *source, int nreduce, int PE_start, - int logPE_stride, int PE_size, double complex *pWrk, - long *pSync); - -void shmem_complexf_prod_to_all(float complex *target, - const float complex *source, int nreduce, int PE_start, - int logPE_stride, int PE_size, float complex *pWrk, - long *pSync); - -void shmem_double_prod_to_all(double *target, const double *source, - int nreduce, 
int PE_start, int logPE_stride, int PE_size, - double *pWrk, long *pSync); - -void shmem_float_prod_to_all(float *target, const float *source, - int nreduce, int PE_start, int logPE_stride, int PE_size, - float *pWrk, long *pSync); - -void shmem_int_prod_to_all(int *target, const int *source, - int nreduce, int PE_start, int logPE_stride, int PE_size, - int *pWrk, long *pSync); - -void shmem_long_prod_to_all(long *target, const long *source, - int nreduce, int PE_start, int logPE_stride, int PE_size, - long *pWrk, long *pSync); - -void shmem_longdouble_prod_to_all(long double *target, - const long double *source, int nreduce, int PE_start, - int logPE_stride, int PE_size, long double *pWrk, - long *pSync); - -void shmem_longlong_prod_to_all(long long *target, - const long long *source, int nreduce, int PE_start, - int logPE_stride, int PE_size, long long *pWrk, - long *pSync); - -void shmem_short_prod_to_all(short *target, const short *source, - int nreduce, int PE_start, int logPE_stride, int PE_size, - short *pWrk, long *pSync); -.Ve -Fortran: -.Vb -INCLUDE "mpp/shmem.fh" - -INTEGER pSync(SHMEM_REDUCE_SYNC_SIZE) -INTEGER nreduce, PE_start, logPE_stride, PE_size - -CALL SHMEM_COMP4_PROD_TO_ALL(target, source, nreduce, PE_start, -& logPE_stride, PE_size, pWrk, pSync) - -CALL SHMEM_COMP8_PROD_TO_ALL(target, source, nreduce, PE_start, -& logPE_stride, PE_size, pWrk, pSync) - -CALL SHMEM_INT4_PROD_TO_ALL(target, source, nreduce, PE_start, -& logPE_stride, PE_size, pWrk, pSync) - -CALL SHMEM_INT8_PROD_TO_ALL(target, source, nreduce, PE_start, -& logPE_stride, PE_size, pWrk, pSync) - -CALL SHMEM_REAL4_PROD_TO_ALL(target, source, nreduce, PE_start, -& logPE_stride, PE_size, pWrk, pSync) - -CALL SHMEM_REAL8_PROD_TO_ALL(target, source, nreduce, PE_start, -& logPE_stride, PE_size, pWrk, pSync) - -CALL SHMEM_REAL16_PROD_TO_ALL(target, source, nreduce, PE_start, -& logPE_stride, PE_size, pWrk, pSync) -.Ve -.SH DESCRIPTION - -The shared memory (SHMEM) reduction routines 
compute one or more reductions across -symmetric arrays on multiple virtual PEs. A reduction performs an associative binary -operation across a set of values. For a list of other SHMEM reduction routines, see -\fIintro_shmem\fP(3)\&. -.PP -As with all SHMEM collective routines, each of these routines assumes that only PEs in the -active set call the routine. If a PE not in the active set calls a SHMEM collective routine, -undefined behavior results. -.PP -The nreduce argument determines the number of separate reductions to perform. The source -array on all PEs in the active set provides one element for each reduction. The results of the -reductions are placed in the target array on all PEs in the active set. The active set is defined -by the PE_start, logPE_stride, PE_size triplet. -.PP -The source and target arrays may be the same array, but they may not be overlapping arrays. -.PP -The arguments are as follows: -.TP -target -A symmetric array of length nreduce elements to receive the results of the -reduction operations. The data type of target varies with the version of the reduction routine -being called and the language used. When calling from C/C++, refer to the SYNOPSIS section -for data type information. When calling from Fortran, the target data types are as follows: -.RS -.TP -\fBshmem_comp4_prod_to_all\fP: Complex, with an element size equal to two -4\-byte real values. -.TP -\fBshmem_comp8_prod_to_all\fP: Complex, with an element size equal to two -8\-byte real values. 
-.TP -\fBshmem_int4_prod_to_all\fP: Integer, with an element size of 4 bytes -.TP -\fBshmem_int8_prod_to_all\fP: Integer, with an element size of 8 bytes -.TP -\fBshmem_real4_prod_to_all\fP: Real, with an element size of 4 bytes -.TP -\fBshmem_real8_prod_to_all\fP: Real, with an element size of 8 bytes -.TP -\fBshmem_real16_prod_to_all\fP: Real, with an element size of 16 bytes -.RE -.RS -.PP -.RE -.TP -source -A symmetric array, of length nreduce elements, that contains one element for -each separate reduction operation. The source argument must have the same data type as -target. -.TP -nreduce -The number of elements in the target and source arrays. nreduce must be of -type integer. If you are using Fortran, it must be a default integer value. -.TP -PE_start -The lowest virtual PE number of the active set of PEs. PE_start must be of -type integer. If you are using Fortran, it must be a default integer value. -.TP -logPE_stride -The log (base 2) of the stride between consecutive virtual PE numbers in -the active set. logPE_stride must be of type integer. If you are using Fortran, it must be a -default integer value. -.TP -PE_size -The number of PEs in the active set. PE_size must be of type integer. If you -are using Fortran, it must be a default integer value. -.TP -pWrk -A symmetric work array. The pWrk argument must have the same data type as -target. In C/C++, this contains max(nreduce/2 + 1, -_SHMEM_REDUCE_MIN_WRKDATA_SIZE) elements. In Fortran, this contains -max(nreduce/2 + 1, SHMEM_REDUCE_MIN_WRKDATA_SIZE) elements. -.TP -pSync -A symmetric work array. In C/C++, pSync is of type long and size -_SHMEM_REDUCE_SYNC_SIZE. In Fortran, pSync is of type integer and size -SHMEM_REDUCE_SYNC_SIZE. If you are using Fortran, it must be a default integer value. -Before any of the PEs in the active set enter the reduction routine, every element of this array -must be initialized with the value _SHMEM_SYNC_VALUE (in C/C++) or -SHMEM_SYNC_VALUE (in Fortran). 
-.PP -The values of arguments nreduce, PE_start, logPE_stride, and PE_size must be equal on all -PEs in the active set. The same target and source arrays, and the same pWrk and pSync work -arrays, must be passed to all PEs in the active set. Before any PE calls a reduction routine, you -must ensure that the following conditions exist (synchronization via a barrier or some -other method is often needed to ensure this): The pWrk and pSync arrays on all PEs in the -active set are not still in use from a prior call to a collective SHMEM routine. The target array -on all PEs in the active set is ready to accept the results of the reduction. -.PP -Upon return from a reduction routine, the following are true for the local PE: The target array -is updated. The values in the pSync array are restored to the original values. -.SH NOTES - -The terms collective, symmetric, and cache aligned are defined in \fIintro_shmem\fP(3)\&. -All SHMEM reduction routines reset the values in pSync before they return, so a particular -pSync buffer need only be initialized the first time it is used. -.PP -You must ensure that the pSync array is not being updated on any PE in the active set while -any of the PEs participate in processing of a SHMEM reduction routine. Be careful of the -following situations: If the pSync array is initialized at run time, some type of -synchronization is needed to ensure that all PEs in the working set have initialized pSync -before any of them enter a SHMEM routine called with the pSync synchronization array. A -pSync or pWrk array can be reused in a subsequent reduction routine call only if none of the -PEs in the active set are still processing a prior reduction routine call that used the same -pSync or pWrk arrays. In general, this can be assured only by doing some type of -synchronization. 
However, in the special case of reduction routines being called with the -same active set, you can allocate two pSync and pWrk arrays and alternate between them on -successive calls. -.SH EXAMPLES - -\fBExample 1:\fP -This Fortran example statically initializes the pSync array and finds the -product of the real variable FOO across all the even PEs. -.Vb -INCLUDE "mpp/shmem.fh" - -INTEGER PSYNC(SHMEM_REDUCE_SYNC_SIZE) -DATA PSYNC /SHMEM_REDUCE_SYNC_SIZE*SHMEM_SYNC_VALUE/ -PARAMETER (NR=1) -REAL FOO, FOOPROD, PWRK(MAX(NR/2+1,SHMEM_REDUCE_MIN_WRKDATA_SIZE)) -COMMON /COM/ FOO, FOOPROD, PWRK -INTRINSIC MY_PE - -IF ( MOD(MY_PE(),2) .EQ. 0) THEN - CALL SHMEM_COMP8_PROD_TO_ALL(FOOPROD, FOO, NR, 0, 1, N$PES/2, - & PWRK, PSYNC) - PRINT *, 'Result on PE ', MY_PE(), ' is ', FOOPROD -ENDIF -.Ve -\fBExample 2:\fP -Consider the following C/C++ call: -.Vb -shmem_short_prod_to_all(target, source, 3, 0, 0, 8, pwrk, psync); -.Ve -The preceding call is more efficient, but semantically equivalent to, the combination of the -following calls: -.Vb -shmem_short_prod_to_all(&(target[0]), &(source[0]), 1, 0, 0, 8, - pwrk1, psync1); -shmem_short_prod_to_all(&(target[1]), &(source[1]), 1, 0, 0, 8, - pwrk2, psync2); -shmem_short_prod_to_all(&(target[2]), &(source[2]), 1, 0, 0, 8, - pwrk1, psync1); -.Ve -Note that two sets of pWrk and pSync arrays are used alternately because no synchronization -is done between calls. 
-.SH SEE ALSO - -\fIintro_shmem\fP(3) diff --git a/oshmem/shmem/man/man3/shmem_short_put.3in b/oshmem/shmem/man/man3/shmem_short_put.3in deleted file mode 100644 index 9c7b2e25452..00000000000 --- a/oshmem/shmem/man/man3/shmem_short_put.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_char_put.3 diff --git a/oshmem/shmem/man/man3/shmem_short_put_nbi.3in b/oshmem/shmem/man/man3/shmem_short_put_nbi.3in deleted file mode 100644 index fb4ad1413b0..00000000000 --- a/oshmem/shmem/man/man3/shmem_short_put_nbi.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_putmem_nbi.3 diff --git a/oshmem/shmem/man/man3/shmem_short_sum_to_all.3in b/oshmem/shmem/man/man3/shmem_short_sum_to_all.3in deleted file mode 100644 index d94c72c1c67..00000000000 --- a/oshmem/shmem/man/man3/shmem_short_sum_to_all.3in +++ /dev/null @@ -1,281 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2015 University of Houston. All rights reserved. -.\" Copyright (c) 2014-2016 Mellanox Technologies, Inc. -.\" $COPYRIGHT$ -.de Vb -.ft CW -.nf -.. -.de Ve -.ft R - -.fi -.. 
-.TH "SHMEM\\_SUM" "3" "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME - -\fIshmem_comp4_sum_to_all\fP(3), -\fIshmem_comp8_sum_to_all\fP(3), -\fIshmem_complexd_sum_to_all\fP(3), -\fIshmem_complexf_sum_to_all\fP(3), -\fIshmem_double_sum_to_all\fP(3), -\fIshmem_float_sum_to_all\fP(3), -\fIshmem_int_sum_to_all\fP(3), -\fIshmem_int4_sum_to_all\fP(3), -\fIshmem_int8_sum_to_all\fP(3), -\fIshmem_long_sum_to_all\fP(3), -\fIshmem_longdouble_sum_to_all\fP(3), -\fIshmem_longlong_sum_to_all\fP(3), -\fIshmem_real4_sum_to_all\fP(3), -\fIshmem_real8_sum_to_all\fP(3), -\fIshmem_real16_sum_to_all\fP(3), -\fIshmem_short_sum_to_all\fP(3) -\- Performs -a sum reduction across a set of processing elements (PEs) -.SH SYNOPSIS - -C or C++: -.Vb -#include - -void shmem_complexd_sum_to_all(double complex *target, - const double complex *source, int nreduce, int PE_start, - int logPE_stride, int PE_size, double complex *pWrk, - long *pSync); - -void shmem_complexf_sum_to_all(float complex *target, - const float complex *source, int nreduce, int PE_start, - int logPE_stride, int PE_size, float complex *pWrk, - long *pSync); - -void shmem_double_sum_to_all(double *target, - const double *source, int nreduce, int PE_start, int logPE_stride, - int PE_size, double *pWrk, long *pSync); - -void shmem_float_sum_to_all(float *target, const float *source, - int nreduce, int PE_start, int logPE_stride, int PE_size, - float *pWrk, long *pSync); - -void shmem_int_sum_to_all(int *target, const int *source, - int nreduce, int PE_start, int logPE_stride, int PE_size, - int *pWrk, long *pSync); - -void shmem_long_sum_to_all(long *target, const long *source, - int nreduce, int PE_start, int logPE_stride, int PE_size, - long *pWrk, long *pSync); - -void shmem_longdouble_sum_to_all(long double *target, - const long double *source, int nreduce, int PE_start, int - logPE_stride, int PE_size, long double *pWrk, long *pSync); - -void shmem_longlong_sum_to_all(long long *target, - const long long 
*source, int nreduce, int PE_start, - int logPE_stride, int PE_size, long long *pWrk, - long *pSync); - -void shmem_short_sum_to_all(short *target, const short *source, - int nreduce, int PE_start, int logPE_stride, int PE_size, - short *pWrk, long *pSync); -.Ve -Fortran: -.Vb -INCLUDE "mpp/shmem.fh" - -INTEGER pSync(SHMEM_REDUCE_SYNC_SIZE) -INTEGER nreduce, PE_start, logPE_stride, PE_size - -CALL SHMEM_COMP4_SUM_TO_ALL(target, source, nreduce, -& PE_start, logPE_stride, PE_size, pWrk, pSync) - -CALL SHMEM_COMP8_SUM_TO_ALL(target, source, nreduce, -& PE_start, logPE_stride, PE_size, pWrk, pSync) - -CALL SHMEM_INT4_SUM_TO_ALL(target, source, nreduce, -& PE_start, logPE_stride, PE_size, pWrk, pSync) - -CALL SHMEM_INT8_SUM_TO_ALL(target, source, nreduce, -& PE_start, logPE_stride, PE_size, pWrk, pSync) - -CALL SHMEM_REAL4_SUM_TO_ALL(target, source, nreduce, -& PE_start, logPE_stride, PE_size, pWrk, pSync) - -CALL SHMEM_REAL8_SUM_TO_ALL(target, source, nreduce, -& PE_start, logPE_stride, PE_size, pWrk, pSync) - -CALL SHMEM_REAL16_SUM_TO_ALL(target, source, nreduce, -& PE_start, logPE_stride, PE_size, pWrk, pSync) -.Ve -.SH DESCRIPTION - -The shared memory (SHMEM) reduction routines compute one or more reductions across -symmetric arrays on multiple virtual PEs. A reduction performs an associative binary -operation across a set of values. For a list of other SHMEM reduction routines, see -\fIintro_shmem\fP(3)\&. -.PP -As with all SHMEM collective routines, each of these routines assumes that only PEs in the -active set call the routine. If a PE not in the active set calls a SHMEM collective routine, -undefined behavior results. -.PP -The nreduce argument determines the number of separate reductions to perform. The source -array on all PEs in the active set provides one element for each reduction. The results of the -reductions are placed in the target array on all PEs in the active set. The active set is defined -by the PE_start, logPE_stride, PE_size triplet. 
-.PP -The source and target arrays may be the same array, but they may not be overlapping arrays. -.PP -The arguments are as follows: -.TP -target -The remotely accessible integer data object to be updated on the remote PE. If -you are using C/C++, the type of target should match that implied in the SYNOPSIS section. -If you are using the Fortran compiler, it must be of type integer with an element size of 4 -bytes for SHMEM_INT4_ADD and 8 bytes for SHMEM_INT8_ADD. -.TP -value -The value to be atomically added to target. If you are using C/C++, the type of -value should match that implied in the SYNOPSIS section. If you are using Fortran, it must be -of type integer with an element size of target. -.TP -pe -An integer that indicates the PE number upon which target is to be updated. If you -are using Fortran, it must be a default integer value. -.TP -target -A symmetric array of length nreduce elements to receive the results of the -reduction operations. -.br -The data type of target varies with the version of the reduction routine being called and the -language used. When calling from C/C++, refer to the SYNOPSIS section for data type -information. When calling from Fortran, the target data types are as follows: -.RS -.TP -\fBshmem_comp4_sum_to_all:\fP COMPLEX(KIND=4). -.TP -\fBshmem_comp8_sum_to_all:\fP Complex. If you are using Fortran, it must be -a default complex value. -.TP -\fBshmem_int4_sum_to_all:\fP INTEGER(KIND=4). -.TP -\fBshmem_int8_sum_to_all:\fP Integer. If you are using Fortran, it must be a -default integer value. -.TP -\fBshmem_real4_sum_to_all:\fP REAL(KIND=4). -.TP -\fBshmem_real8_sum_to_all:\fP Real. If you are using Fortran, it must be a -default real value. -.TP -\fBshmem_real16_sum_to_all:\fP Real. If you are using Fortran, it must be a -default real value. -.RE -.RS -.PP -.RE -.TP -source -A symmetric array, of length nreduce elements, that contains one element for -each separate reduction operation. 
The source argument must have the same data type as -target. -.TP -nreduce -The number of elements in the target and source arrays. nreduce must be of -type integer. If you are using Fortran, it must be a default integer value. -.TP -PE_start -The lowest virtual PE number of the active set of PEs. PE_start must be of -type integer. If you are using Fortran, it must be a default integer value. -.TP -logPE_stride -The log (base 2) of the stride between consecutive virtual PE numbers in -the active set. logPE_stride must be of type integer. If you are using Fortran, it must be a -default integer value. -.TP -PE_size -The number of PEs in the active set. PE_size must be of type integer. If you -are using Fortran, it must be a default integer value. -.TP -pWrk -A symmetric work array. The pWrk argument must have the same data type as -target. In C/C++, this contains max(nreduce/2 + 1, -_SHMEM_REDUCE_MIN_WRKDATA_SIZE) elements. In Fortran, this contains -max(nreduce/2 + 1, SHMEM_REDUCE_MIN_WRKDATA_SIZE) elements. -.TP -pSync -A symmetric work array. In C/C++, pSync is of type long and size -_SHMEM_REDUCE_SYNC_SIZE. In Fortran, pSync is of type integer and size -SHMEM_REDUCE_SYNC_SIZE. It must be a default integer value. Every element of this array -must be initialized with the value _SHMEM_SYNC_VALUE (in C/C++) or -SHMEM_SYNC_VALUE (in Fortran) before any of the PEs in the active set enter the reduction -routine. -.PP -The values of arguments nreduce, PE_start, logPE_stride, and PE_size must be equal on all -PEs in the active set. The same target and source arrays, and the same pWrk and pSync work -arrays, must be passed to all PEs in the active set. -.PP -Before any PE calls a reduction routine, you must ensure that the following conditions exist -(synchronization via a barrier or some other method is often needed to ensure this): The -pWrk and pSync arrays on all PEs in the active set are not still in use from a prior call to a -collective SHMEM routine. 
The target array on all PEs in the active set is ready to accept the -results of the reduction. -.PP -Upon return from a reduction routine, the following are true for the local PE: The target array -is updated. The values in the pSync array are restored to the original values. -.SH NOTES - -The terms collective, symmetric, and cache aligned are defined in \fIintro_shmem\fP(3)\&. -.PP -All SHMEM reduction routines reset the values in pSync before they return, so a particular -pSync buffer need only be initialized the first time it is used. -.PP -You must ensure that the pSync array is not being updated on any PE in the active set while -any of the PEs participate in processing of a SHMEM reduction routine. Be careful of the -following situations: If the pSync array is initialized at run time, some type of -synchronization is needed to ensure that all PEs in the working set have initialized pSync -before any of them enter a SHMEM routine called with the pSync synchronization array. A -pSync or pWrk array can be reused in a subsequent reduction routine call only if none -of the PEs in the active set are still processing a prior reduction routine call that used the -same pSync or pWrk arrays. In general, this can be assured only by doing some -type of synchronization. However, in the special case of reduction routines being called with -the same active set, you can allocate two pSync and pWrk arrays and alternate between them -on successive calls. -.SH EXAMPLES - -\fBExample 1:\fP -This Fortran example statically initializes the pSync array and finds the -sum of the real variable FOO across all even PEs. -.Vb -INCLUDE "mpp/shmem.fh" - -INTEGER PSYNC(SHMEM_REDUCE_SYNC_SIZE) -DATA PSYNC /SHMEM_REDUCE_SYNC_SIZE*SHMEM_SYNC_VALUE/ -PARAMETER (NR=1) -REAL FOO, FOOSUM, PWRK(MAX(NR/2+1,SHMEM_REDUCE_MIN_WRKDATA_SIZE)) -COMMON /COM/ FOO, FOOSUM, PWRK -INTRINSIC MY_PE - -IF ( MOD(MY_PE(),2) .EQ. 
0) THEN - CALL SHMEM_INT4_SUM_TO_ALL(FOOSUM, FOO, NR, 0, 1, N$PES/2, - & PWRK, PSYNC) - PRINT *, 'Result on PE ', MY_PE(), ' is ', FOOSUM -ENDIF -.Ve -\fBExample 2:\fP -Consider the following C/C++ call: -.Vb -shmem_int_sum_to_all( target, source, 3, 0, 0, 8, pwrk, psync ); -.Ve -The preceding call is more efficient, but semantically equivalent to, the combination of the -following calls: -.Vb -shmem_int_sum_to_all(&(target[0]), &(source[0]), 1, 0, 0, 8, - pwrk1, psync1); -shmem_int_sum_to_all(&(target[1]), &(source[1]), 1, 0, 0, 8, - pwrk2, psync2); -shmem_int_sum_to_all(&(target[2]), &(source[2]), 1, 0, 0, 8, - pwrk1, psync1); - -Note that two sets of pWrk and pSync arrays are used alternately because no -synchronization is done between calls. -.Ve -.SH SEE ALSO - -\fIintro_shmem\fP(3) diff --git a/oshmem/shmem/man/man3/shmem_short_wait.3in b/oshmem/shmem/man/man3/shmem_short_wait.3in deleted file mode 100644 index 03267ffbc55..00000000000 --- a/oshmem/shmem/man/man3/shmem_short_wait.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_wait.3 diff --git a/oshmem/shmem/man/man3/shmem_short_wait_until.3in b/oshmem/shmem/man/man3/shmem_short_wait_until.3in deleted file mode 100644 index 03267ffbc55..00000000000 --- a/oshmem/shmem/man/man3/shmem_short_wait_until.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_wait.3 diff --git a/oshmem/shmem/man/man3/shmem_short_xor_to_all.3in b/oshmem/shmem/man/man3/shmem_short_xor_to_all.3in deleted file mode 100644 index be88b2c4d2e..00000000000 --- a/oshmem/shmem/man/man3/shmem_short_xor_to_all.3in +++ /dev/null @@ -1,215 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2015 University of Houston. All rights reserved. -.\" Copyright (c) 2014-2016 Mellanox Technologies, Inc. -.\" $COPYRIGHT$ -.de Vb -.ft CW -.nf -.. -.de Ve -.ft R - -.fi -.. 
-.TH "SHMEM\\_XOR" "3" "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME - -\fIshmem_comp4_xor_to_all\fP(3), -\fIshmem_int_xor_to_all\fP(3), -\fIshmem_int4_xor_to_all\fP(3), -\fIshmem_int8_xor_to_all\fP(3), -\fIshmem_long_xor_to_all\fP(3), -\fIshmem_longlong_xor_to_all\fP(3), -\fIshmem_short_xor_to_all\fP(3) -\- Performs a bitwise XOR operation on symmetric -arrays over the active set of PEs. -.SH SYNOPSIS - -C or C++: -.Vb -#include - -void shmem_int_xor_to_all(int *target, const int *source, - int nreduce, int PE_start, int logPE_stride, int PE_size, - int *pWrk, long *pSync); - -void shmem_long_xor_to_all(long *target, const long *source, - int nreduce, int PE_start, int logPE_stride, int PE_size, - long *pWrk, long *pSync); - -void shmem_longlong_xor_to_all(long long *target, - const long long *source, int nreduce, int PE_start, int logPE_stride, - int PE_size, long long *pWrk, long *pSync); - -void shmem_short_xor_to_all(short *target, const short *source, - int nreduce, int PE_start, int logPE_stride, int PE_size, - short *pWrk, long *pSync); -.Ve -Fortran: -.Vb -INCLUDE "mpp/shmem.fh" - -INTEGER pSync(SHMEM_REDUCE_SYNC_SIZE) -INTEGER nreduce, PE_start, logPE_stride, PE_size - -CALL SHMEM_COMP4_XOR_TO_ALL(target, source, nreduce, -& PE_start, logPE_stride, PE_size, pWrk, pSync) - -CALL SHMEM_INT4_XOR_TO_ALL(target, source, nreduce, -& PE_start, logPE_stride, PE_size, pWrk, pSync) - -CALL SHMEM_INT8_XOR_TO_ALL(target, source, nreduce, -& PE_start, logPE_stride, PE_size, pWrk, pSync) -.Ve -.SH DESCRIPTION - -The shared memory (SHMEM) reduction routines compute one or more reductions across -symmetric arrays on multiple virtual PEs. A reduction performs an associative binary -operation across a set of values. For a list of other SHMEM reduction routines, see -\fIintro_shmem\fP(3)\&. -.PP -As with all SHMEM collective routines, each of these routines assumes that only PEs in the -active set call the routine. 
If a PE not in the active set calls a SHMEM collective routine, -undefined behavior results. -.PP -The nreduce argument determines the number of separate reductions to perform. The source -array on all PEs in the active set provides one element for each reduction. The results of the -reductions are placed in the target array on all PEs in the active set. The active set is defined -by the PE_start, logPE_stride, PE_size triplet. -.PP -The source and target arrays may be the same array, but they may not be overlapping arrays. -.PP -The arguments are as follows: -.TP -target -A symmetric array of length nreduce elements to receive the results of the -reduction operations. -The data type of target varies with the version of the reduction routine being called and the -language used. When calling from C/C++, refer to the SYNOPSIS section for data type -information. When calling from Fortran, the target data types are as follows: -.RS -.TP -\fBshmem_comp8_xor_to_all:\fP Complex, with an element size equal to two 8\- -byte real values -.TP -\fBshmem_comp4_xor_to_all:\fP Complex, with an element size equal to two 4\- -byte real values -.TP -\fBshmem_int8_xor_to_all:\fP Integer, with an element size of 8 bytes -.TP -\fBshmem_int4_xor_to_all:\fP Integer, with an element size of 4 bytes -.TP -\fBshmem_real8_xor_to_all:\fP Real, with an element size of 8 bytes -.TP -\fBshmem_real4_xor_to_all:\fP Real, with an element size of 4 bytes -.RE -.RS -.PP -.RE -.TP -source -A symmetric array, of length nreduce elements, that contains one element for -each separate reduction operation. The source argument must have the same data type as -target. -.TP -nreduce -The number of elements in the target and source arrays. nreduce must be of -type integer. If you are using Fortran, it must be a default integer value. -.TP -PE_start -The lowest virtual PE number of the active set of PEs. PE_start must be of -type integer. If you are using Fortran, it must be a default integer value. 
-.TP -logPE_stride -The log (base 2) of the stride between consecutive virtual PE numbers in -the active set. logPE_stride must be of type integer. If you are using Fortran, it must be a -default integer value. -.TP -PE_size -The number of PEs in the active set. PE_size must be of type integer. If you -are using Fortran, it must be a default integer value. -.TP -pWrk -A symmetric work array. The pWrk argument must have the same data type as -target. In C/C++, this contains max(nreduce/2 + 1, -_SHMEM_REDUCE_MIN_WRKDATA_SIZE) elements. In Fortran, this contains -max(nreduce/2 + 1, SHMEM_REDUCE_MIN_WRKDATA_SIZE) elements. -.TP -pSync -A symmetric work array. In C/C++, pSync is of type long and size -_SHMEM_REDUCE_SYNC_SIZE. In Fortran, pSync is of type integer and size -SHMEM_REDUCE_SYNC_SIZE. If you are using Fortran, it must be a default integer value. -Every element of this array must be initialized with the value _SHMEM_SYNC_VALUE (in -C/C++) or SHMEM_SYNC_VALUE (in Fortran) before any of the PEs in the active set enter -the reduction routine. -.PP -The values of arguments nreduce, PE_start, logPE_stride, and PE_size must be equal on all -PEs in the active set. The same target and source arrays, and the same pWrk and pSync -work arrays, must be passed to all PEs in the active set. -.PP -Before any PE calls a reduction routine, you must ensure that the following conditions exist -(synchronization via a barrier or some other method is often needed to ensure this): The -pWrk and pSync arrays on all PEs in the active set are not still in use from a prior call to a -collective SHMEM routine. The target array on all PEs in the active set is ready to accept the -results of the reduction. -.PP -Upon return from a reduction routine, the following are true for the local PE: The target array -is updated. The values in the pSync array are restored to the original values. -.SH NOTES - -The terms collective, symmetric, and cache aligned are defined in \fIintro_shmem\fP(3)\&. 
-All SHMEM reduction routines reset the values in pSync before they return, so a particular -pSync buffer need only be initialized the first time it is used. -.PP -You must ensure that the pSync array is not being updated on any PE in the active set while -any of the PEs participate in processing of a SHMEM reduction routine. Be careful of the -following situations: If the pSync array is initialized at run time, some type of -synchronization is needed to ensure that all PEs in the working set have initialized pSync -before any of them enter a SHMEM routine called with the pSync synchronization array. A -pSync or pWrk array can be reused in a subsequent reduction routine call only if none of the -PEs in the active set are still processing a prior reduction routine call that used the same -pSync or pWrk arrays. In general, this can be assured only by doing some type of -synchronization. However, in the special case of reduction routines being called with the -same active set, you can allocate two pSync and pWrk arrays and alternate between them on -successive calls. -.SH EXAMPLES - -\fBExample 1:\fP -This Fortran example statically initializes the pSync array and computes -the exclusive OR of variable FOO across all even PEs. -.Vb -INCLUDE "mpp/shmem.fh" - -INTEGER PSYNC(SHMEM_REDUCE_SYNC_SIZE) -DATA PSYNC /SHMEM_REDUCE_SYNC_SIZE*SHMEM_SYNC_VALUE/ -PARAMETER (NR=1) -REAL FOO, FOOXOR, PWRK(MAX(NR/2+1,SHMEM_REDUCE_MIN_WRKDATA_SIZE)) -COMMON /COM/ FOO, FOOXOR, PWRK -INTRINSIC MY_PE - -IF ( MOD(MY_PE(),2) .EQ. 
0) THEN - CALL SHMEM_REAL8_XOR_TO_ALL(FOOXOR, FOO, NR, 0, 1, N$PES/2, - & PWRK, PSYNC) - PRINT *, 'Result on PE ', MY_PE(), ' is ', FOOXOR -ENDIF -.Ve -\fBExample 2:\fP -Consider the following C/C++ call: -.Vb -shmem_short_xor_to_all( target, source, 3, 0, 0, 8, pwrk, psync ); -.Ve -The preceding call is more efficient, but semantically equivalent to, the combination of the -following calls: -.Vb -shmem_short_xor_to_all(&(target[0]), &(source[0]), 1, 0, 0, 8, - pwrk1, psync1); -shmem_short_xor_to_all(&(target[1]), &(source[1]), 1, 0, 0, 8, - pwrk2, psync2); -shmem_short_xor_to_all(&(target[2]), &(source[2]), 1, 0, 0, 8, - pwrk1, psync1); -.Ve -Note that two sets of pWrk and pSync arrays are used alternately because no synchronization -is done between calls. -.SH SEE ALSO - -\fIintro_shmem\fP(3) diff --git a/oshmem/shmem/man/man3/shmem_swap.3in b/oshmem/shmem/man/man3/shmem_swap.3in deleted file mode 100644 index bc8daafe0ed..00000000000 --- a/oshmem/shmem/man/man3/shmem_swap.3in +++ /dev/null @@ -1,115 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2015 University of Houston. All rights reserved. -.\" Copyright (c) 2015 Mellanox Technologies, Inc. -.\" $COPYRIGHT$ -.de Vb -.ft CW -.nf -.. -.de Ve -.ft R - -.fi -.. 
-.TH "SHMEM\\_SWAP" "3" "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME - -\fIshmem_double_swap\fP(3), -\fIshmem_float_swap\fP(3), -\fIshmem_int_swap\fP(3), -\fIshmem_long_swap\fP(3), -\fIshmem_swap\fP(3), -\fIshmem_int4_swap\fP(3), -\fIshmem_int8_swap\fP(3), -\fIshmem_real4_swap\fP(3), -\fIshmem_real8_swap\fP(3), -\fIshmem_longlong_swap\fP(3) -\- Performs an atomic swap to a remote data object -.SH SYNOPSIS - -C or C++: -.Vb -#include - -double shmem_double_swap(double *target, double value, - int pe); - -float shmem_float_swap(float *target, float value, int pe); - -int shmem_int_swap(int *target, int value, int pe); - -long shmem_long_swap(long *target, long value, int pe); - -long long shmem_longlong_swap(long long *target, - long long value, int pe); - -long shmem_swap(long *target, long value, int pe); -.Ve -Fortran: -.Vb -INCLUDE "mpp/shmem.fh" - -INTEGER pe - -INTEGER SHMEM_SWAP -ires = SHMEM_SWAP(target, value, pe) - -INTEGER(KIND=4) SHMEM_INT4_SWAP -ires = SHMEM_INT4_SWAP(target, value, pe) - -INTEGER(KIND=8) SHMEM_INT8_SWAP -ires = SHMEM_INT8_SWAP(target, value, pe) - -REAL(KIND=4) SHMEM_REAL4_SWAP -res = SHMEM_REAL4_SWAP(target, value, pe) - -REAL(KIND=8) SHMEM_REAL8_SWAP -res = SHMEM_REAL8_SWAP(target, value, pe) -.Ve -.SH DESCRIPTION - -The atomic swap routines write \fBvalue\fP -to address target on PE \fBpe\fP, -and return -the previous contents of \fBtarget\fP -in one atomic operation. -.PP -The arguments are as follows: -.TP -target -The remotely accessible integer data object to be updated on the remote PE. If -you are using C/C++, the type of target should match that implied in the SYNOPSIS section. 
If -you are using Fortran, it must be of the following type: -.RS -.TP -\fBSHMEM_SWAP:\fP Integer of default kind -.TP -\fBSHMEM_INT4_SWAP:\fP 4\-byte integer -.TP -\fBSHMEM_INT8_SWAP:\fP 8\-byte integer -.TP -\fBSHMEM_REAL4_SWAP:\fP 4\-byte real -.TP -\fBSHMEM_REAL8_SWAP:\fP 8\-byte real -.RE -.RS -.PP -.RE -.TP -value -Value to be atomically written to the remote PE. value is the same type as target. -.TP -pe -An integer that indicates the PE number on which target is to be updated. If you are -using Fortran, it must be a default integer value. -.PP -.SH NOTES - -The term remotely accessible is defined in \fIintro_shmem\fP(3)\&. -.SH RETURN VALUES - -The contents that had been at the target address on the remote PE prior to the swap is -returned. -.SH SEE ALSO - -\fIintro_shmem\fP(3) diff --git a/oshmem/shmem/man/man3/shmem_test_lock.3in b/oshmem/shmem/man/man3/shmem_test_lock.3in deleted file mode 100644 index 9a1ee8ffc2e..00000000000 --- a/oshmem/shmem/man/man3/shmem_test_lock.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_set_lock.3 diff --git a/oshmem/shmem/man/man3/shmem_udcflush.3in b/oshmem/shmem/man/man3/shmem_udcflush.3in deleted file mode 100644 index 3edeb74023d..00000000000 --- a/oshmem/shmem/man/man3/shmem_udcflush.3in +++ /dev/null @@ -1,94 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2015 University of Houston. All rights reserved. -.\" Copyright (c) 2015 Mellanox Technologies, Inc. -.\" $COPYRIGHT$ -.de Vb -.ft CW -.nf -.. -.de Ve -.ft R - -.fi -.. 
-.TH "SHMEM\\_CACHE" "3" "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME - -\fIshmem_clear_cache_inv\fP(3), -\fIshmem_set_cache_inv\fP(3), -\fIshmem_set_cache_line_inv\fP(3), -\fIshmem_udcflush\fP(3), -\fIshmem_udcflush_line\fP(3) -\- Controls data cache utilities -.SH SYNOPSIS - -C or C++: -.Vb -#include - -void shmem_clear_cache_inv(void); -void shmem_clear_cache_line_inv(void *target); -void shmem_set_cache_inv(void); -void shmem_set_cache_line_inv(void *target); -void shmem_udcflush(void); -void shmem_udcflush_line(void *target); -.Ve -Fortran: -.Vb -INCLUDE "mpp/shmem.fh" - -CALL SHMEM_CLEAR_CACHE_INV -CALL SHMEM_CLEAR_CACHE_LINE_INV(target) -CALL SHMEM_SET_CACHE_INV -CALL SHMEM_SET_CACHE_LINE_INV(target) - -CALL SHMEM_UDCFLUSH -CALL SHMEM_UDCFLUSH_LINE(target) -.Ve -.SH DESCRIPTION - -The following argument is passed to the cache line control routines: -.TP -target -A data object that is local to the processing element (PE). target can be of -any noncharacter type. If you are using Fortran, it can be of any kind. -.PP -\fBshmem_clear_cache_inv\fP -disables automatic cache coherency mode previously -enabled by shmem_set_cache_inv or shmem_set_cache_line_inv. -.PP -\fBshmem_clear_cache_line_inv\fP -disables automatic cache coherency mode for the -cache line associated with the address of \fBtarget\fP -only. -.PP -\fBshmem_set_cache_inv\fP -enables the OpenSHMEM API to automatically decide the -best strategy for cache coherency. -.PP -\fBshmem_set_cache_line_inv\fP -enables automatic cache coherency mode for the -cache line associated with the address of \fBtarget\fP -only. -.PP -\fBshmem_clear_cache_inv\fP -disables automatic cache coherency mode previously -enabled by shmem_set_cache_inv or shmem_set_cache_line_inv. -.PP -\fBshmem_udcflush\fP -makes the entire user data cache coherent. -.PP -\fBshmem_udcflush_line\fP -makes coherent the cache line that corresponds with -the address specified by target. 
-.PP -.SH NOTES - -These routines have been retained for improved backward compatability with legacy -architectures. -.PP -.SH SEE ALSO - -\fIintro_shmem\fP(3), -\fIshmem_put\fP(3), -\fIshmem_swap\fP(3) diff --git a/oshmem/shmem/man/man3/shmem_udcflush_line.3in b/oshmem/shmem/man/man3/shmem_udcflush_line.3in deleted file mode 100644 index 4a6a361ef97..00000000000 --- a/oshmem/shmem/man/man3/shmem_udcflush_line.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_udcflush.3 diff --git a/oshmem/shmem/man/man3/shmem_wait.3in b/oshmem/shmem/man/man3/shmem_wait.3in deleted file mode 100644 index c664ae4c1ac..00000000000 --- a/oshmem/shmem/man/man3/shmem_wait.3in +++ /dev/null @@ -1,205 +0,0 @@ -.\" -*- nroff -*- -.\" Copyright (c) 2015 University of Houston. All rights reserved. -.\" Copyright (c) 2015 Mellanox Technologies, Inc. -.\" $COPYRIGHT$ -.de Vb -.ft CW -.nf -.. -.de Ve -.ft R - -.fi -.. -.TH "SHMEM\\_WAIT" "3" "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#" -.SH NAME - -\fIshmem_int_wait\fP(3), -\fIshmem_int_wait\fP(3)_until, -\fIshmem_int4_wait\fP(3), -\fIshmem_int4_wait\fP(3)_until, -\fIshmem_int8_wait\fP(3), -\fIshmem_int8_wait\fP(3)_until, -\fIshmem_long_wait\fP(3), -\fIshmem_long_wait\fP(3)_until, -\fIshmem_longlong_wait\fP(3), -\fIshmem_longlong_wait\fP(3)_until, -\fIshmem_short_wait\fP(3), -\fIshmem_short_wait\fP(3)_until, -\fIshmem_wait\fP(3), -\fIshmem_wait\fP(3)_until -\- Waits for a variable on the local processing element (PE) to change -.SH SYNOPSIS - -C or C++: -.Vb -#include - -void shmem_int_wait(volatile int *var, int value); - -void shmem_int_wait_until(volatile int *var, int cond, int value); - -void shmem_long_wait(volatile long *var, long value); - -void shmem_long_wait_until(volatile long *var, int cond, long value); - -void shmem_longlong_wait(volatile long long *var, long long value); - -void shmem_longlong_wait_until(volatile long long *var, int cond, - long long value); - -void shmem_short_wait(volatile short *var, short value); - -void 
shmem_short_wait_until(volatile short *var, int cond, - short value); - -void shmem_wait(volatile long *ivar, long cmp_value); - -void shmem_wait_until(volatile long *ivar, int cmp, long value); -.Ve -Fortran: -.Vb -INCLUDE "mpp/shmem.fh" - -CALL SHMEM_INT4_WAIT(ivar, cmp_value) - -CALL SHMEM_INT4_WAIT_UNTIL(ivar, cmp, cmp_value) - -CALL SHMEM_INT8_WAIT(ivar, cmp_value) - -CALL SHMEM_INT8_WAIT_UNTIL(ivar, cmp, cmp_value) - -CALL SHMEM_WAIT(ivar, cmp_value) - -CALL SHMEM_WAIT_UNTIL(ivar, cmp, cmp_value) -.Ve -.SH DESCRIPTION - -shmem_wait and shmem_wait_until wait for \fBivar\fP -to be changed by a remote write -or atomic swap issued by a different processor. These routines can be used for point\-to\- -point directed synchronization. A call to shmem_wait does not return until some other -processor writes a value, not equal to cmp_value, into \fBivar\fP -on the waiting -processor. A call to shmem_wait_until does not return until some other processor changes -\fBivar\fP -to satisfy the condition implied by cmp and cmp_value. This mechanism is -useful when a processor needs to tell another processor that it has completed some action. -.PP -The arguments are as follows: -.TP -target -The remotely accessible integer data object to be updated on the remote PE. If -you are using C/C++, the type of target should match that implied in the SYNOPSIS section. -If you are using the Fortran compiler, it must be of type integer with an element size of 4 -bytes for SHMEM_INT4_ADD and 8 bytes for SHMEM_INT8_ADD. -.TP -value -The value to be atomically added to target. If you are using C/C++, the type of -value should match that implied in the SYNOPSIS section. If you are using Fortran, it must be -of type integer with an element size of target. -.TP -pe -An integer that indicates the PE number upon which target is to be updated. If you -are using Fortran, it must be a default integer value. -.TP -ivar -A remotely accessible integer variable that is being updated by another PE. 
If you -are using C/C++, the type of ivar should match that implied in the SYNOPSIS section. If you -are using Fortran, ivar must be a specific sized integer type according to -the function being called, as follows: -.RS -.TP -\fBshmem_wait, shmem_wait_until:\fP default INTEGER -.TP -\fBshmem_int4_wait, shmem_int4_wait_until:\fP INTEGER*4 -.TP -\fBshmem_int8_wait, shmem_int8_wait_until:\fP INTEGER*8 -.RE -.RS -.PP -.RE -.TP -cmp -The compare operator that compares ivar with cmp_value. cmp must be of type -integer. If you are using Fortran, it must be of default kind. If you are using C/C++, the type -of cmp should match that implied in the SYNOPSIS section. The following cmp values are -supported: -.RS -.TP -SHMEM_CMP_EQ -Equal -.TP -SHMEM_CMP_NE -Not equal -.TP -SHMEM_CMP_GT -Greater than -.TP -SHMEM_CMP_LE -Less than or equal to -.TP -SHMEM_CMP_LT -Less than -.TP -SHMEM_CMP_GE -Greater than or equal to -.RE -.RS -.PP -.RE -.TP -cmp_value -cmp_value must be of type integer. If you are using C/C++, the type of -cmp_value should match thatimplied in the SYNOPSIS section. If you are using Fortran, -cmp_value must be an integer of the same size and kind as ivar. -The shmem_wait routines return when ivar is no longer equal to cmp_value. -The shmem_wait_until routines return when the compare condition is true. The compare -condition is defined by the ivar argument compared with the cmp_value using the -comparison operator, cmp. 
-.PP -.SH EXAMPLES - -\fBExample 1:\fP -The following call returns when variable ivar is not equal to 100: -.Vb -INTEGER*8 IVAR - -CALL SHMEM_INT8_WAIT(IVAR, INT8(100)) -.Ve -\fBExample 2:\fP -The following call to SHMEM_INT8_WAIT_UNTIL is equivalent to the -call to SHMEM_INT8_WAIT in example 1: -.Vb -INTEGER*8 IVAR - -CALL SHMEM_INT8_WAIT_UNTIL(IVAR, SHMEM_CMP_NE, INT8(100)) -.Ve -\fBExample 3:\fP -The following C/C++ call waits until the sign bit in ivar is set by a -transfer from a remote PE: -.Vb -int ivar; - -shmem_int_wait_until(&ivar, SHMEM_CMP_LT, 0); -.Ve -\fBExample 4:\fP -The following Fortran example is in the context of a subroutine: -.Vb -SUBROUTINE EXAMPLE() - INTEGER FLAG_VAR - COMMON/FLAG/FLAG_VAR - . . . - FLAG_VAR = FLAG_VALUE ! initialize the event variable - . . . - IF (FLAG_VAR .EQ. FLAG_VALUE) THEN - CALL SHMEM_WAIT(FLAG_VAR, FLAG_VALUE) - ENDIF - FLAG_VAR = FLAG_VALUE ! reset the event variable for next time - . . . -END -.Ve -.SH SEE ALSO - -\fIintro_shmem\fP(3), -\fIshmem_put\fP(3) diff --git a/oshmem/shmem/man/man3/shmem_wait_until.3in b/oshmem/shmem/man/man3/shmem_wait_until.3in deleted file mode 100644 index 03267ffbc55..00000000000 --- a/oshmem/shmem/man/man3/shmem_wait_until.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_wait.3 diff --git a/oshmem/shmem/man/man3/shmemalign.3in b/oshmem/shmem/man/man3/shmemalign.3in deleted file mode 100644 index 6cdb8014e9e..00000000000 --- a/oshmem/shmem/man/man3/shmemalign.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_malloc.3 diff --git a/oshmem/shmem/man/man3/shrealloc.3in b/oshmem/shmem/man/man3/shrealloc.3in deleted file mode 100644 index 6cdb8014e9e..00000000000 --- a/oshmem/shmem/man/man3/shrealloc.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_malloc.3 diff --git a/oshmem/shmem/man/man3/start_pes.3in b/oshmem/shmem/man/man3/start_pes.3in deleted file mode 100644 index c50c47089ee..00000000000 --- a/oshmem/shmem/man/man3/start_pes.3in +++ /dev/null @@ -1 +0,0 @@ -.so man3/shmem_init.3 diff 
--git a/oshmem/tools/oshmem_info/Makefile.am b/oshmem/tools/oshmem_info/Makefile.am index 57f237f8e4e..06a9b881318 100644 --- a/oshmem/tools/oshmem_info/Makefile.am +++ b/oshmem/tools/oshmem_info/Makefile.am @@ -1,7 +1,7 @@ # # Copyright (c) 2014 Mellanox Technologies, Inc. # All rights reserved. -# Copyright (c) 2014-2020 Cisco Systems, Inc. All rights reserved. +# Copyright (c) 2014-2022 Cisco Systems, Inc. All rights reserved. # Copyright (c) 2016 IBM Corporation. All rights reserved. # Copyright (c) 2017 Research Organization for Information Science # and Technology (RIST). All rights reserved. @@ -44,27 +44,16 @@ endif include $(top_srcdir)/Makefile.ompi-rules -nodist_man_MANS = bin_PROGRAMS = -man_pages = oshmem_info.1 - -EXTRA_DIST = $(man_pages:.1=.1in) - if PROJECT_OSHMEM -# Only build/install the binary and man pages if we're building oshmem +# Only build/install the binary pages if we're building oshmem bin_PROGRAMS += oshmem_info -nodist_man_MANS += $(man_pages) dist_ompidata_DATA = \ help-oshmem-info.txt endif -# Ensure that the man pages are rebuilt if the opal_config.h file -# changes; a "good enough" way to know if configure was run again (and -# therefore the release date or version may have changed) -$(nodist_man_MANS): $(top_builddir)/opal/include/opal_config.h - oshmem_info_SOURCES = \ oshmem_info.h \ oshmem_info.c \ @@ -73,6 +62,3 @@ oshmem_info_SOURCES = \ oshmem_info_LDADD = $(top_builddir)/ompi/lib@OMPI_LIBMPI_NAME@.la oshmem_info_LDADD += $(top_builddir)/oshmem/liboshmem.la oshmem_info_LDADD += $(top_builddir)/opal/lib@OPAL_LIB_NAME@.la - -distclean-local: - rm -f $(man_pages) diff --git a/oshmem/tools/wrappers/Makefile.am b/oshmem/tools/wrappers/Makefile.am index ef859477135..f026266eb6e 100644 --- a/oshmem/tools/wrappers/Makefile.am +++ b/oshmem/tools/wrappers/Makefile.am @@ -12,10 +12,7 @@ include $(top_srcdir)/Makefile.ompi-rules -man_pages = oshcc.1 shmemcc.1 oshc++.1 shmemc++.1 oshcxx.1 shmemcxx.1 oshfort.1 shmemfort.1 - if 
PROJECT_OSHMEM -man_MANS = $(man_pages) pkgconfigdir = $(libdir)/pkgconfig pkgconfig_DATA = oshmem.pc oshmem-c.pc oshmem-cxx.pc oshmem-fort.pc @@ -84,23 +81,11 @@ install-data-cs: mk_pkgdatadir uninstall-local-cs: rm -f $(DESTDIR)$(bindir)/shmemCC$(EXEEXT) \ - $(DESTDIR)$(mandir)/man1/shmemCC.1 \ $(DESTDIR)$(pkgdatadir)/shmemCC-wrapper-data.txt rm -f $(DESTDIR)$(bindir)/oshCC$(EXEEXT) \ - $(DESTDIR)$(mandir)/man1/oshCC.1 \ $(DESTDIR)$(pkgdatadir)/oshCC-wrapper-data.txt -oshCC.1: $(top_builddir)/opal/tools/wrappers/generic_wrapper.1 - rm -f oshCC.1 - sed -e 's/#COMMAND#/oshCC/g' -e 's/#PROJECT#/Open SHMEM/g' -e 's/#PROJECT_SHORT#/OSHMEM/g' -e 's/#LANGUAGE#/C++/g' < $(top_builddir)/opal/tools/wrappers/generic_wrapper.1 > oshCC.1 - -shmemCC.1: $(top_builddir)/opal/tools/wrappers/generic_wrapper.1 - rm -f shmemCC.1 - sed -e 's/#COMMAND#/shmemCC/g' -e 's/#PROJECT#/Open SHMEM/g' -e 's/#PROJECT_SHORT#/OSHMEM/g' -e 's/#LANGUAGE#/C++/g' < $(top_builddir)/opal/tools/wrappers/generic_wrapper.1 > shmemCC.1 - if CASE_SENSITIVE_FS -man_MANS += oshCC.1 shmemCC.1 - targets_install_exec += install-exec-cs targets_install_data += install-data-cs @@ -112,48 +97,4 @@ install-exec-hook: $(targets_install_exec) install-data-hook: $(targets_install_data) uninstall-local: $(targets_uninstall_local) - -######################################################## -# -# Man page generation / handling -# -######################################################## -distclean-local: - rm -f $(man_MANS) - -$(top_builddir)/opal/tools/wrappers/generic_wrapper.1: - (cd $(top_builddir)/opal/tools/wrappers && $(MAKE) $(AM_MAKEFLAGS) generic_wrapper.1) - -oshcc.1: $(top_builddir)/opal/tools/wrappers/generic_wrapper.1 - rm -f oshcc.1 - sed -e 's/#COMMAND#/oshcc/g' -e 's/#PROJECT#/Open SHMEM/g' -e 's/#PROJECT_SHORT#/OSHMEM/g' -e 's/#LANGUAGE#/C/g' < $(top_builddir)/opal/tools/wrappers/generic_wrapper.1 > oshcc.1 - -shmemcc.1: $(top_builddir)/opal/tools/wrappers/generic_wrapper.1 - rm -f shmemcc.1 - sed -e 
's/#COMMAND#/shmemcc/g' -e 's/#PROJECT#/Open SHMEM/g' -e 's/#PROJECT_SHORT#/OSHMEM/g' -e 's/#LANGUAGE#/C/g' < $(top_builddir)/opal/tools/wrappers/generic_wrapper.1 > shmemcc.1 - -oshc++.1: $(top_builddir)/opal/tools/wrappers/generic_wrapper.1 - rm -f oshc++.1 - sed -e 's/#COMMAND#/oshc++/g' -e 's/#PROJECT#/Open SHMEM/g' -e 's/#PROJECT_SHORT#/OSHMEM/g' -e 's/#LANGUAGE#/C++/g' < $(top_builddir)/opal/tools/wrappers/generic_wrapper.1 > oshc++.1 - -shmemc++.1: $(top_builddir)/opal/tools/wrappers/generic_wrapper.1 - rm -f shmemc++.1 - sed -e 's/#COMMAND#/shmemc++/g' -e 's/#PROJECT#/Open SHMEM/g' -e 's/#PROJECT_SHORT#/OSHMEM/g' -e 's/#LANGUAGE#/C++/g' < $(top_builddir)/opal/tools/wrappers/generic_wrapper.1 > shmemc++.1 - -oshcxx.1: $(top_builddir)/opal/tools/wrappers/generic_wrapper.1 - rm -f oshcxx.1 - sed -e 's/#COMMAND#/oshcxx/g' -e 's/#PROJECT#/Open SHMEM/g' -e 's/#PROJECT_SHORT#/OSHMEM/g' -e 's/#LANGUAGE#/C++/g' < $(top_builddir)/opal/tools/wrappers/generic_wrapper.1 > oshcxx.1 - -shmemcxx.1: $(top_builddir)/opal/tools/wrappers/generic_wrapper.1 - rm -f shmemcxx.1 - sed -e 's/#COMMAND#/shmemcxx/g' -e 's/#PROJECT#/Open SHMEM/g' -e 's/#PROJECT_SHORT#/OSHMEM/g' -e 's/#LANGUAGE#/C++/g' < $(top_builddir)/opal/tools/wrappers/generic_wrapper.1 > shmemcxx.1 - -oshfort.1: $(top_builddir)/opal/tools/wrappers/generic_wrapper.1 - rm -f oshfort.1 - sed -e 's/#COMMAND#/oshfort/g' -e 's/#PROJECT#/Open SHMEM/g' -e 's/#PROJECT_SHORT#/OSHMEM/g' -e 's/#LANGUAGE#/Fortran/g' < $(top_builddir)/opal/tools/wrappers/generic_wrapper.1 > oshfort.1 - -shmemfort.1: $(top_builddir)/opal/tools/wrappers/generic_wrapper.1 - rm -f shmemfort.1 - sed -e 's/#COMMAND#/shmemfort/g' -e 's/#PROJECT#/Open SHMEM/g' -e 's/#PROJECT_SHORT#/OSHMEM/g' -e 's/#LANGUAGE#/Fortran/g' < $(top_builddir)/opal/tools/wrappers/generic_wrapper.1 > shmemfort.1 - endif # PROJECT_OSHMEM