Anonymous committed 1cfbad7

added homework6 solutions


Files changed (7)

notes/project.rst

 * part2/quadrature_mc.f90
 * part2/random_util.f90
 * part2/test_quad_mc.f90
+* part2/plot_mc_quad_error.py
 * part2/Makefile
 
 * part3/problem_description.f90

solutions/homework6/Makefile

+
+OBJECTS = functions.o quadrature.o test.o
+OBJECTS2 = functions.o quadrature.o test2.o
+FFLAGS = 
+NUM_PROCS ?= 4   # default if not set on the command line or in the environment
+
+.PHONY: test test2 clean
+
+test: test.exe
+	mpiexec -n $(NUM_PROCS) test.exe
+
+test.exe: $(OBJECTS)
+	mpif90 $(FFLAGS) $(OBJECTS) -o test.exe
+
+test2: test2.exe
+	mpiexec -n $(NUM_PROCS) test2.exe
+
+test2.exe: $(OBJECTS2)
+	mpif90 $(FFLAGS) $(OBJECTS2) -o test2.exe
+
+%.o : %.f90
+	mpif90 $(FFLAGS) -c  $< 
+
+clean:
+	rm -f *.o *.exe *.mod
+
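Since NUM_PROCS can be overridden, the targets above can be run with a
different process count, e.g. (assuming mpif90 and mpiexec are on the path):

    make test               # build and run test.exe on 4 processes (default)
    make test NUM_PROCS=8   # override the process count
    make test2 NUM_PROCS=5  # build and run the master-worker version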

solutions/homework6/copyvalue2.f90

+!
+! Set a value in Process num_procs-1 and copy it through a chain of processes,
+! subtracting 1 each time, and finally print the result from Process 0.
+!
+
+program copyvalue2
+
+    use mpi
+
+    implicit none
+
+    integer :: i, proc_num, num_procs, ierr
+    integer, dimension(MPI_STATUS_SIZE) :: status
+
+    call MPI_INIT(ierr)
+    call MPI_COMM_SIZE(MPI_COMM_WORLD, num_procs, ierr)
+    call MPI_COMM_RANK(MPI_COMM_WORLD, proc_num, ierr)
+
+    if (num_procs==1) then
+        print *, "Only one process, cannot do anything"
+        call MPI_FINALIZE(ierr)
+        stop
+        endif
+
+    if (proc_num==num_procs-1) then
+        i = 55
+        print '("Process ",i3," setting      i = ",i3)', proc_num, i
+
+        call MPI_SEND(i, 1, MPI_INTEGER, num_procs-2, 21, &
+                      MPI_COMM_WORLD, ierr)
+
+      else if (proc_num > 0) then
+
+        call MPI_RECV(i, 1, MPI_INTEGER, proc_num+1, 21, &
+                      MPI_COMM_WORLD, status, ierr)
+
+        print '("Process ",i3," receives     i = ",i3)', proc_num, i
+        i = i-1
+        print '("Process ",i3," sends        i = ",i3)', proc_num, i
+
+        call MPI_SEND(i, 1, MPI_INTEGER, proc_num-1, 21, &
+                      MPI_COMM_WORLD, ierr)
+
+
+      else if (proc_num == 0) then
+
+        call MPI_RECV(i, 1, MPI_INTEGER, proc_num+1, 21, &
+                      MPI_COMM_WORLD, status, ierr)
+
+        print '("Process ",i3," ends up with i = ",i3)', proc_num, i
+      endif
+
+    call MPI_FINALIZE(ierr)
+
+end program copyvalue2
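The values passed down the chain are deterministic even though lines from
different processes may appear interleaved; with 4 processes the output
should look roughly like:

    Process   3 setting      i =  55
    Process   2 receives     i =  55
    Process   2 sends        i =  54
    Process   1 receives     i =  54
    Process   1 sends        i =  53
    Process   0 ends up with i =  53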

solutions/homework6/functions.f90

+
+module functions
+
+    implicit none
+    integer :: fevals_proc
+    real(kind=8) :: k
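+    ! fevals_proc counts calls to f on this process, and k is the
+    ! wavenumber in sin(k*x); both are set by the main program.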
+    save
+
+contains
+
+    real(kind=8) function f(x)
+        implicit none
+        real(kind=8), intent(in) :: x 
+
+        ! keep track of number of function evaluations by
+        ! each process:
+        fevals_proc = fevals_proc + 1
+        
+        f = 1.d0 + x**3 + sin(k*x)
+        
+    end function f
+
+end module functions

solutions/homework6/quadrature.f90

+
+module quadrature
+
+contains
+
+real(kind=8) function trapezoid(f, a, b, n)
+
+    ! Estimate the integral of f(x) from a to b using the
+    ! Trapezoid Rule with n points.
+
+    ! Input:
+    !   f:  the function to integrate
+    !   a:  left endpoint
+    !   b:  right endpoint
+    !   n:  number of points to use
+    ! Returns:
+    !   the estimate of the integral
+     
+    implicit none
+    real(kind=8), intent(in) :: a,b
+    real(kind=8), external :: f
+    integer, intent(in) :: n
+
+    ! Local variables:
+    integer :: j
+    real(kind=8) :: h, trap_sum, xj
+
+    h = (b-a)/(n-1)
+    trap_sum = 0.5d0*(f(a) + f(b))  ! endpoint contributions
+    
+    do j=2,n-1
+        xj = a + (j-1)*h
+        trap_sum = trap_sum + f(xj)
+        enddo
+
+    trapezoid = h * trap_sum
+
+end function trapezoid
+
+end module quadrature
+
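For reference, with h = (b-a)/(n-1) and x_j = a + (j-1)h, the loop above
computes the standard n-point trapezoid rule

    \int_a^b f(x)\, dx \approx h \left[ \tfrac{1}{2} f(x_1)
        + \sum_{j=2}^{n-1} f(x_j) + \tfrac{1}{2} f(x_n) \right]

where x_1 = a and x_n = b.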

solutions/homework6/test.f90

+
+program test
+
+    use mpi
+
+    use quadrature, only: trapezoid
+    use functions, only: f, fevals_proc, k
+
+    implicit none
+    real(kind=8) :: a, b, int_true, int_approx
+
+    integer :: proc_num, num_procs, ierr, n, fevals_total
+    integer, dimension(MPI_STATUS_SIZE) :: status
+
+    call MPI_INIT(ierr)
+    call MPI_COMM_SIZE(MPI_COMM_WORLD, num_procs, ierr)
+    call MPI_COMM_RANK(MPI_COMM_WORLD, proc_num, ierr)
+
+    ! All processes set these values so we don't have to broadcast:
+    k = 1.d3   ! functions module variable 
+    a = 0.d0
+    b = 2.d0
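+    ! exact integral: the integrand 1 + x**3 + sin(k*x) has
+    ! antiderivative x + x**4/4 - cos(k*x)/k, so int_true below
+    ! is this antiderivative evaluated from a to b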
+    int_true = (b-a) + (b**4 - a**4) / 4.d0 - (1.d0/k) * (cos(k*b) - cos(k*a))
+    n = 1000
+
+    ! Each process keeps track of number of fevals:
+    fevals_proc = 0
+
+    if (proc_num==0) then
+        print '("Using ",i3," processes")', num_procs
+        print '("true integral: ", es22.14)', int_true
+        print *, " "  ! blank line
+        endif
+
+    call MPI_BARRIER(MPI_COMM_WORLD,ierr) ! wait for process 0 to print
+
+    ! Note: In this version all processes call trapezoid and repeat the
+    !       same work, so each should get the same answer.
+    int_approx = trapezoid(f,a,b,n)
+    print '("Process ",i3," with n = ",i8," computes int_approx = ",es22.14)', &
+            proc_num,n, int_approx
+
+    call MPI_BARRIER(MPI_COMM_WORLD,ierr) ! wait for all processes to print
+
+    ! print the number of function evaluations by each process:
+    print '("fevals by Process ",i2,": ",i13)',  proc_num, fevals_proc
+
+    call MPI_BARRIER(MPI_COMM_WORLD,ierr) ! wait for all processes to print
+
+    ! Compute fevals_total:
+    call MPI_REDUCE(fevals_proc, fevals_total, 1, MPI_INTEGER, MPI_SUM, 0, &
+                    MPI_COMM_WORLD, ierr)
+
+    if (proc_num==0) then
+        print '("Total number of fevals: ",i10)', fevals_total
+        endif
+
+    call MPI_FINALIZE(ierr)
+
+end program test

solutions/homework6/test2.f90

+
+program test2
+
+    use mpi
+
+    use quadrature, only: trapezoid
+    use functions, only: f, fevals_proc, k
+
+    implicit none
+    real(kind=8) :: a, b, int_true, int_approx, int_sub, dx_sub
+    real(kind=8) :: ab_sub(2)   ! to hold a and b for each subinterval
+
+    integer :: j, jj, proc_num, num_procs, ierr, n, fevals_total, nsub
+    integer, dimension(MPI_STATUS_SIZE) :: status
+    logical :: debug
+
+    debug = .false.
+
+    call MPI_INIT(ierr)
+    call MPI_COMM_SIZE(MPI_COMM_WORLD, num_procs, ierr)
+    call MPI_COMM_RANK(MPI_COMM_WORLD, proc_num, ierr)
+
+    ! All processes set these values so we don't have to broadcast:
+    k = 1.d3   ! functions module variable 
+    a = 0.d0
+    b = 2.d0
+    int_true = (b-a) + (b**4 - a**4) / 4.d0 - (1.d0/k) * (cos(k*b) - cos(k*a))
+    n = 1000   ! points per subinterval
+    nsub = num_procs - 1   ! number of subintervals
+
+    ! Each process keeps track of number of fevals:
+    fevals_proc = 0
+
+    if (proc_num==0) then
+        print '("Using ",i3," processes")', num_procs
+        print '("true integral: ", es22.14)', int_true
+        print *, " "  ! blank line
+
+        int_approx = 0.d0  ! initialize variable for accumulating integral
+
+        endif
+
+    call MPI_BARRIER(MPI_COMM_WORLD,ierr) ! wait for process 0 to print
+
+    ! -----------------------------------------
+    ! code for Master (Process 0):
+    ! -----------------------------------------
+
+    if (proc_num == 0) then
+
+      dx_sub = (b-a) / nsub
+
+      do j=1,nsub
+        ab_sub(1) = a + (j-1)*dx_sub
+        ab_sub(2) = a + j*dx_sub
+        call MPI_SEND(ab_sub, 2, MPI_DOUBLE_PRECISION, j, j, &
+                      MPI_COMM_WORLD, ierr)
+        enddo
+
+      do j=1,nsub
+          call MPI_RECV(int_sub, 1, MPI_DOUBLE_PRECISION, &
+                        MPI_ANY_SOURCE, MPI_ANY_TAG, &
+                        MPI_COMM_WORLD, status, ierr)
+          jj = status(MPI_TAG)
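+          ! the tag identifies which subinterval this result came from;
+          ! it is not needed here, since the results are simply summed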
+          if (debug) then
+              print *,"+++ int_sub, int_approx: ",int_sub, int_approx
+              endif
+          int_approx = int_approx + int_sub
+
+          enddo
+        endif
+
+    ! -----------------------------------------
+    ! code for Workers (Processes 1, 2, ...):
+    ! -----------------------------------------
+    if (proc_num /= 0) then
+
+        call MPI_RECV(ab_sub, 2, MPI_DOUBLE_PRECISION, &
+                      0, MPI_ANY_TAG, &
+                      MPI_COMM_WORLD, status, ierr)
+
+        j = status(MPI_TAG)   ! this is the subinterval number
+
+        if (debug) then
+            print '("+++ Process ",i4,"  received message with tag ",i6)', &
+               proc_num, j       ! for debugging
+            endif
+
+        int_sub = trapezoid(f,ab_sub(1),ab_sub(2),n)
+
+        call MPI_SEND(int_sub, 1, MPI_DOUBLE_PRECISION, &
+                    0, j, MPI_COMM_WORLD, ierr)
+
+        endif
+
+
+    call MPI_BARRIER(MPI_COMM_WORLD,ierr) ! wait for all processes to print
+
+    ! print the number of function evaluations by each process:
+    print '("fevals by Process ",i2,": ",i13)',  proc_num, fevals_proc
+
+    call MPI_BARRIER(MPI_COMM_WORLD,ierr) ! wait for all processes to print
+
+    call MPI_REDUCE(fevals_proc, fevals_total, 1, MPI_INTEGER, MPI_SUM, 0, &
+                    MPI_COMM_WORLD, ierr)
+
+    if (proc_num==0) then
+        print '("Trapezoid approximation with ",i8," total points: ",es22.14)',&
+                nsub*n, int_approx
+        print '("Total number of fevals: ",i10)', fevals_total
+        endif
+
+    call MPI_FINALIZE(ierr)
+
+end program test2