2D AMRCLAW
advanc.f
c
c --------------------------------------------------------------
c
      subroutine advanc (level,nvar,dtlevnew,vtime,naux)
c
      use amr_module
      implicit double precision (a-h,o-z)


      logical vtime
      integer omp_get_thread_num, omp_get_max_threads
      integer mythread/0/, maxthreads/1/
      integer listgrids(numgrids(level))
      integer clock_start, clock_finish, clock_rate
      integer clock_startstepgrid,clock_startbound,clock_finishbound
      real(kind=8) cpu_start, cpu_finish
      real(kind=8) cpu_startbound, cpu_finishbound
      real(kind=8) cpu_startstepgrid, cpu_finishstepgrid

c     maxgr is the maximum number of grids that many things are
c     dimensioned at, so this is the overall bound.  it is only a
c     1d array though, so it should suffice.  the problem is not
c     being able to dimension at maxthreads.


c
c  ::::::::::::::: ADVANC :::::::::::::::::::::::::::::::::::::::::::
c  integrate all grids at the input 'level' by one step of its delta(t)
c  this includes:  setting the ghost cells
c                  advancing the solution on the grid
c                  adjusting fluxes for flux conservation step later
c  ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
c
c  get start time for more detailed timing by level
      call system_clock(clock_start,clock_rate)
      call cpu_time(cpu_start)


      hx     = hxposs(level)
      hy     = hyposs(level)
      delt   = possk(level)
c     this is linear alg.
c     call prepgrids(listgrids,numgrids(level),level)
c

      call system_clock(clock_startbound,clock_rate)
      call cpu_time(cpu_startbound)


c     maxthreads initialized to 1 above in case no openmp
!$    maxthreads = omp_get_max_threads()

c     We want to do this regardless of the threading type
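c     first parallel loop over the grids at this level: fill the
c     ghost cells of each grid (call bound) so every grid has its
c     boundary data before being advanced below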
!$OMP PARALLEL DO PRIVATE(j,locnew, locaux, mptr,nx,ny,mitot,
!$OMP&                    mjtot,time,levSt),
!$OMP&            SHARED(level, nvar, naux, alloc, intrat, delt,
!$OMP&                   listOfGrids,listStart,nghost,
!$OMP&                   node,rnode,numgrids,listgrids),
!$OMP&            SCHEDULE (dynamic,1)
!$OMP&            DEFAULT(none)
      do j = 1, numgrids(level)
         !mptr   = listgrids(j)
         levst  = liststart(level)
         mptr   = listofgrids(levst+j-1)
         !write(*,*)"old ",listgrids(j)," new",mptr
         nx     = node(ndihi,mptr) - node(ndilo,mptr) + 1
         ny     = node(ndjhi,mptr) - node(ndjlo,mptr) + 1
         mitot  = nx + 2*nghost
         mjtot  = ny + 2*nghost
         locnew = node(store1,mptr)
         locaux = node(storeaux,mptr)
         time   = rnode(timemult,mptr)
c
         call bound(time,nvar,nghost,alloc(locnew),mitot,mjtot,mptr,
     1              alloc(locaux),naux)

      end do
!$OMP END PARALLEL DO
      call system_clock(clock_finishbound,clock_rate)
      call cpu_time(cpu_finishbound)
      timebound = timebound + clock_finishbound - clock_startbound
      timeboundcpu = timeboundcpu + cpu_finishbound - cpu_startbound

c
c     save coarse level values if there is a finer level for wave fixup
      if (level+1 .le. mxnest) then
         if (lstart(level+1) .ne. null) then
            call saveqc(level+1,nvar,naux)
         endif
      endif
c
      dtlevnew = rinfinity
      cfl_level = 0.d0    !# to keep track of max cfl seen on each level

c
      call system_clock(clock_startstepgrid,clock_rate)
      call cpu_time(cpu_startstepgrid)


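c     second parallel loop over the grids at this level: advance each
c     grid one step of delt via par_advanc.  each grid returns a
c     suggested new step dtnew; the minimum over all grids is kept
c     in dtlevnew inside a critical section.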
!$OMP PARALLEL DO PRIVATE(j,mptr,nx,ny,mitot,mjtot)
!$OMP&            PRIVATE(mythread,dtnew)
!$OMP&            SHARED(rvol,rvoll,level,nvar,mxnest,alloc,intrat)
!$OMP&            SHARED(nghost,intratx,intraty,hx,hy,naux,listsp)
!$OMP&            SHARED(node,rnode,dtlevnew,numgrids,listgrids)
!$OMP&            SHARED(listOfGrids,listStart,levSt)
!$OMP&            SCHEDULE (DYNAMIC,1)
!$OMP&            DEFAULT(none)
      do j = 1, numgrids(level)
         !mptr   = listgrids(j)
         levst = liststart(level)
         mptr  = listofgrids(levst+j-1)
         nx    = node(ndihi,mptr) - node(ndilo,mptr) + 1
         ny    = node(ndjhi,mptr) - node(ndjlo,mptr) + 1
         mitot = nx + 2*nghost
         mjtot = ny + 2*nghost
c
         call par_advanc(mptr,mitot,mjtot,nvar,naux,dtnew)
!$OMP CRITICAL (newdt)
         dtlevnew = dmin1(dtlevnew,dtnew)
!$OMP END CRITICAL (newdt)

      end do
!$OMP END PARALLEL DO
c
      call system_clock(clock_finish,clock_rate)
      call cpu_time(cpu_finish)
      tvoll(level) = tvoll(level) + clock_finish - clock_start
      tvollcpu(level) = tvollcpu(level) + cpu_finish - cpu_start
      timestepgrid = timestepgrid + clock_finish - clock_startstepgrid
      timestepgridcpu=timestepgridcpu+cpu_finish-cpu_startstepgrid

      cflmax = dmax1(cflmax, cfl_level)

c
      return
      end
c
c -------------------------------------------------------------
c
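c     prepgrids fills listgrids with the pointers of all grids at
c     the given level by following the levelptr links starting from
c     lstart(level).  (advanc now uses listOfGrids/listStart instead;
c     its call above is commented out.)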
      subroutine prepgrids(listgrids, num, level)

      use amr_module
      implicit double precision (a-h,o-z)
      integer listgrids(num)

      mptr = lstart(level)
      do j = 1, num
         listgrids(j) = mptr
         mptr = node(levelptr, mptr)
      end do

      if (mptr .ne. 0) then
         write(*,*)" Error in routine setting up grid array "
         stop
      endif

      return
      end

c
c --------------------------------------------------------------
c
      subroutine par_advanc (mptr,mitot,mjtot,nvar,naux,dtnew)
c
      use amr_module
      use gauges_module, only: update_gauges, num_gauges
      implicit double precision (a-h,o-z)


      integer omp_get_thread_num, omp_get_max_threads
      integer mythread/0/, maxthreads/1/

      double precision fp(nvar,mitot,mjtot),fm(nvar,mitot,mjtot)
      double precision gp(nvar,mitot,mjtot),gm(nvar,mitot,mjtot)


c
c  :::::::::::::: PAR_ADVANC ::::::::::::::::::::::::::::::::::::::::::
c  integrate this grid. grids are done in parallel.
c  this extra subr. allows stack based allocation of the flux
c  arrays, which are only needed temporarily.  if the alloc array
c  were used for them it would need too long a lendim, make too
c  big a checkpoint file, and require a big critical section.
c  ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
c
      level = node(nestlevel,mptr)
      hx    = hxposs(level)
      hy    = hyposs(level)
      delt  = possk(level)
      nx    = node(ndihi,mptr) - node(ndilo,mptr) + 1
      ny    = node(ndjhi,mptr) - node(ndjlo,mptr) + 1
      time  = rnode(timemult,mptr)

!$    mythread = omp_get_thread_num()

      locold = node(store2, mptr)
      locnew = node(store1, mptr)

c
c  copy old soln. values into next time step's soln. values
c  since integrator will overwrite it. only for grids not at
c  the finest level. finest level grids do not maintain copies
c  of old and new time solution values.
c
      if (level .lt. mxnest) then
         ntot = mitot * mjtot * nvar
cdir$ ivdep
         do 10 i = 1, ntot
 10         alloc(locold + i - 1) = alloc(locnew + i - 1)
      endif
c
      xlow = rnode(cornxlo,mptr) - nghost*hx
      ylow = rnode(cornylo,mptr) - nghost*hy

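c     count the interior cells advanced on this grid for the work
c     statistics; done in a critical section since grids are
c     advanced in parallel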
!$OMP CRITICAL(rv)
      rvol = rvol + nx * ny
      rvoll(level) = rvoll(level) + nx * ny
!$OMP END CRITICAL(rv)


      locaux = node(storeaux,mptr)
c
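c     ffluxptr .ne. 0 means this grid abuts a coarser level:  qad
c     updates the saved boundary flux arrays along this grid's
c     perimeter (using the coarse values saved by saveqc) for the
c     later conservation fixup.  lenbc is the perimeter length
c     measured in coarse cells.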
      if (node(ffluxptr,mptr) .ne. 0) then
         lenbc  = 2*(nx/intratx(level-1)+ny/intraty(level-1))
         locsvf = node(ffluxptr,mptr)
         locsvq = locsvf + nvar*lenbc
         locx1d = locsvq + nvar*lenbc
         call qad(alloc(locnew),mitot,mjtot,nvar,
     1            alloc(locsvf),alloc(locsvq),lenbc,
     2            intratx(level-1),intraty(level-1),hx,hy,
     3            naux,alloc(locaux),alloc(locx1d),delt,mptr)
      endif

c     # See if the grid about to be advanced has gauge data to output.
c     # This corresponds to the previous time step, but the output is
c     # done now to make linear interpolation easier, since the grid
c     # now has its boundary conditions filled in.

c     the io in print_gauges used to be a critical section; it no
c     longer is, since each gauge now has its own array.

      if (num_gauges > 0) then
         call update_gauges(alloc(locnew:locnew+nvar*mitot*mjtot),
     .                      alloc(locaux:locaux+nvar*mitot*mjtot),
     .                      xlow,ylow,nvar,mitot,mjtot,naux,mptr)
      endif

c
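c     advance the grid one step of delt.  dimensional_split selects
c     the integrator; both versions return a suggested new time step
c     dtnew based on the cfl condition.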
      if (dimensional_split .eq. 0) then
c        # Unsplit method
         call stepgrid(alloc(locnew),fm,fp,gm,gp,
     2                 mitot,mjtot,nghost,
     3                 delt,dtnew,hx,hy,nvar,
     4                 xlow,ylow,time,mptr,naux,alloc(locaux))
      else if (dimensional_split .eq. 1) then
c        # Godunov splitting
         call stepgrid_dimsplit(alloc(locnew),fm,fp,gm,gp,
     2                 mitot,mjtot,nghost,
     3                 delt,dtnew,hx,hy,nvar,
     4                 xlow,ylow,time,mptr,naux,alloc(locaux))
      else
c        # should never get here due to check in amr2
         write(6,*) '*** Strang splitting not supported'
         stop
      endif

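c     save fluxes for the conservation fixup:  cfluxptr .ne. 0 means
c     a finer level covers part of this grid, so fluxsv records this
c     grid's fluxes under fine grid boundaries; ffluxptr .ne. 0 means
c     this grid abuts a coarser grid, so fluxad adds its boundary
c     fluxes into the register shared with the coarse level.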
      if (node(cfluxptr,mptr) .ne. 0)
     2   call fluxsv(mptr,fm,fp,gm,gp,
     3               alloc(node(cfluxptr,mptr)),mitot,mjtot,
     4               nvar,listsp(level),delt,hx,hy)
      if (node(ffluxptr,mptr) .ne. 0) then
         lenbc  = 2*(nx/intratx(level-1)+ny/intraty(level-1))
         locsvf = node(ffluxptr,mptr)
         call fluxad(fm,fp,gm,gp,
     2               alloc(locsvf),mptr,mitot,mjtot,nvar,
     4               lenbc,intratx(level-1),intraty(level-1),
     5               nghost,delt,hx,hy)
      endif
c
c     write(outunit,969) mythread,delt, dtnew
c 969  format(" thread ",i4," updated by ",e15.7, " new dt ",e15.7)
      rnode(timemult,mptr) = rnode(timemult,mptr)+delt
c
      return
      end