2D AMRCLAW
step2_sweep.f90
subroutine step2(maxm,meqn,maux,mbc,mx,my,qold,aux,dx,dy,dt, &
                 cflgrid,fm,fp,gm,gp,rpn2,rpt2)
!
!     clawpack routine ...  modified for AMRCLAW
!
!     Take one time step, updating q.
!     On entry, qold gives
!        initial data for this step
!        and is unchanged in this version.
!
!     fm, fp are fluxes to left and right of single cell edge
!     See the flux2 documentation for more information.
!
!     Converted to f90 2012-1-04 (KTM)
!
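!     Note (added, not part of the original routine): the accumulated fluxes
!     fm, fp, gm, gp returned here are meant to be applied by the caller
!     (in AMRCLAW, a driver such as stepgrid) in a conservative
!     flux-differencing update, roughly of the form
!
!         qnew(m,i,j) = qold(m,i,j) - dtdx*(fm(m,i+1,j) - fp(m,i,j)) &
!                                   - dtdy*(gm(m,i,j+1) - gp(m,i,j))
!
!     with dtdx, dtdy divided by the capacity aux(mcapa,i,j) when mcapa > 0.
!     This is an illustrative sketch; variable names follow this file's
!     conventions rather than any particular caller.
!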

    use amr_module

    implicit none

    external rpn2, rpt2

    ! Arguments
    integer, intent(in) :: maxm,meqn,maux,mbc,mx,my
    real(kind=8), intent(in) :: dx,dy,dt
    real(kind=8), intent(inout) :: cflgrid
    real(kind=8), intent(inout) :: qold(meqn, 1-mbc:mx+mbc, 1-mbc:my+mbc)
    real(kind=8), intent(inout) :: aux(maux, 1-mbc:mx+mbc, 1-mbc:my+mbc)
    real(kind=8), intent(inout) :: fm(meqn, 1-mbc:mx+mbc, 1-mbc:my+mbc)
    real(kind=8), intent(inout) :: fp(meqn, 1-mbc:mx+mbc, 1-mbc:my+mbc)
    real(kind=8), intent(inout) :: gm(meqn, 1-mbc:mx+mbc, 1-mbc:my+mbc)
    real(kind=8), intent(inout) :: gp(meqn, 1-mbc:mx+mbc, 1-mbc:my+mbc)

    ! Local storage for flux accumulation
    real(kind=8) :: faddm(meqn,1-mbc:maxm+mbc)
    real(kind=8) :: faddp(meqn,1-mbc:maxm+mbc)
    real(kind=8) :: gaddm(meqn,1-mbc:maxm+mbc,2)
    real(kind=8) :: gaddp(meqn,1-mbc:maxm+mbc,2)

    ! Scratch storage for sweeps and Riemann problems
    real(kind=8) :: q1d(meqn,1-mbc:maxm+mbc)
    real(kind=8) :: aux1(maux,1-mbc:maxm+mbc)
    real(kind=8) :: aux2(maux,1-mbc:maxm+mbc)
    real(kind=8) :: aux3(maux,1-mbc:maxm+mbc)
    real(kind=8) :: dtdx1d(1-mbc:maxm+mbc)
    real(kind=8) :: dtdy1d(1-mbc:maxm+mbc)

    real(kind=8) :: wave(meqn, mwaves, 1-mbc:maxm+mbc)
    real(kind=8) :: s(mwaves, 1-mbc:maxm+mbc)
    real(kind=8) :: amdq(meqn,1-mbc:maxm+mbc)
    real(kind=8) :: apdq(meqn,1-mbc:maxm+mbc)
    real(kind=8) :: cqxx(meqn,1-mbc:maxm+mbc)
    real(kind=8) :: bmadq(meqn,1-mbc:maxm+mbc)
    real(kind=8) :: bpadq(meqn,1-mbc:maxm+mbc)

    ! Looping scalar storage
    integer :: i,j,thread_num
    real(kind=8) :: dtdx,dtdy,cfl1d

    ! Common block storage
    integer :: icom,jcom
    real(kind=8) :: dtcom,dxcom,dycom,tcom
    common /comxyt/ dtcom,dxcom,dycom,tcom,icom,jcom

    ! Store mesh parameters in common block
    dxcom = dx
    dycom = dy
    dtcom = dt

    cflgrid = 0.d0
    dtdx = dt/dx
    dtdy = dt/dy

    fm = 0.d0
    fp = 0.d0
    gm = 0.d0
    gp = 0.d0

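    ! Note (added explanatory comment): both sweeps below read the same qold,
    ! so this is the unsplit wave-propagation update. Each sweep only
    ! accumulates flux corrections into fm, fp, gm, gp, while cflgrid records
    ! the largest Courant number |s|*dt/dx (or |s|*dt/dy) encountered on this
    ! grid so the caller can compare it with the allowed CFL limit.
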
    ! ==========================================================================
    ! Perform X-Sweeps
    !$OMP PARALLEL DO PRIVATE(j,jcom,thread_num) &
    !$OMP             PRIVATE(faddm,faddp,gaddm,gaddp,q1d,dtdx1d) &
    !$OMP             PRIVATE(aux1,aux2,aux3) &
    !$OMP             PRIVATE(wave,s,amdq,apdq,cqxx,bmadq,bpadq) &
    !$OMP             PRIVATE(cfl1d) &
    !$OMP             SHARED(mx,my,maxm,maux,mcapa,mbc,meqn,dtdx) &
    !$OMP             SHARED(cflgrid,fm,fp,gm,gp,qold,aux) &
    !$OMP             DEFAULT(none)
    do j = 0,my+1

        ! For 1D AMR - cannot be used in conjunction with sweep threading
!       if (my == 1 .and. j /= 1) then
!           exit
!       endif

        ! Copy old q into 1d slice
        q1d(:,1-mbc:mx+mbc) = qold(:,1-mbc:mx+mbc,j)

        ! Set dtdx slice if a capacity array exists
        if (mcapa > 0) then
            dtdx1d(1-mbc:mx+mbc) = dtdx / aux(mcapa,1-mbc:mx+mbc,j)
        else
            dtdx1d = dtdx
        endif
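        ! Note (added explanatory comment): when mcapa > 0, aux(mcapa,:,:)
        ! holds the capacity function kappa (e.g. a cell-area ratio on a
        ! mapped grid), and the wave-propagation update needs dt/(dx*kappa)
        ! in place of dt/dx, which is why dtdx1d is divided by it above.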

        ! Copy aux array into slices
        if (maux > 0) then
            aux1(:,1-mbc:mx+mbc) = aux(:,1-mbc:mx+mbc,j-1)
            aux2(:,1-mbc:mx+mbc) = aux(:,1-mbc:mx+mbc,j  )
            aux3(:,1-mbc:mx+mbc) = aux(:,1-mbc:mx+mbc,j+1)
        endif

        ! Store value of j along the slice into common block
        ! *** WARNING *** This may not work with threading
        jcom = j

        ! Compute modifications fadd and gadd to fluxes along this slice:
        call flux2(1,maxm,meqn,maux,mbc,mx,q1d,dtdx1d,aux1,aux2,aux3, &
                   faddm,faddp,gaddm,gaddp,cfl1d,wave,s, &
                   amdq,apdq,cqxx,bmadq,bpadq,rpn2,rpt2)

        !$OMP CRITICAL (cfl_row_x)
        cflgrid = max(cflgrid,cfl1d)
        !$OMP END CRITICAL (cfl_row_x)

        ! Update fluxes
        !$OMP CRITICAL (flux_accumulation_x)
        fm(:,1:mx+1,j) = fm(:,1:mx+1,j) + faddm(:,1:mx+1)
        fp(:,1:mx+1,j) = fp(:,1:mx+1,j) + faddp(:,1:mx+1)
        gm(:,1:mx+1,j) = gm(:,1:mx+1,j) + gaddm(:,1:mx+1,1)
        gp(:,1:mx+1,j) = gp(:,1:mx+1,j) + gaddp(:,1:mx+1,1)
        gm(:,1:mx+1,j+1) = gm(:,1:mx+1,j+1) + gaddm(:,1:mx+1,2)
        gp(:,1:mx+1,j+1) = gp(:,1:mx+1,j+1) + gaddp(:,1:mx+1,2)
        !$OMP END CRITICAL (flux_accumulation_x)

    enddo
    !$OMP END PARALLEL DO
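
    ! Note (added explanatory comment): the flux accumulation above is done
    ! inside a critical section because the transverse corrections
    ! gaddm(:,:,2) and gaddp(:,:,2) computed for row j update gm, gp in row
    ! j+1, which the thread handling row j+1 also updates; without the
    ! critical section two threads could write the same cells at once.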

    ! ==========================================================================
    ! y-sweeps
    !
    !$OMP PARALLEL DO PRIVATE(i,icom) &
    !$OMP             PRIVATE(faddm,faddp,gaddm,gaddp,q1d,dtdy1d) &
    !$OMP             PRIVATE(aux1,aux2,aux3) &
    !$OMP             PRIVATE(wave,s,amdq,apdq,cqxx,bmadq,bpadq) &
    !$OMP             PRIVATE(cfl1d) &
    !$OMP             SHARED(mx,my,maxm,maux,mcapa,mbc,meqn,dtdy) &
    !$OMP             SHARED(cflgrid,fm,fp,gm,gp,qold,aux) &
    !$OMP             DEFAULT(none)
    do i = 0,mx+1

        ! Copy data along a slice into 1d arrays:
        q1d(:,1-mbc:my+mbc) = qold(:,i,1-mbc:my+mbc)

        ! Set dt/dy ratio in slice
        if (mcapa > 0) then
            dtdy1d(1-mbc:my+mbc) = dtdy / aux(mcapa,i,1-mbc:my+mbc)
        else
            dtdy1d = dtdy
        endif

        ! Copy aux slices
        if (maux > 0) then
            aux1(:,1-mbc:my+mbc) = aux(:,i-1,1-mbc:my+mbc)
            aux2(:,1-mbc:my+mbc) = aux(:,i  ,1-mbc:my+mbc)
            aux3(:,1-mbc:my+mbc) = aux(:,i+1,1-mbc:my+mbc)
        endif

        ! Store the value of i along this slice in the common block
        ! *** WARNING *** This may not work with threading
        icom = i

        ! Compute modifications fadd and gadd to fluxes along this slice
        call flux2(2,maxm,meqn,maux,mbc,my,q1d,dtdy1d,aux1,aux2,aux3, &
                   faddm,faddp,gaddm,gaddp,cfl1d,wave,s,amdq,apdq,cqxx, &
                   bmadq,bpadq,rpn2,rpt2)

        !$OMP CRITICAL (cfl_row_y)
        cflgrid = max(cflgrid,cfl1d)
        !$OMP END CRITICAL (cfl_row_y)

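        ! Note (added explanatory comment): in this y-sweep flux2 is called
        ! with its first argument ixy = 2, so faddm/faddp are corrections to
        ! the y-fluxes gm, gp along column i, while gaddm/gaddp carry the
        ! transverse corrections to the x-fluxes fm, fp in columns i and i+1;
        ! hence the apparent swap in the assignments below.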
        ! Update fluxes
        !$OMP CRITICAL (flux_accumulation_y)
        gm(:,i,1:my+1) = gm(:,i,1:my+1) + faddm(:,1:my+1)
        gp(:,i,1:my+1) = gp(:,i,1:my+1) + faddp(:,1:my+1)
        fm(:,i,1:my+1) = fm(:,i,1:my+1) + gaddm(:,1:my+1,1)
        fp(:,i,1:my+1) = fp(:,i,1:my+1) + gaddp(:,1:my+1,1)
        fm(:,i+1,1:my+1) = fm(:,i+1,1:my+1) + gaddm(:,1:my+1,2)
        fp(:,i+1,1:my+1) = fp(:,i+1,1:my+1) + gaddp(:,1:my+1,2)
        !$OMP END CRITICAL (flux_accumulation_y)

    enddo
    !$OMP END PARALLEL DO

end subroutine step2