!------------------------------------------------------------------------------|
!------------------------------------------------------------------------------|
! |
! ||===\\ |
! || \\ |
! || || //==\\ || || //==|| ||/==\\ |
! || || || || || || || || || || |
! || // || || || || || || || |
! ||===// \\==// \\==\\ \\==\\ || |
! |
!------------------------------------------------------------------------------|
!------------------------------------------------------------------------------|
! |
! MOVE_CLOUD Apr. 2007 |
! |
!------------------------------------------------------------------------------|
!------------------------------------------------------------------------------|
subroutine move_cloud (params,cl,ov)
!------------------------------------------------------------------------------|
!(((((((((((((((( Purpose of the routine ))))))))))))))))))))))))))))))))))))))
!------------------------------------------------------------------------------|
! subroutine to move a set of particles through a velocity
! field known at the nodes of an octree
! cl is the cloud of points to be moved
! ov is the velocity octree structure:
! - ov%octree is the octree and ov%noctree is its size
! - ov%unode, ov%vnode and ov%wnode are the components of the velocity
! - ov%icon is the connectivity matrix on the octree
! - ov%nleaves is the number of leaves in the octree
! params%dt is the time step
! params%niter_move is the number of iterations used to move the particles;
! it is read in input.txt
! here we use a simple method that performs a given number (niter_move)
! of iterations of a mid-point algorithm
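! as a sketch (notation only; u denotes the velocity interpolated from
! the octree nodes):
!   x_(k+1) = x_0 + dt * u( (x_0 + x_k)/2 ) ,  k = 1, ..., niter_move
! i.e. the velocity is re-evaluated at the mid-point between the old
! position and the current estimate of the new position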
!------------------------------------------------------------------------------|
!(((((((((((((((( declaration of the subroutine arguments ))))))))))))))))))))
!------------------------------------------------------------------------------|
use definitions
implicit none
type (parameters) params
type (cloud) cl
type (octreev) ov
!------------------------------------------------------------------------------|
!(((((((((((((((( declaration of the subroutine internal variables )))))))))))))
!------------------------------------------------------------------------------|
integer ierr,iproc,nproc,i,iter,err
double precision u,v,w,rat
double precision x0,y0,z0,xi,yi,zi,x,y,z
double precision,dimension(:),allocatable::xbuf,ybuf,zbuf
!------------------------------------------------------------------------------|
!------------------------------------------------------------------------------|
INCLUDE 'mpif.h'
call mpi_comm_size (mpi_comm_world,nproc,ierr)
call mpi_comm_rank (mpi_comm_world,iproc,ierr)
allocate (xbuf(cl%np),stat=err) ; if (err.ne.0) call stop_run ('Error alloc xbuf in move_cloud$')
allocate (ybuf(cl%np),stat=err) ; if (err.ne.0) call stop_run ('Error alloc ybuf in move_cloud$')
allocate (zbuf(cl%np),stat=err) ; if (err.ne.0) call stop_run ('Error alloc zbuf in move_cloud$')
xbuf=0.d0
ybuf=0.d0
zbuf=0.d0
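! rat shrinks the coordinates by a tiny factor (1-1.d-10), presumably so
! that points sitting exactly on the far faces of the unit cube fall
! strictly inside the octree for interpolation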
rat=1.d0-1.d-10
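! particles are distributed among the MPI processes in round-robin
! fashion: process iproc handles particles iproc+1, iproc+1+nproc, ...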
do i=1+iproc,cl%np,nproc
u=0.d0
v=0.d0
w=0.d0
x0=cl%x(i)*rat
y0=cl%y(i)*rat
z0=cl%z(i)*rat
x=x0
y=y0
z=z0
do iter=1,params%niter_move
xi=(x0+x)/2.d0
yi=(y0+y)/2.d0
zi=(z0+z)/2.d0
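! interpolate the three velocity components at the mid-point in a single
! octree traversal (the commented calls below appear to be the equivalent
! older per-component version)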
call octree_interpolate_many(3,ov%octree,ov%noctree,ov%icon,ov%nleaves,ov%nnode,xi,yi,zi,ov%unode,u,ov%vnode,v,ov%wnode,w)
! call octree_interpolate3 (ov%octree,ov%noctree,ov%icon,ov%nleaves,ov%unode,ov%x,ov%y,ov%z,ov%nnode,xi,yi,zi,u)
! call octree_interpolate3 (ov%octree,ov%noctree,ov%icon,ov%nleaves,ov%vnode,ov%x,ov%y,ov%z,ov%nnode,xi,yi,zi,v)
! call octree_interpolate3 (ov%octree,ov%noctree,ov%icon,ov%nleaves,ov%wnode,ov%x,ov%y,ov%z,ov%nnode,xi,yi,zi,w)
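! update a coordinate only when the particle is strictly inside the unit
! cube in that direction (x*(x-1)<0 iff 0<x<1); particles sitting on a
! face stay pinned to it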
if (cl%x(i)*(cl%x(i)-1.d0).lt.0.d0) x=x0+params%dt*u
if (cl%y(i)*(cl%y(i)-1.d0).lt.0.d0) y=y0+params%dt*v
if (cl%z(i)*(cl%z(i)-1.d0).lt.0.d0) z=z0+params%dt*w
enddo
xbuf(i)=x
ybuf(i)=y
zbuf(i)=z
enddo
cl%x=0.d0
cl%y=0.d0
cl%z=0.d0
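! each process filled only its own subset of the buffers (zeros
! elsewhere), so a global sum gathers the new positions on all processes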
call mpi_allreduce (xbuf,cl%x,cl%np,mpi_double_precision,mpi_sum,mpi_comm_world,ierr)
call mpi_allreduce (ybuf,cl%y,cl%np,mpi_double_precision,mpi_sum,mpi_comm_world,ierr)
call mpi_allreduce (zbuf,cl%z,cl%np,mpi_double_precision,mpi_sum,mpi_comm_world,ierr)
deallocate (xbuf)
deallocate (ybuf)
deallocate (zbuf)
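! clamp the new positions to the unit cube [0,1]^3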
cl%x=max(cl%x,0.d0)
cl%x=min(cl%x,1.d0)
cl%y=max(cl%y,0.d0)
cl%y=min(cl%y,1.d0)
cl%z=max(cl%z,0.d0)
cl%z=min(cl%z,1.d0)
return
end subroutine move_cloud
!------------------------------------------------------------------------------|
!------------------------------------------------------------------------------|