diff --git a/user/psava/Msmopick.py b/user/psava/Msmopick.py
index 35c5474fc..e344098e2 100755
--- a/user/psava/Msmopick.py
+++ b/user/psava/Msmopick.py
@@ -4,8 +4,40 @@
 '''
 import rsf.api as rsf
 import numpy as np
+from scipy.sparse import spdiags
+from scipy.sparse.linalg import spsolve
 import sys
 
+# ------------------------------------------------------------
+def idnop1D(n):
+    e = np.ones( n, dtype='float')
+    Lopr = spdiags([e], [0], n,n)
+    return Lopr
+
+def idnop2D(nx,nz):
+    n = nx*nz
+    e = np.ones( n, dtype='float')
+
+    Lopr = spdiags([e], [0], n,n)
+    return Lopr
+# ------------------------------------------------------------
+def lapop1D(n):
+    e = np.ones( n, dtype='float')
+    Lopr = spdiags([e, -2*e, e], [-1, 0, +1], n,n)
+    return Lopr
+
+def lapop2D(nx,nz):
+    n = nx*nz
+    e = np.ones( n, dtype='float')
+
+    u = e.copy()
+    u[::nz]=0
+
+    l = e.copy()
+    l[nz-1::nz]=0
+
+    Lopr = spdiags([e, l, -4*e, u, e], [-nz, -1, 0, +1, +nz], n,n)
+    return Lopr
 
 # ------------------------------------------------------------
 def myMean(a,n):
@@ -33,13 +65,16 @@ def myMedian(a,n):
         b[i] = np.median( a[wLO:wHI] )
     return b
 
+# ------------------------------------------------------------
-
+# ------------------------------------------------------------
 par = rsf.Par()
 
 verb = par.bool('verb',False) # verbosity flag
-mean = par.bool('mean',False) # verbosity flag
-nmed = par.int('ns',1) # number of filter samples
+mode = par.int ('mode',0) # smoothing mode
+
+if mode == 0 or mode == 1:
+    nwin = par.int('nwin',1) # window size (mean & median)
 
 # ------------------------------------------------------------
 Fin = rsf.Input() # input file
@@ -47,14 +82,19 @@ def myMedian(a,n):
 o1 = Fin.float("o1")
 d1 = Fin.float("d1")
 
-nd = Fin.size(1); # number of traces
+n2 = Fin.int ("n2")
+o2 = Fin.float("o2")
+d2 = Fin.float("d2")
+
+nd = n2 # number of samples
 nm = nd
 
 # ------------------------------------------------------------
 Fou = rsf.Output() # output file
-Fou.put("n1",nd)
-Fou.put("o1",0.0)
-Fou.put('d1',1.0)
+
+Fou.put("n1",n2)
+Fou.put("o1",o2)
+Fou.put('d1',d2)
 
 Fou.put("n2",3)
 Fou.put("n3",1)
@@ -68,17 +108,79 @@ def myMedian(a,n):
 wgh = np.zeros(nd,'f') # weights
 
 # ------------------------------------------------------------
+l1 = 0.5 * n1*d1 # xcorr time window
+
 for i in range(nd):
     Fin.read(din)
 
     pck[i] = o1 + np.argmax(din) * d1 # raw picks
     wgh[i] = np.max(din) # data weight
 
-#spk = signal.medfilt(pck,2751)
-if mean:
-    spk = myMean(pck,nmed)
-else:
-    spk = myMedian(pck,nmed)
+wgh /= np.max(wgh)
+pckmed = np.median(pck)
+#print(pckmed,file=sys.stderr)
+
+wgh = np.where(np.abs(pck) < 0.9*l1, wgh, 0.0)
+pck = np.where(np.abs(pck) < 0.9*l1, pck, pckmed)
+
+if mode == 0: # use mean
+    print("USE MEAN",mode,file=sys.stderr)
+    spk = myMean(pck,nwin)
+
+elif mode == 1: # use median
+    print("USE MEDIAN",mode,file=sys.stderr)
+    spk = myMedian(pck,nwin)
+
+else: # solve inverse problem
+    print("USE INVERSION",mode,file=sys.stderr)
+
+    #print(np.mean(pck),file=sys.stderr)
+    #print(np.mean(wgh),file=sys.stderr)
+
+    # ------------------------------------------------------------
+    mbar = pck*0 + pckmed # reference model
+    dbar = pck            # rough picks
+
+    WDop = spdiags(wgh,[0],nd,nd) # data weight operator
+    Gop  = idnop1D(nd)            # mapping operator
+    Rop  = lapop1D(nm)            # regularization operator
+
+    # ------------------------------------------------------------
+    # L-curve
+    # ------------------------------------------------------------
+    ne = par.int ('ne',1)
+    oe = par.float('oe',0.0)
+    de = par.float('de',+0.1)
+    ee = np.arange(oe, oe+ne*de, de)
+    se = np.power(10,ee)
+
+    ME = np.zeros( (nm,ne), dtype='float')
+    rd = np.zeros( ne, dtype='float')
+    rm = np.zeros( ne, dtype='float')
+
+    for ie in range(ne):
+
+        # scale the regularization operator
+        WMop = Rop / se[ie]
+
+        # solve the IP
+        modE = spsolve( (WDop*Gop).T * (WDop*Gop) + WMop.T * WMop , \
+                        (WDop*Gop).T * WDop * dbar + WMop.T * WMop * mbar)
+
+        # store the model
+        ME[:,ie] = modE
+
+        # compute residual norms
+        rd[ie] = np.linalg.norm( WDop * (Gop * modE - dbar))
+        rm[ie] = np.linalg.norm( Rop * ( modE - mbar))
+
+    rdn = rd / np.max(rd) # normalized the data residual norm
+    rmn = rm / np.max(rm) # normalized the model residual norm
+    rrn = rdn**2 + rmn**2 # compute the distance from origin
+
+    je = np.argmin(rrn) # index of the optimal model
+    spk = ME[:,je] # optimal model
+
 # ------------------------------------------------------------
 # write picks and weights
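
A standalone sketch of the masking used in the new lapop2D operator: for an nx*nz grid stored with z fastest, the u and l vectors zero the +1/-1 couplings that would otherwise wrap around between the bottom of one z column and the top of the next. The tiny nx=2, nz=3 grid below is made up purely for display; the construction itself mirrors the patch.

import numpy as np
from scipy.sparse import spdiags

def lapop2D(nx, nz):
    # 5-point Laplacian on an nx*nz grid stored z-fastest (as in the patch);
    # u and l break the spurious couplings across column boundaries
    n = nx*nz
    e = np.ones(n)
    u = e.copy(); u[::nz] = 0
    l = e.copy(); l[nz-1::nz] = 0
    return spdiags([e, l, -4*e, u, e], [-nz, -1, 0, +1, +nz], n, n)

print(lapop2D(2, 3).toarray())   # zeros at (2,3) and (3,2) show the masking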
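
The inversion branch (any mode other than 0 or 1) replaces the running mean/median with a weighted least-squares fit of the picks under a Laplacian roughness penalty: it scans ne regularization scales 10**(oe + ie*de) and keeps the model whose normalized data and model residual norms fall closest to the origin of the L-curve. Below is a minimal standalone sketch of that scheme with synthetic picks and made-up sizes and weights; none of the names or numbers come from the program's RSF input.

import numpy as np
from scipy.sparse import spdiags
from scipy.sparse.linalg import spsolve

def idnop1D(n):
    return spdiags([np.ones(n)], [0], n, n)

def lapop1D(n):
    e = np.ones(n)
    return spdiags([e, -2*e, e], [-1, 0, +1], n, n)

# synthetic rough picks: smooth trend + noise + a few outliers
nd = 200
t  = np.linspace(0.0, 1.0, nd)
dbar = 0.5 + 0.1*np.sin(2*np.pi*t) + 0.01*np.random.randn(nd)
dbar[::37] += 0.3                       # outliers
wgh  = np.ones(nd); wgh[::37] = 0.0     # zero weight on the outliers
mbar = np.full(nd, np.median(dbar))     # reference model

WDop = spdiags(wgh, [0], nd, nd)        # data weight operator
Gop  = idnop1D(nd)                      # mapping operator
Rop  = lapop1D(nd)                      # regularization operator

se = np.power(10.0, np.arange(-2.0, 2.0, 0.25))   # regularization scales
ME, rd, rm = [], [], []
for s in se:
    WMop = Rop / s
    A = (WDop*Gop).T * (WDop*Gop) + WMop.T * WMop
    b = (WDop*Gop).T * (WDop*dbar) + WMop.T * (WMop*mbar)
    m = spsolve(A.tocsc(), b)
    ME.append(m)
    rd.append(np.linalg.norm(WDop * (Gop*m - dbar)))
    rm.append(np.linalg.norm(Rop * (m - mbar)))

rdn = np.array(rd)/np.max(rd)           # normalized data residual norm
rmn = np.array(rm)/np.max(rm)           # normalized model residual norm
je  = np.argmin(rdn**2 + rmn**2)        # L-curve point closest to the origin
spk = ME[je]                            # smoothed picks
print("chosen scale:", se[je])

As in the patch, the normal equations are formed explicitly and handed to spsolve; for large pick vectors an iterative solver such as scipy.sparse.linalg.lsqr on the stacked system would be an alternative that avoids squaring the condition number.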