diff --git a/tetrax/core/experimental_setup.py b/tetrax/core/experimental_setup.py
index 6636b4a755fef90daae0ea74e7582289ec06a64a..ce0f02c847ad61b02f9da0cbe202898585ad8160 100644
--- a/tetrax/core/experimental_setup.py
+++ b/tetrax/core/experimental_setup.py
@@ -104,13 +104,15 @@ class ExperimentalSetup:
         self.sample.mag = minimize_gibbs_free_energy(self.sample, self._Bext, tol_cg=tol, return_last=return_last,
                                                      continue_least_with_squares=continue_least_with_squares)
 
-    def eigenmodes(self, kmin=-40e6, kmax=40e6, Nk=81, num_modes=20, k=None, no_dip=False, num_cpus=1, save_modes=True,
-                   save_local=False, save_magphase=False):
+    def eigenmodes(self, kmin=-40e6, kmax=40e6, Nk=81, num_modes=20, k=None, no_dip=False, h0_dip=False, num_cpus=1,
+                   save_modes=True,
+                   save_local=False, save_magphase=False, perturbed_dispersion=False, save_stiffness_fields=False):
         if self.sample.mag is None:
             print("Your sample does not have a magnetization field yet.")
         else:
             dispersion = self.sample.eigensolver(self.sample, self.Bext, self.name, kmin, kmax, Nk, num_modes, k,
-                                                 no_dip, num_cpus, save_modes, save_local, save_magphase)
+                                                 no_dip, h0_dip, num_cpus, save_modes, save_local, save_magphase,
+                                                 perturbed_dispersion, save_stiffness_fields)
             self._modes_available = save_modes
             self.dispersion = dispersion
             return dispersion
diff --git a/tetrax/core/operators.py b/tetrax/core/operators.py
index a3ddaeb23a01d21827f1cf98d37e07b1a8a3694f..68a265cb7f1d1c3445e3b4289699b580fd719868 100644
--- a/tetrax/core/operators.py
+++ b/tetrax/core/operators.py
@@ -187,6 +187,7 @@ class CubicAnisotropyLinearOperator(LinearOperator):
         self.Msat = sample.Msat
         self.nx = sample.nx
         self.m0 = sample.mag
+        self.sample = sample
         self.fac = 2 * self.Kc / (mu_0 * self.Msat ** 2)
 
         # homogenous anisotropy
@@ -197,12 +198,15 @@ class CubicAnisotropyLinearOperator(LinearOperator):
         self.C2 = flattened_mesh_vec_tensor_product(self.c2, self.c2)
         self.C3 = flattened_mesh_vec_tensor_product(self.c3, self.c3)
 
-        self.sparse_mat = self.make_sparse_mat()
+        self.sparse_mat = csr_matrix((3 * self.nx, 3 * self.nx))
+        self.shape = self.sparse_mat.shape
 
-    def make_sparse_mat(self):
-        # create bare dyads
+    def make_sparse_mat_with_current_m0(self):
+        # get current m0
+        self.m0 = self.sample.mag.T.flatten()
+        # create bare dyads
         M0 = flattened_mesh_vec_tensor_product(self.m0, self.m0)
 
         # these are spatially dependent!
@@ -216,9 +220,9 @@ class CubicAnisotropyLinearOperator(LinearOperator):
         # A factors are pulled into the tensor product since they are spatially dependent
         Nc1 = flattened_mesh_vec_tensor_product(A1 * self.c1, self.c1) + 2 * self.C1.dot(M0).dot(self.C2 + self.C3)
         Nc2 = flattened_mesh_vec_tensor_product(A2 * self.c2, self.c2) + 2 * self.C1.dot(M0).dot(self.C2 + self.C3)
-        Nc3 = flattened_mesh_vec_tensor_product(A1 * self.c3, self.c3) + 2 * self.C1.dot(M0).dot(self.C2 + self.C3)
+        Nc3 = flattened_mesh_vec_tensor_product(A3 * self.c3, self.c3) + 2 * self.C1.dot(M0).dot(self.C2 + self.C3)
 
-        return self.fac * (Nc1 + Nc2 + Nc3)
+        self.sparse_mat = self.fac * (Nc1 + Nc2 + Nc3)
 
     def set_new_param(self, conf, nx):
         """
diff --git a/tetrax/core/sample.py b/tetrax/core/sample.py
index 85192800f800be0fec3799c528d45d5aaf993185..3d9834cf650b5df76e2e64d4b0f97f8e99e553d6 100644
--- a/tetrax/core/sample.py
+++ b/tetrax/core/sample.py
@@ -6,7 +6,7 @@
 import numpy as np
 from .fem_core.cythoncore import get_matrices
 from .operators import ExchangeOperator, DipolarOperator, UniAxialAnisotropyOperator, BulkDMIOperator, \
-    InterfacialDMIOperator
+    InterfacialDMIOperator, CubicAnisotropyLinearOperator
 from ..experiments.eigen import calculate_normal_modes, not_implemented_eigensolver
 from ..helpers.io import check_mesh_shape
 from ..helpers.io import get_mesh_dimension
@@ -158,6 +158,7 @@ class FerromagnetMixin:
         self.N_uni = UniAxialAnisotropyOperator(self)
         self.N_DMI = BulkDMIOperator(self)
         self.N_iDMI = InterfacialDMIOperator(self)
+        self.N_cub = CubicAnisotropyLinearOperator(self)
 
     def show(self, show_node_labels=False, comp="vec", scale=5, show_mag=True):
         if self.mesh == None:
diff --git a/tetrax/experiments/eigen.py b/tetrax/experiments/eigen.py
index a65342faaf5bcb138d3f8f52ee64b9a5e1d0fe4a..a5f9a941a1046c37b9b5e0333daaff7a80adc923 100644
--- a/tetrax/experiments/eigen.py
+++ b/tetrax/experiments/eigen.py
@@ -7,15 +7,17 @@
 import numpy as np
 import pandas as pd
 import tqdm
 from scipy.constants import mu_0
-from scipy.sparse import csc_matrix
+from scipy.sparse import csc_matrix, csr_matrix
 from scipy.sparse.linalg import spilu, eigs
 
 from ..core.fem_core.cythoncore import rotation_matrix_py
 from ..core.operators import cross_operator, TotalDynMat, SuperLUInv, InvTotalDynMat
-from ..helpers.math import flattened_mesh_vec_scalar_product, diag_matrix_from_vec
+from ..helpers.math import flattened_mesh_vec_scalar_product, diag_matrix_from_vec, flattened_mesh_vec_scalar_product2d, \
+    matrix_elem
 
 
-def not_implemented_eigensolver(sample, Bext, exp_name, kmin=-40e6, kmax=40e6, Nk=81, num_modes=20, k=None, no_dip=False,num_cpus=1, save_modes=False,save_local=False, save_magphase=False):
+def not_implemented_eigensolver(sample, Bext, exp_name, kmin=-40e6, kmax=40e6, Nk=81, num_modes=20, k=None,
+                                no_dip=False, num_cpus=1, save_modes=False, save_local=False, save_magphase=False):
     print("This eigensolver will be implemented in a future release.")
 
 
@@ -24,18 +26,22 @@ def test_rotation(mag, rotation, nx):
    """
    Test of rotation matrix and equilibrium vector match, i.e. rotation rotates mag into the z direction.
    """
    test_ez = rotation.dot(mag)
-    e_z = np.concatenate([np.zeros(2*nx), np.ones(nx)])
+    e_z = np.concatenate([np.zeros(2 * nx), np.ones(nx)])
 
    if not np.allclose(test_ez, e_z):
        raise ValueError("Rotation matrix and equilibrium files don't belong together. "
" "Please create a new rotation matrix.") -def calculate_normal_modes(sample, Bext, exp_name, kmin=-40e6, kmax=40e6, Nk=81, num_modes=20, k=None, no_dip=False,num_cpus=1, save_modes=False,save_local=False, save_magphase=False): +def calculate_normal_modes(sample, Bext, exp_name, kmin=-40e6, kmax=40e6, Nk=81, num_modes=20, k=None, no_dip=False, + h0_dip=False, + num_cpus=1, save_modes=False, save_local=False, save_magphase=False, + perturbed_dispersion=False, save_stiffness_fields=False): """ 2D version """ - modes_path = "./{}/{}/mode-profiles".format(sample.name,exp_name) + experiment_path = "./{}/{}".format(sample.name, exp_name) + modes_path = "./{}/{}/mode-profiles".format(sample.name, exp_name) if not os.path.exists(modes_path): os.makedirs(modes_path) @@ -45,7 +51,8 @@ def calculate_normal_modes(sample, Bext, exp_name, kmin=-40e6, kmax=40e6, Nk=81, N_uni = sample.N_uni N_DMI = sample.N_DMI N_iDMI = sample.N_iDMI - + N_cub = sample.N_cub + N_cub.make_sparse_mat_with_current_m0() N_iDMI.set_k(0) N_DMI.set_k(0) @@ -58,7 +65,6 @@ def calculate_normal_modes(sample, Bext, exp_name, kmin=-40e6, kmax=40e6, Nk=81, rotation = rotation_matrix_py(mag) test_rotation(mag, rotation, sample.nx) - nx = sample.nx hext = Bext.T.flatten() / (mu_0 * sample.Msat) hexc = -N_exc.dot(mag) @@ -66,28 +72,29 @@ def calculate_normal_modes(sample, Bext, exp_name, kmin=-40e6, kmax=40e6, Nk=81, hDMI = -N_DMI.dot(mag) hiDMI = -N_iDMI.dot(mag) hani = -N_uni.dot(mag) - #hcub = N_cub.nonlinear_field(mag) + hcub = N_cub.nonlinear_field(mag) if no_dip: - heff = hexc + hext + hani + hDMI + hdip + hiDMI # ++ hcub + heff = hexc + hext + hani + hDMI + hiDMI + hcub + if h0_dip: + heff += hdip else: - heff = hexc + hdip + hext + hani + hDMI + hiDMI #+ hcub + heff = hexc + hdip + hext + hani + hDMI + hiDMI + hcub h0 = flattened_mesh_vec_scalar_product(heff, mag) + h0_avrg = np.sum(h0 * sample.dA) / np.sum(sample.dA) rotation = rotation[:2 * nx, :] rotation_T = rotation.transpose() - # create k and convert to mesh units if k == None: - k_ = np.linspace(kmin,kmax,Nk) * sample.scale + k_ = np.linspace(kmin, kmax, Nk) * sample.scale else: k_ = [k * sample.scale] - v0 = np.full(2*nx, 1+1j, dtype=complex) - + v0 = np.full(2 * nx, 1 + 1j, dtype=complex) D_tot = TotalDynMat(N_exc.sparse_mat, N_dip, 1j * mu0_cross[:2 * nx, :2 * nx].dot(rotation), rotation_T) @@ -95,7 +102,11 @@ def calculate_normal_modes(sample, Bext, exp_name, kmin=-40e6, kmax=40e6, Nk=81, bndint_order = 6 - modes_per_k_partial = partial(modes_per_k, N_exc, N_uni, N_DMI, N_iDMI, h0, Lambda, rotation, rotation_T, bndint_order, D_tot, v0, no_dip, sample.scale, sample.Msat, sample.gamma, sample, num_modes, modes_path, save_modes,save_local, save_magphase) + modes_per_k_partial = partial(modes_per_k, N_exc, N_uni, N_cub, N_DMI, N_iDMI, h0, h0_avrg, Lambda, rotation, + rotation_T, + bndint_order, D_tot, v0, no_dip, sample.scale, sample.Msat, sample.gamma, sample, + num_modes, modes_path, save_modes, save_local, save_magphase, perturbed_dispersion, + save_stiffness_fields) if num_cpus == 0 or num_cpus < -1: num_cpus = 1 @@ -110,28 +121,39 @@ def calculate_normal_modes(sample, Bext, exp_name, kmin=-40e6, kmax=40e6, Nk=81, else: pass - with mp.Pool(processes=num_cpus) as p: res_list = [] with tqdm.tqdm(total=len(k_)) as pbar: for i, res in enumerate(p.imap_unordered(modes_per_k_partial, k_)): res_list.append(res) pbar.update() - df_freqs = pd.concat(res_list,sort=False).sort_values("k (rad/m)").reset_index(drop=True).fillna("nan") + df_freqs = pd.concat(res_list, 
+                         sort=False).sort_values("k (rad/m)").reset_index(drop=True).fillna("nan")
+
+    if perturbed_dispersion:
+
+        if save_stiffness_fields:
+            df_freqs["h0_avrg"] = h0_avrg * np.ones_like(k_)
+
+        df_freqs.to_csv(experiment_path + "/dispersion_perturbedmodes.csv")
+
+    else:
+        df_freqs.to_csv(experiment_path + "/dispersion.csv")
 
    return df_freqs
 
 
-def modes_per_k(N_exc, N_uni, N_DMI, N_iDMI, h0, Lambda, rotation, rotation_T, bndint_order, D_tot, v0, no_dip, scale, Msat, gamma,sample, num_modes, modes_path, save_modes, save_local, save_magphase, k):
+def modes_per_k(N_exc, N_uni, N_cub, N_DMI, N_iDMI, h0, h0_avrg, Lambda, rotation, rotation_T, bndint_order, D_tot, v0,
+                no_dip, scale, Msat, gamma, sample, num_modes, modes_path, save_modes, save_local, save_magphase,
+                perturbed_dispersion, save_stiffness_fields, k):
    N_exc.set_k(k)
    N_DMI.set_k(k)
    N_iDMI.set_k(k)
 
-    N_nodip_k_mat = N_exc.sparse_mat + N_uni.sparse_mat + N_DMI.sparse_mat + N_iDMI.sparse_mat
-    N_nodip_k_mat += diag_matrix_from_vec(np.tile(h0, 3))
+    N_nodip_k_mat = N_exc.sparse_mat + N_uni.sparse_mat + N_DMI.sparse_mat + N_iDMI.sparse_mat + N_cub.sparse_mat
+    h0_operator = diag_matrix_from_vec(np.tile(h0, 3))
+    N_nodip_k_mat += h0_operator
 
    dyn_mat_k = 1j * Lambda.dot(rotation).dot(N_nodip_k_mat).dot(rotation_T)
-
    dyn_mat_k_csc = csc_matrix(dyn_mat_k)
 
    if no_dip:
@@ -157,8 +179,7 @@ def modes_per_k(N_exc, N_uni, N_DMI, N_iDMI, h0, Lambda, rotation, rotation_T, b
            error_success = this_error.already_sucess
 
            ef = np.zeros(num_modes)
-            eigenvectors = np.zeros((2*D_tot.nx, num_modes))
-
+            eigenvectors = np.zeros((2 * D_tot.nx, num_modes))
 
    ef_org = ef.copy()
@@ -174,7 +195,6 @@ def modes_per_k(N_exc, N_uni, N_DMI, N_iDMI, h0, Lambda, rotation, rotation_T, b
 
    k_m = k / scale
-
    nx = sample.nx
    cells = sample.mesh.cells
    xyz = sample.xyz
@@ -190,13 +210,13 @@ def modes_per_k(N_exc, N_uni, N_DMI, N_iDMI, h0, Lambda, rotation, rotation_T, b
            ev_lab_y = ev_lab[nx:-nx]
            ev_lab_z = ev_lab[-nx:]
 
-            save_dict = {"Re(m_x)" : ev_lab_x.real,
-                         "Im(m_x)" : ev_lab_x.imag,
-                         "Re(m_y)" : ev_lab_y.real,
-                         "Im(m_y)" : ev_lab_y.imag,
-                         "Re(m_z)" : ev_lab_z.real,
+            save_dict = {"Re(m_x)": ev_lab_x.real,
+                         "Im(m_x)": ev_lab_x.imag,
+                         "Re(m_y)": ev_lab_y.real,
+                         "Im(m_y)": ev_lab_y.imag,
+                         "Re(m_z)": ev_lab_z.real,
                         "Im(m_z)": ev_lab_z.imag,
-                        }
+                         }
 
            if save_local:
                ev_1 = ev[:nx]
@@ -222,7 +242,7 @@ def modes_per_k(N_exc, N_uni, N_DMI, N_iDMI, h0, Lambda, rotation, rotation_T, b
                save_dict["Arg(m_y)"] = np.angle(ev_lab_y)
                save_dict["Arg(m_z)"] = np.angle(ev_lab_z)
 
-            meshio.write_points_cells(filename="{}/mode_k{}radperum_{:03d}.vtk".format(modes_path,k_m * 1e-6, i),
+            meshio.write_points_cells(filename="{}/mode_k{}radperum_{:03d}.vtk".format(modes_path, k_m * 1e-6, i),
                                      points=xyz,
                                      cells=cells,
                                      point_data=save_dict
@@ -233,4 +253,70 @@ def modes_per_k(N_exc, N_uni, N_DMI, N_iDMI, h0, Lambda, rotation, rotation_T, b
 
    df_k = pd.DataFrame(columns=["k (rad/m)"] + ["f" + str(j) + " (GHz)" for j in range(len(ef))])
    df_k.loc[0] = [k_m] + np.real(ef).tolist()
-    return df_k
\ No newline at end of file
+
+    if perturbed_dispersion:
+        # for details of calculations here, see Phys. Rev. B 104, 174414
+        dA = sample.dA
+
+        D_tot.set_k(k, csr_matrix((3 * nx, 3 * nx)))
+
+        operator_dict = {
+            "exc": 1j * Lambda.dot(rotation).dot(N_exc.sparse_mat).dot(rotation_T),
+            "uni": 1j * Lambda.dot(rotation).dot(N_uni.sparse_mat).dot(rotation_T),
+            "cub": 1j * Lambda.dot(rotation).dot(N_cub.sparse_mat).dot(rotation_T),
+            "bDMI": 1j * Lambda.dot(rotation).dot(N_DMI.sparse_mat).dot(rotation_T),
+            "iDMI": 1j * Lambda.dot(rotation).dot(N_iDMI.sparse_mat).dot(rotation_T),
+            "dip": D_tot
+        }
+
+        for i in range(eigenvectors.shape[1]):
+            Im_N21_ = []
+            Re_N21_ = []
+            N11_ = []
+            N22_ = []
+            mode_local = eigenvectors[:, i]
+
+            # calculate orthonormal basis from mode profiles
+            s1 = np.concatenate([mode_local[:nx], np.zeros(nx)])
+            s2 = np.concatenate([np.zeros(nx), mode_local[nx:]])
+            s1 /= np.sqrt(np.sum(flattened_mesh_vec_scalar_product2d(np.conjugate(s1), s1) * dA))
+            s2 /= np.sqrt(np.sum(flattened_mesh_vec_scalar_product2d(np.conjugate(s2), s2) * dA))
+
+            for name in operator_dict:
+
+                # in Phys. Rev. B 104, 174414, this is D
+                partial_dynmat = operator_dict[name]
+
+                # Calculate matrix elements of diagonal part of partial dynamic matrices
+                C11 = matrix_elem(s1, s1, partial_dynmat, dA)
+                C22 = matrix_elem(s2, s2, partial_dynmat, dA)
+                C12 = matrix_elem(s1, s2, partial_dynmat, dA)
+                C21 = matrix_elem(s2, s1, partial_dynmat, dA)
+
+                # convert to stiffness fields
+                Im_N21 = 0.5 * (C11 + C22).real
+                Re_N21 = (1j * 0.5 * (C11 - C22)).real
+                N11 = (C21).real
+                N22 = (C12).real
+
+                Im_N21_.append(Im_N21)
+                Re_N21_.append(Re_N21)
+                N11_.append(N11)
+                N22_.append(N22)
+
+                if save_stiffness_fields:
+                    df_k = pd.concat([df_k, pd.DataFrame({
+                        f"Im(N21_{name})_{i}": [Im_N21],
+                        f"Re(N21_{name})_{i}": [Re_N21],
+                        f"N11_{name}_{i}": [N11],
+                        f"N22_{name}_{i}": [N22],
+                    })], axis=1)
+
+            # print(Re_N12)
+            omega_i = np.sum(Im_N21_) + np.sqrt(
+                (np.sum(N11_) + h0_avrg) * (np.sum(N22_) + h0_avrg) - np.sum(Re_N21_) ** 2)
+
+            f_k = omega_i.real * mu_0 * sample.Msat * sample.gamma / 2 / np.pi * 1e-9
+            df_k[f"f_pert_{i} (GHz)"] = f_k
+
+    return df_k
diff --git a/tetrax/experiments/relax.py b/tetrax/experiments/relax.py
index f2cce0a280d6465a2b9ba3a1fee197050fdb95a6..a6d00022cb69a609e2756c8f1db15416c8eea16f 100644
--- a/tetrax/experiments/relax.py
+++ b/tetrax/experiments/relax.py
@@ -28,7 +28,7 @@ def energy_per_length_spherical(mag_spherical, nx, dA, h_ext, N, N_cub):
    # print(mag.shape)
    # print((h_ext - 0.5 * N.dot(mag)).shape)
    # energy_per_volume = -tetramagvec_scalar_product(mag, h_ext + 0.5*N_cub.nonlinear_field(mag) - 0.5 * N.dot(mag)).real
-    energy_per_volume = -flattened_mesh_vec_scalar_product(mag, h_ext - 0.5 * N.dot(mag)).real
+    energy_per_volume = -flattened_mesh_vec_scalar_product(mag, h_ext - 0.5 * N.dot(mag) + 0.5*N_cub.nonlinear_field(mag)).real
 
    return (energy_per_volume * dA).sum()
 
@@ -52,7 +52,7 @@ def jac_spherical(mag_spherical, nx, dA, h_ext, N, N_cub):
    m_z = np.cos(theta)
 
    mag = np.concatenate((m_x, m_y, m_z))
-    h_eff = h_ext - N.dot(mag).real # + N_cub.nonlinear_field(mag)
+    h_eff = h_ext - N.dot(mag).real + N_cub.nonlinear_field(mag)  # TODO include again!
 
    h_x = h_eff[:nx]
@@ -95,7 +95,7 @@ def minimize_gibbs_free_energy(sample, Bext, tol_cg, return_last=False, continue
    sample.N_iDMI.set_k(0)
 
    N_tot = sample.N_dip + sample.N_exc + sample.N_uni + sample.N_DMI + sample.N_iDMI
-    N_cub = csr_matrix([])
+    N_cub = sample.N_cub
 
    fac = (sample.Msat * sample.scale) ** 2 * mu_0
    # tol_cg = 1e-12
diff --git a/tetrax/helpers/math.py b/tetrax/helpers/math.py
index 31eb071b391744a473b6680b3e7ed005eb8ed38e..bd88eec0ce9d13fddb9c4508519d5dec0a916839 100644
--- a/tetrax/helpers/math.py
+++ b/tetrax/helpers/math.py
@@ -8,11 +8,13 @@ is used.
 import numpy as np
 from scipy.sparse import dia_matrix, bmat
 
+
 def normalize_single_3d_vec(vec):
-    norm = np.sqrt(vec.T[0]**2 + vec.T[1]**2 + vec.T[2]**2)
-    norm = np.repeat(norm,3)
-    norm = np.reshape(norm,(len(norm)//3,3))
-    return np.array(vec)/norm
+    norm = np.sqrt(vec.T[0] ** 2 + vec.T[1] ** 2 + vec.T[2] ** 2)
+    norm = np.repeat(norm, 3)
+    norm = np.reshape(norm, (len(norm) // 3, 3))
+    return np.array(vec) / norm
+
 
 def flattened_mesh_vec_abs(vec):
    """Calculates the node-wise magnitude of a flattened mesh vector :math:`\mathbf{v}`.
@@ -134,24 +136,21 @@ def flattened_mesh_vec_tensor_product(vec1, vec2):
    tmv2z = vec2[2 * nx:]
 
    # only calculate necessary elements (only 6 distinct) and make diagonal blocks from them
-    dxx = dia_matrix((tmv1x * tmv2x, 0), shape=(nx,nx))
-    dxy = dia_matrix((tmv1x * tmv2y, 0), shape=(nx,nx))
-    dxz = dia_matrix((tmv1x * tmv2z, 0), shape=(nx,nx))
-
-    dyx = dia_matrix((tmv1y * tmv2x, 0), shape=(nx,nx))
-    dyy = dia_matrix((tmv1y * tmv2y, 0), shape=(nx,nx))
-    dyz = dia_matrix((tmv1y * tmv2z, 0), shape=(nx,nx))
+    dxx = dia_matrix((tmv1x * tmv2x, 0), shape=(nx, nx))
+    dxy = dia_matrix((tmv1x * tmv2y, 0), shape=(nx, nx))
+    dxz = dia_matrix((tmv1x * tmv2z, 0), shape=(nx, nx))
 
-    dzx = dia_matrix((tmv1z * tmv2x, 0), shape=(nx,nx))
-    dzy = dia_matrix((tmv1z * tmv2y, 0), shape=(nx,nx))
-    dzz = dia_matrix((tmv1z * tmv2z, 0), shape=(nx,nx))
+    dyx = dia_matrix((tmv1y * tmv2x, 0), shape=(nx, nx))
+    dyy = dia_matrix((tmv1y * tmv2y, 0), shape=(nx, nx))
+    dyz = dia_matrix((tmv1y * tmv2z, 0), shape=(nx, nx))
+    dzx = dia_matrix((tmv1z * tmv2x, 0), shape=(nx, nx))
+    dzy = dia_matrix((tmv1z * tmv2y, 0), shape=(nx, nx))
+    dzz = dia_matrix((tmv1z * tmv2z, 0), shape=(nx, nx))
 
    # stitch blocks together
-    sparse_mat = bmat([[dxx, dxy, dxz], [dyx, dyy, dyz], [dzx, dzy, dzz]],format="csr")
-
-
+    sparse_mat = bmat([[dxx, dxy, dxz], [dyx, dyy, dyz], [dzx, dzy, dzz]], format="csr")
 
    return sparse_mat
 
@@ -215,7 +214,8 @@ def spherical_angles_to_mesh_vector(theta, phi, as_flattened=False):
    vec_y = np.sin(theta) * np.sin(phi)
    vec_z = np.cos(theta)
 
-    return np.concatenate((vec_x, vec_y, vec_z)).flatten() if as_flattened else np.concatenate((vec_x, vec_y, vec_z)).flatten().reshape(3, nx).T
+    return np.concatenate((vec_x, vec_y, vec_z)).flatten() if as_flattened else np.concatenate(
+        (vec_x, vec_y, vec_z)).flatten().reshape(3, nx).T
 
 
 def diag_matrix_from_vec(v):
@@ -265,10 +265,10 @@ def h_strip(k, A, L, s):
    # sinc is evaluated using np.divide in order to catch divide-by-zero exception
    sinc = np.divide(np.sin(k * L / 2), (k * L / 2), out=np.ones_like(k), where=(k != 0))
 
-    return A*sinc*np.exp(-np.abs(k*s))
+    return A * sinc * np.exp(-np.abs(k * s))
 
 
-def h_CPW(k,A,L,g,s):
+def h_CPW(k, A, L, g, s):
    r"""Fourier transform in `z` direction of one component of the microwave field distribution of a CPW antenna.
    Parameters
@@ -307,10 +307,10 @@ def h_CPW(k,A,L,g,s):
    Review Research 2, 043203 (2020) <https://journals.aps.org/prresearch/abstract/10.1103/PhysRevResearch.2.043203>`_.
    """
-    return 2 * np.sin(k*(g+L)/2) ** 2 * h_strip(k, A, L, s)
+    return 2 * np.sin(k * (g + L) / 2) ** 2 * h_strip(k, A, L, s)
 
 
-def h_U(k,A,L,g,s):
+def h_U(k, A, L, g, s):
    r"""Fourier transform in `z` direction of one component of the microwave-field distribution of a U-shaped antenna.
 
    Parameters
    ----------
@@ -349,7 +349,7 @@ def h_U(k,A,L,g,s):
    Review Research 2, 043203 (2020) <https://journals.aps.org/prresearch/abstract/10.1103/PhysRevResearch.2.043203>`_.
    """
-    return 2 * 1j * np.sin(k*(g+L)/2) * h_strip(k, A, L, s)
+    return 2 * 1j * np.sin(k * (g + L) / 2) * h_strip(k, A, L, s)
 
 
 def h_hom(k, A):
@@ -379,5 +379,33 @@ def h_hom(k, A):
    with :math:`\delta` being the Dirac distribution.
 
    """
-    return A*np.piecewise(k, [k == 0, k != 0], [1, 0])
+    return A * np.piecewise(k, [k == 0, k != 0], [1, 0])
+
+
+def matrix_elem(i, j, A, dV):
+    r"""
+    Calculates the matrix element of an operator `A` with respect to the basis vectors `i` and `j` as
+
+    .. math::
+        A_{i,j} = \int\mathrm{d}V \ \mathbf{i}^*\cdot \hat{\mathbf{A}} \cdot \mathbf{j}.
+
+    Parameters
+    ----------
+    i : np.array
+        Flattened mesh vector of shape `(3*N,)`.
+    j : np.array
+        Flattened mesh vector of shape `(3*N,)`.
+    A : LinearOperator
+        LinearOperator of shape `(3*N, 3*N)`.
+    dV : Volume elements of the mesh.
+
+    Returns
+    -------
+    float
+    """
+    rhs = A.dot(j)
+
+    temp = flattened_mesh_vec_scalar_product2d(np.conjugate(i), rhs)
+    elem = (np.sum(temp * dV))
+    return elem
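
Illustration (not part of the patch): the perturbed-dispersion branch added to modes_per_k above projects each partial dynamic matrix onto the two components of a normalized mode profile and combines the resulting stiffness fields into a frequency, following Phys. Rev. B 104, 174414. The sketch below restates that arithmetic on plain NumPy arrays so the formula can be checked in isolation; the dense toy operators and the helper names matrix_elem_dense and perturbed_frequency are hypothetical stand-ins that only mirror the quantities used in the diff.

import numpy as np


def matrix_elem_dense(i, j, A, dV):
    # <i | A | j> weighted by the area elements, mirroring helpers/math.matrix_elem;
    # here i and j are flattened (2*nx,) vectors and dV has length nx.
    nx = dV.size
    rhs = A @ j
    node_product = np.conjugate(i[:nx]) * rhs[:nx] + np.conjugate(i[nx:]) * rhs[nx:]
    return np.sum(node_product * dV)


def perturbed_frequency(mode_local, partial_dynmats, h0_avrg, dA):
    nx = dA.size
    # orthonormal basis from the two components of the mode profile, as in modes_per_k
    s1 = np.concatenate([mode_local[:nx], np.zeros(nx)])
    s2 = np.concatenate([np.zeros(nx), mode_local[nx:]])
    s1 = s1 / np.sqrt(matrix_elem_dense(s1, s1, np.eye(2 * nx), dA).real)
    s2 = s2 / np.sqrt(matrix_elem_dense(s2, s2, np.eye(2 * nx), dA).real)

    Im_N21 = Re_N21 = N11 = N22 = 0.0
    for D in partial_dynmats:  # one dense (2*nx, 2*nx) matrix per interaction (exc, uni, cub, ...)
        C11 = matrix_elem_dense(s1, s1, D, dA)
        C22 = matrix_elem_dense(s2, s2, D, dA)
        C12 = matrix_elem_dense(s1, s2, D, dA)
        C21 = matrix_elem_dense(s2, s1, D, dA)
        # conversion to stiffness fields, as in the patch
        Im_N21 += 0.5 * (C11 + C22).real
        Re_N21 += (1j * 0.5 * (C11 - C22)).real
        N11 += C21.real
        N22 += C12.real

    # reduced frequency; the patch converts it to GHz via mu_0 * Msat * gamma / (2 pi) * 1e-9
    return Im_N21 + np.sqrt((N11 + h0_avrg) * (N22 + h0_avrg) - Re_N21 ** 2)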
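
For orientation, the keyword arguments introduced by this patch are summarized below. The snippet only restates the eigenmodes signature from the diff and assumes an already constructed ExperimentalSetup object with a relaxed sample; sample and setup creation are outside the scope of this patch.

# Assuming `setup` is an existing ExperimentalSetup whose sample has been relaxed
# (relax() now also includes the cubic-anisotropy field in energy and Jacobian):
#
# dispersion = setup.eigenmodes(
#     kmin=-40e6, kmax=40e6, Nk=81, num_modes=20,
#     no_dip=True,                 # pre-existing flag: solve without the dipolar interaction
#     h0_dip=True,                 # new: still include the static dipolar field in h0 when no_dip=True
#     perturbed_dispersion=True,   # new: also compute perturbation-theory frequencies and write
#                                  #      ./<sample>/<experiment>/dispersion_perturbedmodes.csv
#     save_stiffness_fields=True,  # new: additionally store h0_avrg and per-mode stiffness elements
# )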