diff --git a/.dockerignore b/.dockerignore index 0e1abff..6bc8ba3 100644 --- a/.dockerignore +++ b/.dockerignore @@ -3,3 +3,4 @@ build dist *.egg-info/ Dockerfile +*.csv diff --git a/.gitignore b/.gitignore index e9575f0..d675dcb 100644 --- a/.gitignore +++ b/.gitignore @@ -6,4 +6,10 @@ dist .ipynb_checkpoints *.DS_Store .vscode -*.py.save \ No newline at end of file +*.py.save + +cellimages +example_json/ +*.png +*.ipynb +psf diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..e55867f --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,26 @@ +# Allen Institute Contribution Agreement + +This document describes the terms under which you may make “Contributions” — +which may include without limitation, software additions, revisions, bug fixes, configuration changes, +documentation, or any other materials — to any of the projects owned or managed by the Allen Institute. +If you have questions about these terms, please contact us at terms@alleninstitute.org. + +You certify that: + +• Your Contributions are either: + +1. Created in whole or in part by you and you have the right to submit them under the designated license +(described below); or +2. Based upon previous work that, to the best of your knowledge, is covered under an appropriate +open source license and you have the right under that license to submit that work with modifications, +whether created in whole or in part by you, under the designated license; or + +3. Provided directly to you by some other person who certified (1) or (2) and you have not modified them. + +• You are granting your Contributions to the Allen Institute under the terms of the [2-Clause BSD license](https://opensource.org/licenses/BSD-2-Clause) +(the “designated license”). 
+ +• You understand and agree that the Allen Institute projects and your Contributions are public and that +a record of the Contributions (including all metadata and personal information you submit with them) is +maintained indefinitely and may be redistributed consistent with the Allen Institute’s mission and the +2-Clause BSD license. diff --git a/Dockerfile b/Dockerfile index c587935..57deccf 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM fcollman/render-python:latest +FROM fcollman/render-modules:master MAINTAINER Forrest Collman (forrest.collman@gmail.com) RUN mkdir -p /usr/local/render-python-apps WORKDIR /usr/local/render-python-apps @@ -7,9 +7,15 @@ RUN pip install -r requirements.txt RUN pip install setuptools --upgrade --disable-pip-version-check RUN pip install argschema --upgrade --disable-pip-version-check RUN pip install jupyter -RUN apt-get install libspatialindex-dev -y +RUN apt-get update && apt-get install libspatialindex-dev -y +RUN conda install nomkl COPY . /usr/local/render-python-apps +WORKDIR /shared/render-modules +RUN pip install . 
+WORKDIR /usr/local/render-python-apps + + #RUN git clone https://github.com/fcollman/render-python-apps #WORKDIR render-python-apps #RUN git pull && git checkout newrender diff --git a/allen_utils/conda_env_setup.sh b/allen_utils/conda_env_setup.sh new file mode 100755 index 0000000..d5aa3d2 --- /dev/null +++ b/allen_utils/conda_env_setup.sh @@ -0,0 +1,3 @@ +conda create --name render-python-apps --prefix $IMAGE_PROCESSING_DEPLOY_PATH python=2.7 +source activate render-python-apps +pip install -r ../requirements.txt diff --git a/allen_utils/docker_setup.sh b/allen_utils/docker_setup.sh index 4878ec6..25a7fcc 100755 --- a/allen_utils/docker_setup.sh +++ b/allen_utils/docker_setup.sh @@ -1,5 +1,5 @@ -docker pull atbigdawg:5000/fcollman/render-python:latest -docker tag atbigdawg:5000/fcollman/render-python:latest fcollman/render-python:latest +#docker pull atbigdawg:5000/fcollman/render-python:latest +#docker tag atbigdawg:5000/fcollman/render-python:latest fcollman/render-python:latest docker build -t fcollman/render-python-apps . 
docker tag fcollman/render-python-apps atbigdawg:5000/fcollman/render-python-apps docker push atbigdawg:5000/fcollman/render-python-apps @@ -10,14 +10,11 @@ docker run -t --name renderapps \ -v /nas2:/nas2 \ -v /nas3:/nas3 \ -v /nas4:/nas4 \ +-v /nas5:/nas5 \ -v /data:/data \ -v /pipeline:/pipeline \ -v /pipeline/render-python-apps:/usr/local/render-python-apps \ -v /etc/hosts:/etc/hosts \ ---dns 10.128.104.10 \ --p 8888:8888 \ +-p 7777:7777 \ -e "PASSWORD=$JUPYTERPASSWORD" \ --i -t fcollman/render-python-apps \ -/bin/bash -c "/opt/conda/bin/jupyter notebook --config=/root/.jupyter/jupyter_notebook_config.py --notebook-dir=/pipeline/render-python-apps --no-browser --allow-root" - - +-i -t fcollman/render-python-apps diff --git a/allen_utils/reorg_setup.sh b/allen_utils/reorg_setup.sh index 7f95c5e..d8990ea 100755 --- a/allen_utils/reorg_setup.sh +++ b/allen_utils/reorg_setup.sh @@ -1,21 +1,20 @@ -docker pull atbigdawg:5000/fcollman/render-python:master -docker tag atbigdawg:5000/fcollman/render-python:master fcollman/render-python -docker build -t fcollman/render-python-apps:reorg . -docker tag fcollman/render-python-apps:reorg fcollman/render-python-apps:reorg_testsharmi -docker push atbigdawg:5000/fcollman/render-python-apps -docker kill renderapps_testsharmi -docker rm renderapps_testsharmi -docker run -d --name renderapps_testsharmi \ +docker pull localhost:5000/fcollman/render-python:master +docker tag localhost:5000/fcollman/render-python:master fcollman/render-python +docker build -t sharmi/render-python-apps:reorg . 
+#docker tag fcollman/render-python-apps:reorg fcollman/render-python-apps:reorg_testsharmi +#docker push atbigdawg:5000/fcollman/render-python-apps +docker kill renderapps_develop +docker rm renderapps_develop +docker run -d --name renderapps_develop \ -v /nas2:/nas2 \ -v /nas:/nas \ -v /nas3:/nas3 \ -v /nas4:/nas4 \ -v /data:/data \ -v /pipeline:/pipeline \ --v /pipeline/sharmi/Sharmi_tools/render-python-apps:/usr/local/render-python-apps \ +-v /pipeline/sharmi/Sharmi_tools/render-python-apps-branches/DEVELOP/render-python-apps:/usr/local/share/render-python-apps \ -v /etc/hosts:/etc/hosts \ --p 9999:9999 \ +-p 7777:7777 \ -e "PASSWORD=$JUPYTERPASSWORD" \ --i -t fcollman/render-python-apps:reorg_testsharmi - - +-i -t sharmi/render-python-apps:reorg \ +/bin/bash -c "jupyter notebook --config=/root/.jupyter/jupyter_notebook_config.py --no-browser --allow-root --notebook-dir=/pipeline/sharmi/Sharmi_tools/render-python-apps-branches/DEVELOP/render-python-apps" diff --git a/jupyter_notebook_config.py b/jupyter_notebook_config.py index b789905..9ad08b5 100644 --- a/jupyter_notebook_config.py +++ b/jupyter_notebook_config.py @@ -16,7 +16,7 @@ from IPython.lib import passwd c.NotebookApp.ip = '*' -c.NotebookApp.port = 8888 +c.NotebookApp.port = 7777 c.NotebookApp.open_browser = False c.MultiKernelManager.default_kernel_name = 'python2' diff --git a/renderapps/TrakEM2/AnnotationJsonSchema.py b/renderapps/TrakEM2/AnnotationJsonSchema.py index bf8991c..9a3260d 100644 --- a/renderapps/TrakEM2/AnnotationJsonSchema.py +++ b/renderapps/TrakEM2/AnnotationJsonSchema.py @@ -19,6 +19,7 @@ class Area(argschema.schemas.mm.Schema): description='Nx2 numpy array of local points') global_path = NumpyArray(argschema.fields.List(argschema.fields.Float), description='Nx2 numpy array of global coordinates') + z = argschema.fields.Float(required=False,description="z value of tileId") class AreaList(argschema.schemas.mm.Schema): oid = argschema.fields.Str() diff --git 
a/renderapps/TrakEM2/ImportTrakEM2Annotations.py b/renderapps/TrakEM2/ImportTrakEM2Annotations.py index 3cba672..f14dd54 100644 --- a/renderapps/TrakEM2/ImportTrakEM2Annotations.py +++ b/renderapps/TrakEM2/ImportTrakEM2Annotations.py @@ -18,14 +18,15 @@ "project": "M247514_Rorb_1", "client_scripts": "/pipeline/render/render-ws-java-client/src/main/scripts" }, - "EMstack": "ALIGNEM_reg2", - "trakem2project": "/nas4/data/EM_annotation/annotationFilesForJHU/annotationTrakEMprojects_M247514_Rorb_1/m247514_Site3Annotation_RD.xml", - "output_annotation_file": "/nas4/data/EM_annotation/annotationFilesForJHU/m247514_Site3Annotation_RD_local.json", - "output_bounding_box_file": "/nas4/data/EM_annotation/annotationFilesForJHU/m247514_Site3Annotation_RD_bb_local.json", + "EMstack": "BIGALIGN_LENS_EMclahe_all", + "trakem2project": "/nas4/data/EM_annotation/M247514_Rorb_1/m247514_Site3Annotation_MN.xml", + "output_annotation_file": "/nas3/data/M247514_Rorb_1/annotation/m247514_Site3Annotation_MN_local.json", + "output_bounding_box_file":"/nas3/data/M247514_Rorb_1/annotation/m247514_Site3Annotation_MN_bb_local.json", "renderHome": "/pipeline/render" } + class AnnotationConversionError(Exception): pass @@ -98,8 +99,10 @@ def convert_path_to_area(path_numpy, layer_tilespecs): local_path_numpy[convert_point_mask_ind, :] = local_points point_missing[convert_point_mask_ind] = 0 local_tileIds[convert_point_mask_ind] = rts.tileId - - assert(np.sum(point_missing) == 0) + + if np.sum(point_missing)>0: + raise AnnotationConversionError("unable to find all points {} on the tiles given {}".format(path_numpy,[ts.tileId for poly,ts,rts in layer_tilespecs])) + d = {} d['tileIds'] = local_tileIds @@ -134,7 +137,10 @@ def parse_area_lists(render_tilespecs, tem2_tilespecs, tem2_polygons, root, area for path in paths: # initialize the path's polygon with the entire path path_numpy = convert_path(path, tform) - d = convert_path_to_area(path_numpy, layer_tilespecs) + try: + d = 
convert_path_to_area(path_numpy, layer_tilespecs) + except AnnotationConversionError as e: + raise AnnotationConversionError("error in converting synapse oid:{} id:{} on layer:{}, {}".format(al.attrib['oid'],thisid,layerid,e.args)) area_list_d['areas'].append(d) json_output['area_lists'].append(area_list_d) @@ -168,8 +174,13 @@ def run(self): pot_render_tilespecs = self.render.run(renderapi.tilespec.get_tile_specs_from_z, self.args['EMstack'], ts.z) - filepath = (os.path.split(ts.ip.get(0)['imageUrl'])[ - 1]).split('_flip')[0] + try: + mml=ts.ip.get(0) + except KeyError: + mml = ts.channels[0].ip.get(0) + + + filepath = (os.path.split(mml['imageUrl'])[1]).split('_flip')[0] pot_filepaths = [(os.path.split(t.ip.get(0)['imageUrl'])[1]).split( '_flip')[0] for t in pot_render_tilespecs] render_tilespecs.append(next(t for t, fp in zip( @@ -204,7 +215,10 @@ def run(self): layer_tilespecs = [(poly, ts, t) for poly, ts, t in zip(tem2_polygons, tem2_tilespecs, render_tilespecs) if ts.tileId in patchids] - d = convert_path_to_area(corners,layer_tilespecs) + try: + d = convert_path_to_area(corners,layer_tilespecs) + except AnnotationConversionError as e: + raise AnnotationConversionError("unable to find bounding box of layer {} z={}, due to corners not being on layers with patches {} ".format(layer.attrib['oid'],layer.attrib['z'],patchids)) area_list_d = {} area_list_d['oid'] = layer.attrib['oid'] area_list_d['id'] = int(layer_tilespecs[0][2].z) diff --git a/renderapps/TrakEM2/MakeSynaptogramsFromAnnotation_OidList.py b/renderapps/TrakEM2/MakeSynaptogramsFromAnnotation_OidList.py new file mode 100644 index 0000000..1006b45 --- /dev/null +++ b/renderapps/TrakEM2/MakeSynaptogramsFromAnnotation_OidList.py @@ -0,0 +1,212 @@ +import renderapi +import matplotlib as mpl +mpl.use('Agg') +from renderapps.module.render_module import RenderParameters, RenderModule +import argschema +from renderapps.TrakEM2.AnnotationJsonSchema import AnnotationFile +import matplotlib.pyplot as plt 
+#%matplotlib notebook +import numpy as np +import os +import pandas as pd +from multiprocessing import pool +from functools import partial +import json + + +############## this is modified from notebook: http://ibs-forrestc-ux1:8888/notebooks/MakeSynaptogramsFromAnnotations.ipynb ##### + + +example_parameters = { + "render":{ + "host": "ibs-forrestc-ux1", + "port": 80, + "owner": "Forrest", + "project": "M246930_Scnn1a_4_f1", + "client_scripts": "/pipeline/render/render-ws-java-client/src/main/scripts" + }, + + "global_annotation_file":"/nas/data/M246930_Scnn1a_4_f1/annotation/m246930_site5_annotation_MN_adjustZ_bb_Take2Site5_EMA_global.json", + "annotation_metadata_json":"/nas/data/M246930_Scnn1a_4_f1/SEMdata/processed/Synaptograms/annotationMetadata/m246930_Take2Site5_synaptogram_metadata.json", + "fig_directory":"/nas/data/M246930_Scnn1a_4_f1/SEMdata/processed/Synaptograms/Take2Site5_TdTeSyn", + "synapses_to_make":['1226','1326','1416','2171','2299','668','2707','2709','2711'], + "channel_stacks":[ + { + + "stack":"EMSite5_take2_EMA" + }, + { + + "stack":"EMSite5_take2_EMA" + }, + { + "channel":"TdTomato", + "stack":"Take2Site5_EMA_STI_DCV_FF_allSession_2", + "maxIntensity":10000 + }, + { + "channel":"synapsin", + "stack":"Take2Site5_manReg_EMA_STI_DCV_FF_allSession_3", + "maxIntensity":16000 + } + ] +} + + + +class ChannelStack(argschema.schemas.DefaultSchema): + channel = argschema.fields.Str(required=False, + description="Channel of stack to render (if needed)") + stack = argschema.fields.Str(required=True, + description="render stack to render") + maxIntensity = argschema.fields.Int(required=False, + description="Max intensity to render this channel") + +class MakeAnnotationSynaptogramParameters(RenderParameters): + global_annotation_file = argschema.fields.InputFile(required=True, + description = "global annotation file") + fig_directory = argschema.fields.OutputDir(required=True, + description = "directory to store figures") + annotation_metadata_json = 
argschema.fields.OutputFile(required=True, + description = "where to save metadata") + channel_stacks = argschema.fields.Nested(ChannelStack, + many=True, + required=True, + description="list of Channel Stacks to render") + synapses_to_make=argschema.fields.List(argschema.fields.Str, + required=False, + description="list of oid's to make synapses from") + + + +def load_annotation_file(annotation_path): + with open(annotation_path,'r') as fp: + annotation_d = json.load(fp) + schema = AnnotationFile() + annotations,errors = schema.load(annotation_d) + assert(len(errors)==0) + return annotations + +def make_synaptogram(render,channel_stacks,savedir,al,border=400/3,borderz=2): + oid = al['oid'] + zvalues = [] + Nareas = len(al['areas']) + mins = np.zeros((Nareas,2)) + maxs = np.zeros((Nareas,2)) + for i,area in enumerate(al['areas']): + zvalues.append(area['z']) + gp = area['global_path'] + mins[i,:] = np.min(gp,axis=0) + maxs[i,:] = np.max(gp,axis=0) + gmins = np.min(mins,axis=0)-border + gmaxs = np.max(maxs,axis=0)+border + minX = gmins[0] + minY = gmins[1] + maxX = gmaxs[0] + maxY = gmaxs[1] + width = maxX-minX+1 + height = maxY-minY+1 + zvalues=sorted(zvalues) + zvals = np.arange(np.min(zvalues)-1,np.max(zvalues)+1+1) + Nz = len(zvals) + Nc = len(channel_stacks) + minZ = int(min(zvals)) + ratio = ((Nc)*(height)*1.05)/((Nz)*(width)*1.0) + fig_width=.5*(width*Nz)/100.0 + print width,height,Nz + d={ + 'oid':oid, + 'id':al['id'], + 'minX':minX, + 'maxX':maxX, + 'minY':minY, + 'maxY':maxY, + 'Nz':Nz, + 'minZ':minZ, + 'maxZ':minZ+Nz, + 'width':width, + 'height':height + } + if width>1000: + return d + if height>1000: + return d + if len(zvals)>16: + return d + plt.rcParams['axes.facecolor'] = 'black' + f,ax=plt.subplots(Nc,Nz, + figsize=(fig_width,int(fig_width*ratio)), + gridspec_kw = {'wspace':0, 'hspace':0}, + facecolor='black') + plt.subplots_adjust(left=0.0, right=1.0, top=1.0, bottom=0.0) + for c,chstack in enumerate(channel_stacks): + for zi,z in enumerate(zvals): 
+ a=ax[c,zi] + a.set_xlim([minX,maxX]) + a.set_ylim([minY,maxY]) + a.set_xticks([]) + a.set_yticks([]) + a.set_aspect('equal') + chname = chstack.get('channel',None) + maxIntensity = chstack.get('maxIntensity',None) + img = renderapi.image.get_bb_image(chstack['stack'], + z, + minX, + minY, + width, + height, + channel=chname, + maxIntensity=maxIntensity, + render=render) + a.imshow(img,vmin=0,vmax=255,extent=[minX,maxX,maxY,minY]) + for i,area in enumerate(al['areas']): + zi = int(area['z']-minZ) + for c in range(1,len(channel_stacks)): + a = ax[c,zi] + a.plot(area['global_path'][:,0],area['global_path'][:,1],c='Teal',linewidth=2) + for c,chstack in enumerate(channel_stacks): + chname = chstack.get('channel',chstack['stack']) + + ax[c,0].text(minX,minY,chname[0:4],fontdict={'color':'SLATEGRAY','weight':'bold'},fontsize=14) + #f.tight_layout(True) + fname = os.path.join(savedir,'{}.png'.format(oid)) + f.savefig(fname) + d['figure']=fname + plt.clf() + plt.close() + del(f) + return d + + +class MakeAnnotationSynaptograms(RenderModule): + default_schema = MakeAnnotationSynaptogramParameters + + def run(self): + print self.args + annotations = load_annotation_file(self.args['global_annotation_file']) + + if not os.path.isdir(self.args['fig_directory']): + os.makedirs(self.args['fig_directory']) + + + myp = partial(make_synaptogram, + self.render, + self.args['channel_stacks'], + self.args['fig_directory']) + + #print disagree_oids + proc_pool= pool.Pool(6) + good_oids = self.args.get('synapses_to_make',None) + if good_oids is not None: + good_ann =[al for al in annotations['area_lists'] if al['oid'] in good_oids] + ds = proc_pool.map(myp,good_ann) + else: + ds=proc_pool.map(myp,annotations['area_lists']) + print(len(ds)) + with open(self.args['annotation_metadata_json'],'w') as fp: + json.dump(ds,fp) + + +if __name__ == '__main__': + mod = MakeAnnotationSynaptograms(input_data = example_parameters) + mod.run() diff --git 
a/renderapps/TrakEM2/create_annotation_from_anish_csv.py b/renderapps/TrakEM2/create_annotation_from_anish_csv.py new file mode 100755 index 0000000..2273447 --- /dev/null +++ b/renderapps/TrakEM2/create_annotation_from_anish_csv.py @@ -0,0 +1,66 @@ +import argschema +from AnnotationJsonSchema import AnnotationFile +import renderapi +import numpy as np +import pandas as pd +from ..module.render_module import RenderParameters, RenderModule + +example_input = { + "render": { + "host": "ibs-forrestc-ux1", + "port": 8080, + "owner": "Forrest", + "project": "M247514_Rorb_1", + "client_scripts": "/pipeline/render/render-ws-java-client/src/main/scripts" + }, + "stack": "ALIGNEM_reg2", + "csv_file": "/Users/forrestcollman/AnishCsvDetections/resultVol_3.txt", + "json_file": "/Users/forrestcollman/AnishCsvDetections/resultVol_3.json", +} + +class MakeAnnotationFileAnishParameters(RenderParameters): + stack = argschema.fields.Str(required=True, + description="name of render stack annotations were derived from") + csv_file = argschema.fields.InputFile(required=True, + description="path to csv file in anish's format") + json_file = argschema.fields.OutputFile(required=True, + description="location of output file to save annotation") + pixel_size = argschema.fields.Float(required=False,default = 100.0/3.0) + +class MakeAnnotationFileAnish(RenderModule): + default_schema = MakeAnnotationFileAnishParameters + + def run(self): + #open the csv file + with open(self.args['csv_file'],'r') as fp: + df = pd.DataFrame() + indices = [] + coords = [] + + for i,line in enumerate(fp): + vals=np.array(map(int,line.split(',')[:-1])) + N = len(vals) + Np = N/3 + vals = np.reshape(vals,(Np,3)) + indices.append((i+1)*np.ones(vals.shape[0],np.uint64)) + coords.append(vals) + coords = np.vstack(coords) + df = pd.DataFrame(coords) + df.columns=['x','y','z'] + df['ind']=np.hstack(indices) + #find all the unique synapseIDs + synapses = df.groupby('ind') + for ind,dfg in synapses: + zsections = 
dfg.groupby('z') + for z,dfgz in zsections: + print(dfgz) + break + #loop over them, constructing a dictionary according to AnnotationFile schema + + #save result to json_file + + +if __name__ == '__main__': + mod = MakeAnnotationFileAnish(input_data = example_input, args=[]) + mod.run() + diff --git a/renderapps/TrakEM2/create_render_stack_from_trakem2.py b/renderapps/TrakEM2/create_render_stack_from_trakem2.py index 08daa1b..8ac2627 100644 --- a/renderapps/TrakEM2/create_render_stack_from_trakem2.py +++ b/renderapps/TrakEM2/create_render_stack_from_trakem2.py @@ -29,8 +29,9 @@ def run(self): ts_render = get_matching_tilespec_by_path(ts,render_tilespecs) ts.tileId = ts_render.tileId mml = renderapi.tilespec.MipMapLevel(0,ts_render.ip.get(0)['imageUrl'],ts_render.ip.get(0)['maskUrl']) + ts.channels=[] ts.ip.update(mml) - + renderapi.stack.logger.setLevel(logging.DEBUG) #create a new stack renderapi.stack.create_stack(self.args['output_stack'], @@ -50,9 +51,9 @@ def run(self): "project": "M247514_Rorb_1", "client_scripts": "/pipeline/render/render-ws-java-client/src/main/scripts" }, - "input_stack":"ALIGNEM_reg2", - "output_stack":"", - "tem2project": "/nas4/data/EM_annotation/M247514_Rorb_1/m247514_Site3Annotation_MN.xml", + "input_stack":"Site3Align_EM", + "output_stack":"Site3Align2_EM", + "tem2project": "/nas/data/M247514_Rorb_1/EMraw/ribbon0000/Site3Aligned.xml", "renderHome": "/pipeline/render" } mod = CreateRenderStackFromTrakEM2(input_data = example_input) diff --git a/renderapps/TrakEM2/create_trakem2_subvolume_from_render.py b/renderapps/TrakEM2/create_trakem2_subvolume_from_render.py index 54ccdf7..af6e003 100755 --- a/renderapps/TrakEM2/create_trakem2_subvolume_from_render.py +++ b/renderapps/TrakEM2/create_trakem2_subvolume_from_render.py @@ -14,24 +14,26 @@ example_parameters = { "render":{ "host":"ibs-forrestc-ux1", - "port":8080, - "owner":"KDM_SYN", - "project":"KDM_SYN_100430B_L5", + "port":80, + "owner":"Forrest", + 
"project":"M246930_Scnn1a_4_f1", "client_scripts":"/pipeline/render/render-ws-java-client/src/main/scripts" }, - 'minX':0, - 'maxX':1388, - 'minY':0, - 'maxY':1040, + 'minX':1938, + 'maxX':3238, + 'minY':5400, + 'maxY':6700, 'minZ':0, - 'maxZ':48, - 'inputStack':'Stitched_YFP_1', - 'outputStack':'TEST', + 'maxZ':112, + 'inputStack':'Fine_Aligned_Deconvolved_1_DAPI_1', + 'outputStack':'Fine_Aligned_Deconvolved_1_DAPI_1', "doChunk":False, - "outputXMLdir":"/nas4/KDM-SYN-100430B-L5_Deconv/Curated_SJS_2017/Deconvolved_and_Ultraligned/alignment_intermediates/trakem2/test", - "renderHome":"/pipeline/forrestrender/" + "outputXMLdir":"/nas/data/M246930_Scnn1a_4_f1/processed/3D_volumes/test/Ilastik_test", + "renderHome":"/pipeline/render/" } + + class CreateTrakEM2Project(RenderModule): def __init__(self,schema_type=None,*args,**kwargs): if schema_type is None: @@ -83,7 +85,7 @@ def run(self): self.args['maxY'], render=self.render) print "Now adding layer: %d \n %d tiles"%(layerid,len(tilespecs)) - createlayer_fromtilespecs(tilespecs, outfile,layerid,shiftx=-self.args['minX'],shifty=-self.args['minY']) + createlayer_fromtilespecs(tilespecs, outfile,layerid,shiftx=-self.args['minX'],shifty=-self.args['minY'],affineOnly=True) #footers print outfile diff --git a/renderapps/TrakEM2/make_synaptograms_from_annotations.py b/renderapps/TrakEM2/make_synaptograms_from_annotations.py new file mode 100644 index 0000000..2237259 --- /dev/null +++ b/renderapps/TrakEM2/make_synaptograms_from_annotations.py @@ -0,0 +1,203 @@ +import renderapi +import matplotlib as mpl +mpl.use('Agg') +from renderapps.module.render_module import RenderParameters, RenderModule +import argschema +from renderapps.TrakEM2.AnnotationJsonSchema import AnnotationFile +import matplotlib.pyplot as plt +#%matplotlib notebook +import numpy as np +import os +import pandas as pd +from multiprocessing import pool +from functools import partial +import json + +example_parameters = { + "render":{ + "host": 
"ibs-forrestc-ux1", + "port": 8988, + "owner": "Forrest", + "project": "M247514_Rorb_1", + "client_scripts": "/pipeline/allenrender/render-ws-java-client/src/main/scripts" + }, + "global_annotation_file":"/nas3/data/M247514_Rorb_1/annotation/m247514_Site3Annotation_MN_global_v2.json", + "annotation_metadata_json":"/nas3/data/M247514_Rorb_1/annotation/m247514_Site3Annotation_synaptogram_metadata.json", + "fig_directory":"/nas3/data/M247514_Rorb_1/processed/Site3Synaptograms_v2", + "channel_stacks":[ + { + "channel":"EM", + "stack":"Site3Align2_EM_multichannel" + }, + { + "channel":"EM", + "stack":"Site3Align2_EM_multichannel" + }, + { + "channel":"PSD95", + "stack":"Site3Align2_LENS_Session1", + "maxIntensity":10000 + }, + { + "channel":"GABA", + "stack":"Site3Align2_LENS_Session3", + "maxIntensity":5000 + }, + { + "channel":"synapsin", + "stack":"Site3Align2_LENS_Session3", + "maxIntensity":16000 + }, + { + "channel":"TdTomato", + "stack":"Site3Align2_LENS_Session2", + "maxIntensity":25000 + } + ] +} + +class ChannelStack(argschema.schemas.DefaultSchema): + channel = argschema.fields.Str(required=False, + description="Channel of stack to render (if needed)") + stack = argschema.fields.Str(required=True, + description="render stack to render") + maxIntensity = argschema.fields.Int(required=False, + description="Max intensity to render this channel") + +class MakeAnnotationSynaptogramParameters(RenderParameters): + global_annotation_file = argschema.fields.InputFile(required=True, + description = "global annotation file") + fig_directory = argschema.fields.OutputDir(required=True, + description = "directory to store figures") + annotation_metadata_json = argschema.fields.OutputFile(required=True, description = "path to save annotation metadata json output") + channel_stacks = argschema.fields.Nested(ChannelStack, + many=True, + required=True, + description="list of Channel Stacks to render") + + +def load_annotation_file(annotation_path): + with 
open(annotation_path,'r') as fp: + annotation_d = json.load(fp) + schema = AnnotationFile() + annotations,errors = schema.load(annotation_d) + assert(len(errors)==0) + return annotations + +def make_synaptogram(render,channel_stacks,savedir,al,border=400/3,borderz=2): + oid = al['oid'] + zvalues = [] + Nareas = len(al['areas']) + mins = np.zeros((Nareas,2)) + maxs = np.zeros((Nareas,2)) + for i,area in enumerate(al['areas']): + zvalues.append(area['z']) + gp = area['global_path'] + mins[i,:] = np.min(gp,axis=0) + maxs[i,:] = np.max(gp,axis=0) + gmins = np.min(mins,axis=0)-border + gmaxs = np.max(maxs,axis=0)+border + minX = gmins[0] + minY = gmins[1] + maxX = gmaxs[0] + maxY = gmaxs[1] + width = maxX-minX+1 + height = maxY-minY+1 + zvalues=sorted(zvalues) + zvals = np.arange(np.min(zvalues)-1,np.max(zvalues)+1+1) + Nz = len(zvals) + Nc = len(channel_stacks) + minZ = int(min(zvals)) + ratio = ((Nc)*(height)*1.05)/((Nz)*(width)*1.0) + fig_width=.5*(width*Nz)/100.0 + print width,height,Nz + d={ + 'oid':oid, + 'id':al['id'], + 'minX':minX, + 'maxX':maxX, + 'minY':minY, + 'maxY':maxY, + 'Nz':Nz, + 'minZ':minZ, + 'maxZ':minZ+Nz, + 'width':width, + 'height':height + } + if width>1000: + return d + if height>1000: + return d + if len(zvals)>15: + return d + f,ax=plt.subplots(Nc,Nz, + figsize=(fig_width,int(fig_width*ratio)), + gridspec_kw = {'wspace':0, 'hspace':0}) + plt.subplots_adjust(left=0.0, right=1.0, top=1.0, bottom=0.0) + for c,chstack in enumerate(channel_stacks): + for zi,z in enumerate(zvals): + a=ax[c,zi] + a.set_xlim([minX,maxX]) + a.set_ylim([minY,maxY]) + a.set_xticks([]) + a.set_yticks([]) + a.set_aspect('equal') + chname = chstack.get('channel',None) + maxIntensity = chstack.get('maxIntensity',None) + img = renderapi.image.get_bb_image(chstack['stack'], + z, + minX, + minY, + width, + height, + channel=chname, + maxIntensity=maxIntensity, + render=render) + a.imshow(img,vmin=0,vmax=255,extent=[minX,maxX,maxY,minY]) + for i,area in enumerate(al['areas']): 
+ zi = int(area['z']-minZ) + for c in range(1,len(channel_stacks)): + a = ax[c,zi] + a.plot(area['global_path'][:,0],area['global_path'][:,1],c='g',linewidth=3) + for c,chstack in enumerate(channel_stacks): + chname = chstack.get('channel',chstack['stack']) + + ax[c,0].text(minX,minY,chname,fontdict={'color':'w','weight':'bold'},fontsize=16) + #f.tight_layout(True) + fname = os.path.join(savedir,'{}.png'.format(oid)) + f.savefig(fname) + d['figure']=fname + plt.clf() + plt.close() + del(f) + return d + +class MakeAnnotationSynaptograms(RenderModule): + default_schema = MakeAnnotationSynaptogramParameters + + def run(self): + print self.args + annotations = load_annotation_file(self.args['global_annotation_file']) + + if not os.path.isdir(self.args['fig_directory']): + os.makedirs(self.args['fig_directory']) + + + myp = partial(make_synaptogram, + self.render, + self.args['channel_stacks'], + self.args['fig_directory']) + + #print disagree_oids + proc_pool= pool.Pool(6) + #good_ann =[al for al in annotations['area_lists'] if int(al['oid']) in disagree_oids] + #ds = proc_pool.map(myp,good_ann) + ds=proc_pool.map(myp,annotations['area_lists']) + + with open(self.args['annotation_metadata_json'],'w') as fp: + json.dump(ds,fp) + +if __name__ == '__main__': + mod = MakeAnnotationSynaptograms(input_data = example_parameters) + mod.run() + \ No newline at end of file diff --git a/renderapps/TrakEM2/reimport_trakem2_to_render.py b/renderapps/TrakEM2/reimport_trakem2_to_render.py index bbc6367..62df42b 100644 --- a/renderapps/TrakEM2/reimport_trakem2_to_render.py +++ b/renderapps/TrakEM2/reimport_trakem2_to_render.py @@ -17,24 +17,25 @@ example_parameters = { "render":{ "host":"ibs-forrestc-ux1", - "port":8080, - "owner":"KDM_SYN", - "project":"KDM_SYN_100430B_L5", + "port":80, + "owner":"Forrest", + "project":"M246930_Scnn1a_4_f1", "client_scripts":"/pipeline/render/render-ws-java-client/src/main/scripts" }, - 'minX':0, - 'maxX':1417, - 'minY':0, - 'maxY':2017, - 'minZ':0, 
- 'maxZ':48, - 'inputStack':'Stitched_YFP_1', - 'outputStack':'TrakEM2_Aligned_YFP_1_TEST', + 'minX':1565, + 'maxX':3277, + 'minY':9450, + 'maxY':11198, + 'minZ':20, + 'maxZ':20, + 'inputStack':'Registered_3_DAPI_3_CCtest', + 'outputStack':'Registered_3_DAPI_3_CCtest', "doChunk":False, - "outputXMLdir":"/nas4/KDM-SYN-100430B-L5_Deconv/Curated_SJS_2017/Deconvolved_and_Ultraligned/alignment_intermediates/trakem2/test/", - "renderHome":"/pipeline/forrestrender/" + "outputXMLdir":"/nas/data/M246930_Scnn1a_4_f1/processed/registration_fix_20", + "renderHome":"/pipeline/render/" } + class ReImportTrakEM2ToRender(TrakEM2RenderModule): def __init__(self,schema_type=None,*args,**kwargs): if schema_type is None: diff --git a/renderapps/TrakEM2/trakem2utils.py b/renderapps/TrakEM2/trakem2utils.py index 81a53ec..36b6cb3 100644 --- a/renderapps/TrakEM2/trakem2utils.py +++ b/renderapps/TrakEM2/trakem2utils.py @@ -8,8 +8,13 @@ def get_matching_tilespec_by_path(ts,pot_tilespecs): - filepath = (os.path.split(ts.ip.get(0)['imageUrl'])[ - 1]).split('_flip')[0] + if ts.ip.get(0) is not None: + ip = ts.ip + else: + ip = ts.channels[0].ip + + filepath = (os.path.split(ip.get(0)['imageUrl'])[ + 1]).split('_flip')[0] pot_filepaths = [(os.path.split(t.ip.get(0)['imageUrl'])[1]).split( '_flip')[0] for t in pot_tilespecs] return next(t for t, fp in zip( diff --git a/renderapps/TrakEM2/transform_local_annotation_json_to_global.py b/renderapps/TrakEM2/transform_local_annotation_json_to_global.py index 8cfdded..9ba118f 100644 --- a/renderapps/TrakEM2/transform_local_annotation_json_to_global.py +++ b/renderapps/TrakEM2/transform_local_annotation_json_to_global.py @@ -12,14 +12,14 @@ example_input={ "render":{ "host":"ibs-forrestc-ux1", - "port":8080, + "port":80, "owner":"Forrest", - "project":"M247514_Rorb_1", + "project":"M246930_Scnn1a_4_f1", "client_scripts":"/pipeline/render/render-ws-java-client/src/main/scripts" }, - "stack":"BIGALIGN_LENS_EMclahe_Site3", - 
"input_annotation_file":"/nas4/data/EM_annotation/M247514_Rorb_1/m247514_Site3Annotation_MN_bb_local.json", - "output_annotation_file":"/nas4/data/EM_annotation/M247514_Rorb_1/m247514_Site3Annotation_MN_bb_global.json" + "stack":"EMSite5_take2_EMA", + "input_annotation_file":"/nas3/data/M246930_Scnn1a_4_f1/annotation/m246930_site5_annotation_MN_adjustZ_bb_local.json", + "output_annotation_file":"/nas3/data/M246930_Scnn1a_4_f1/annotation/m246930_site5_annotation_MN_adjustZ_bb_EMSite5_take2_EMA_global.json" } @@ -62,6 +62,7 @@ def transform_annotations(render,stack,local_annotation): ind = np.where(area['tileIds']==tileId)[0] global_path[ind,:]= renderapi.transform.estimate_dstpts(ts.tforms,lp[ind,:]) area['global_path']=global_path + area['z']=ts.z return local_annotation diff --git a/renderapps/__init__.py b/renderapps/__init__.py index e3a7165..f5862a3 100644 --- a/renderapps/__init__.py +++ b/renderapps/__init__.py @@ -14,9 +14,10 @@ from . import transfer from . import wrinkle_detection from . import rough_align - +from . import synapse_detection +from . 
import intensity_correction __all__ = ['cross_modal_registration', 'dataimport', 'materialize', 'pointmatch', 'module','shapely', 'registration', 'section_polygons', 'stack', - 'stitching', 'tile', 'TrakEM2','transfer','wrinkle_detection','datamanagement','rough_align'] + 'stitching', 'tile', 'TrakEM2','transfer','wrinkle_detection','datamanagement','rough_align','synapse_detection','intensity_correction'] diff --git a/renderapps/cross_modal_registration/atlas_utils.py b/renderapps/cross_modal_registration/atlas_utils.py index ef67703..87dbbd8 100644 --- a/renderapps/cross_modal_registration/atlas_utils.py +++ b/renderapps/cross_modal_registration/atlas_utils.py @@ -456,7 +456,9 @@ def process_siteset(render,siteset, sectionset, doc, project_path,lm_dataset='te # print tile['UID'],tile['@row'],tile['@col'],tile['StageX'],tile['StageY'] # df.to_csv(sitefile,index=False,header=False) tilespec_path = os.path.join(project_dir,'tilespecs') - + if not os.path.isdir(tilespec_path): + os.makedirs(tilespec_path) + json_file=os.path.join( tilespec_path, 'EM_rib%04dsect%04d_%s.json' % (ribnum, sectnum, sitename)) with open(json_file,'w') as fp: diff --git a/renderapps/cross_modal_registration/import_EM_registration_projects_multi.py b/renderapps/cross_modal_registration/import_EM_registration_projects_multi.py index 26998cd..16792fe 100644 --- a/renderapps/cross_modal_registration/import_EM_registration_projects_multi.py +++ b/renderapps/cross_modal_registration/import_EM_registration_projects_multi.py @@ -18,16 +18,16 @@ example_json = { "render":{ "host":"ibs-forrestc-ux1", - "port":8080, + "port":8988, "owner":"Forrest", - "project":"M247514_Rorb_1", + "project":"M246930_Scnn1a_4_f1", "client_scripts":"/pipeline/render/render-ws-java-client/src/main/scripts" }, - "inputStack":"EM_Site4_stitched_SHIFT", - "LMstacks":["BIGREG_MARCH_21_MBP_deconvnew"], - "outputStack":"BIGREG_EM_Site4_stitched", + "inputStack":"REG_STI_FF_S03_DAPI_3", + "LMstacks":["Stitched_1_DAPI_1"], + 
"outputStack":"REG01_STI_FF_S03_DAPI_3", "renderHome":"/var/www/render", - "outputXMLdir":"/nas3/data/M247514_Rorb_1/processed/EMLMRegProjects_Site4/" + "outputXMLdir":"/nas/data/M246930_Scnn1a_4_f1/processed/tilepairfiles/TrackEM_projects/" } class ImportEMRegistrationMultiProjects(TrakEM2RenderModule): def __init__(self,schema_type=None,*args,**kwargs): @@ -47,14 +47,27 @@ def run(self): EMz = renderapi.stack.get_z_values_for_stack(self.args['inputStack'],render=self.render) tilespecsfiles = [] - shiftTransform = AffineModel(B0=self.args['minX'],B1=self.args['minY']) + + buffersize = self.args['buffersize'] + self.args['minX'] = self.args['minX'] - buffersize + self.args['minY'] = self.args['minY'] - buffersize + self.args['maxX'] = self.args['maxX'] + buffersize + self.args['maxY'] = self.args['maxY'] + buffersize + #width = self.args['maxX']-self.args['minX'] + #height = self.args['maxY']-self.args['minY'] + + print("This is buffersize: %d "%buffersize) + + shiftTransform = AffineModel(B0=self.args['minX'] ,B1=self.args['minY'] ) + + for z in EMz: infile = os.path.join(xmlDir,'%05d.xml'%z) outfile = os.path.join(xmlDir,'%05d.json'%z) newoutfile = os.path.join(xmlDir,'%05d-new.json'%z) self.convert_trakem2_project(infile,xmlDir,outfile) - + newtilejson = json.load(open(outfile,'r')) newEMtilespecs = [TileSpec(json=tsj) for tsj in newtilejson] EMtilespecs = renderapi.tilespec.get_tile_specs_from_minmax_box( @@ -81,4 +94,4 @@ def run(self): if __name__ == "__main__": mod = ImportEMRegistrationMultiProjects(input_data= example_json) - mod.run() \ No newline at end of file + mod.run() diff --git a/renderapps/cross_modal_registration/import_LM_subset_from_EM_registration_multi.py b/renderapps/cross_modal_registration/import_LM_subset_from_EM_registration_multi.py index 5d70550..90ed9a9 100644 --- a/renderapps/cross_modal_registration/import_LM_subset_from_EM_registration_multi.py +++ b/renderapps/cross_modal_registration/import_LM_subset_from_EM_registration_multi.py 
@@ -1,3 +1,6 @@ +if __name__ == "__main__" and __package__ is None: + __package__ = "renderapps.cross_modal_registration.import_LM_subset_from_EM_registration_multi" + import renderapi from ..TrakEM2.trakem2utils import \ createchunks, createheader, createproject, \ diff --git a/renderapps/cross_modal_registration/import_atlas_project.py b/renderapps/cross_modal_registration/import_atlas_project.py index 788e8ec..dc4126c 100644 --- a/renderapps/cross_modal_registration/import_atlas_project.py +++ b/renderapps/cross_modal_registration/import_atlas_project.py @@ -52,7 +52,7 @@ class ImportAtlasSchema(RenderParameters): description="name of stack to save into render") LM_stack = Str(required=True, default='ACQDAPI_1', description="Name of LM stack in render that was imported into atlas and whose coordinate system the EM tiles will be registered to") - make_tiles = Bool(required=False, default=True, + make_tiles = Bool(required=False, default=False, description="whether to launch jobs to make jpg img tiles of raw atlas tif's (inverting and flipping)") example_parameters = { @@ -60,14 +60,14 @@ class ImportAtlasSchema(RenderParameters): "host": "ibs-forrestc-ux1", "port": 8080, "owner": "Forrest", - "project": "M247514_Rorb_1", + "project": "M246930_Scnn1a_4_f1", "client_scripts": "/pipeline/render/render-ws-java-client/src/main/scripts" }, - 'project_path': '/nas/data/M247514_Rorb_1/EMraw/ribbon0000/M247514_Rorb_1_Ribbon0000_take2.a5proj', + 'project_path': '/nas/data/M246930_Scnn1a_4_f1/SEMdata/ribbon1/m246930_Scnn1a_4_Ribbon0001_f1_take2.a5proj', 'LM_dataset_name': 'test', - 'site_name': 'Site 5', - 'output_stack': 'EMSite5_take2RAW', - 'LM_stack': 'BIGREG_MARCH_21_DAPI_1' + 'site_name': 'Site 4', + 'output_stack': 'EMSite4_take2RAW', + 'LM_stack': 'ACQDAPI_0' } if __name__ == '__main__': @@ -83,7 +83,7 @@ class ImportAtlasSchema(RenderParameters): project = doc['F-BioSEM-Project']['BioSemProject'] projname = project_path.split(os.path.sep)[-4] - assert 
(mod.render.DEFAULT_PROJECT == projname) + assert (projname[1:7] in mod.render.DEFAULT_PROJECT) # get the list of sitesets if type(project['AtlasCarrier']['SectionSet']) == collections.OrderedDict: @@ -103,7 +103,7 @@ class ImportAtlasSchema(RenderParameters): # coordinate system of the for siteset in sitesets: if siteset['Name'] == mod.args['site_name']: - print 'in', siteset['Name'] + print '1 in', siteset['Name'] json_files = process_siteset(mod.render, siteset, sectionset, @@ -118,7 +118,7 @@ class ImportAtlasSchema(RenderParameters): if mod.args['make_tiles']: for siteset in sitesets: if siteset['Name'] == mod.args['site_name']: - print 'in', siteset['Name'] + print '2 in', siteset['Name'] # uncomment to make masks and flipped images make_tile_masks(siteset, sectionset, project, project_path) diff --git a/renderapps/cross_modal_registration/make_EM_LM_registration_projects_multi.py b/renderapps/cross_modal_registration/make_EM_LM_registration_projects_multi.py index b662ea5..3c77059 100644 --- a/renderapps/cross_modal_registration/make_EM_LM_registration_projects_multi.py +++ b/renderapps/cross_modal_registration/make_EM_LM_registration_projects_multi.py @@ -1,3 +1,7 @@ + +if __name__ == "__main__" and __package__ is None: + __package__ = "renderapps.cross_modal_registration.make_EM_LM_registration_projects_multi" + import renderapi from ..TrakEM2.trakem2utils import createchunks,createheader,createproject,createlayerset,createfooters,createlayer_fromtilespecs,Chunk import json @@ -12,16 +16,22 @@ example_json = { "render":{ "host":"ibs-forrestc-ux1", - "port":8080, - "owner":"Forrest", - "project":"M247514_Rorb_1", + "port":80, + "owner":"Antibody_testing_2018", + "project":"M362218_CSATlx3_NPY", "client_scripts":"/pipeline/render/render-ws-java-client/src/main/scripts" }, - "inputStack":"BIGREG_EM_Site4_stitched", - "LMstacks":["BIGREG_MARCH_21_PSD95","BIGREG_MARCH_21_MBP_deconvnew","BIGREG_MARCH_21_DAPI_1"], - "outputStack":"BIGREG_EM_Site4", + 
"inputStack":"Stitched_1_NPY", + "LMstacks":["Stitched_1_DAPI_1"], + "outputStack":"Test_junk", "renderHome":"/var/www/render", - "outputXMLdir":"/nas3/data/M247514_Rorb_1/processed/EMLMRegMultiProjects_Site4b/" + "outputXMLdir":"/nas3/data/M362218_CSATlx3_NPY/processed/CouncilFigureTrackEM_projects/", + "minX":2270, + "minY":1658, + "minZ":405, + "maxZ":409, + "maxX":3692, + "maxY":3301 } class makeEMLMRegistrationMultiProjects(RenderModule): def __init__(self,schema_type=None,*args,**kwargs): @@ -30,13 +40,21 @@ def __init__(self,schema_type=None,*args,**kwargs): super(makeEMLMRegistrationMultiProjects,self).__init__(schema_type=schema_type,*args,**kwargs) def run(self): print self.args - - + + #fill in missing bounds with the input stack bounds bounds = self.render.run(renderapi.stack.get_stack_bounds,self.args['inputStack']) for key in bounds.keys(): self.args[key]=self.args.get(key,bounds[key]) + + #buffersize = 3000 + buffersize = self.args['buffersize'] + self.args['minX'] = self.args['minX'] - buffersize + self.args['minY'] = self.args['minY'] - buffersize + self.args['maxX'] = self.args['maxX'] + buffersize + self.args['maxY'] = self.args['maxY'] + buffersize + if not os.path.isdir(self.args['outputXMLdir']): os.makedirs(self.args['outputXMLdir']) EMstack = self.args['inputStack'] @@ -48,6 +66,8 @@ def run(self): outfile = os.path.join(self.args['outputXMLdir'],'%05d.xml'%z) createheader(outfile) createproject(outfile) + + createlayerset(outfile,width=(self.args['maxX']-self.args['minX']),height=(self.args['maxY']-self.args['minY'])) EMtilespecs = renderapi.tilespec.get_tile_specs_from_minmax_box( EMstack, @@ -61,33 +81,32 @@ def run(self): for ts in EMtilespecs: ts.minint = 0 ts.maxint = 6000 - createlayer_fromtilespecs(EMtilespecs, outfile,0,shiftx=-self.args['minX'],shifty=-self.args['minY']) + createlayer_fromtilespecs(EMtilespecs, outfile,0,shiftx=-self.args['minX'],shifty=-self.args['minY'],affineOnly=True) for i,LMstack in enumerate(LMstacks): 
LMtilespecs = renderapi.tilespec.get_tile_specs_from_minmax_box( LMstack, z, - mod.args['minX'], - mod.args['maxX'], - mod.args['minY'], - mod.args['maxY'], - render=mod.render) + self.args['minX'], + self.args['maxX'], + self.args['minY'], + self.args['maxY'], + render=self.render) if 'PSD' in LMstack: for ts in LMtilespecs: - ts.minint = 2400 - ts.maxint = 7000 + ts.minint = 1500 + ts.maxint = 8000 if 'MBP' in LMstack: for ts in LMtilespecs: ts.minint = 0 - ts.maxint = 6000 + ts.maxint = 10000 if 'DAPI' in LMstack: for ts in LMtilespecs: ts.minint = 0 - ts.maxint = 6000 + ts.maxint = 4000 - createlayer_fromtilespecs(LMtilespecs, outfile,i+1,shiftx=-mod.args['minX'],shifty=-mod.args['minY'],affineOnly=True) + createlayer_fromtilespecs(LMtilespecs, outfile,i+1,shiftx=-self.args['minX'],shifty=-self.args['minY'],affineOnly=True) createfooters(outfile) if __name__ == "__main__": mod = makeEMLMRegistrationMultiProjects(input_data= example_json) mod.run() - diff --git a/renderapps/hierarchical_alignment/__init__.py b/renderapps/hierarchical_alignment/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/renderapps/hierarchical_alignment/split_stack.py b/renderapps/hierarchical_alignment/split_stack.py new file mode 100644 index 0000000..695d8a8 --- /dev/null +++ b/renderapps/hierarchical_alignment/split_stack.py @@ -0,0 +1,177 @@ +if __name__ == "__main__" and __package__ is None: + __package__ = "renderapps.heirarchical_alignment.split_stack" +import renderapi +from ..module.render_module import RenderModule, RenderParameters +from argschema.fields import InputFile, InputDir, Str, Float, Int, List +import numpy as np +import argschema +import json +example_input = { + "render": { + "host": "render-dev-eric.neurodata.io", + "port": 8080, + "owner": "flyTEM", + "project": "trautmane_test", + "client_scripts": "/Users/forrestcollman/RenderStack/render/render-ws-java-client/src/main/scripts" + }, + "input_stack": "rough_tiles", + "output_split_k":5, + 
"output_size":1024, + "pool_size": 5 +} + +# output_json = { +# "output_stacks":["stack_1_0","stack_1_1","stack_1_2"], +# "output_split_level":3, +# "output_rows":1, +# "output_columns"3 +# } + + +# example_input = { +# "render": { +# "host": "ibs-forrestc-ux1", +# "port": 8080, +# "owner": "M246930_Scnn1a", +# "project": "M246930_Scnn1a_4", +# "client_scripts": "/var/www/render/render-ws-java-client/src/main/scripts" +# }, +# "input_stacks": ['stack_1_0','stack_1_1','stack_1_2','stack_1_3','stack_5','stack_6','stack_7','stack_8','stack_9'] +# "output_split_level":5, +# "pool_size": 20 +# } + +# output_json= +# { +# "stacks_to_split":['stack_1','stack_2','stack_7'], +# "stacks_to_copy":['stack_3','stack_4','stack_5','stack_6','stack_8','stack_9'] +# } + +class SplitStackParams(RenderParameters): + input_stack = Str(required=True, description='Input stack') + output_split_k = Int(required=True, description='Maximum number of rows/columns of split stack outputs') + output_size = Int(required=True, description='approximate size of split tiles, one of height or width will be this') + pool_size = Int(required=False,default=20,description='size of pool for parallel processing (default=20)') + +class SplitStackOutputParams(argschema.schemas.DefaultSchema): + output_stacks=List(Str,required=True, description = 'stacks this split into') + rows = Int(required=True,description="number of rows this module split the input stack into") + cols = Int(required=True, description="number of columns this modules split the input stack into") + output_split_k = Int(required=True, description="output split level... 
repeated from input") + +class SplitStack(RenderModule): + default_schema=SplitStackParams + + def run(self): + stack = self.args['input_stack'] + bounds = renderapi.stack.get_stack_bounds(stack,render=self.render) + zvalues = renderapi.stack.get_z_values_for_stack(stack, render=self.render) + width = bounds['maxX']-bounds['minX'] + height = bounds['maxY']-bounds['minY'] + max_dim = np.max([width,height]) + + fov = max_dim/self.args['output_split_k'] + + rows = int(np.round(height/fov)) + cols = int(np.round(width/fov)) + + fov_height = height/rows + fov_width = width/cols + scale = self.args['output_size']/fov + scale = np.min([1.0,scale]) + level = np.int(np.ceil(np.log2(1/scale))) + level_scale = 1.0/np.power(2,level) + print 'level',level + print 'level_scale',level_scale + + + #scale=1.0 + print 'scale',scale + print fov_height,fov_width + + x = np.arange(bounds['minX'],bounds['maxX'],fov_width) + y = np.arange(bounds['minY'],bounds['maxY'],fov_height) + print x,y + print len(x),len(y),cols,rows + xx,yy = np.meshgrid(x,y) + + template = "http://{}:{}/render-ws/v1/owner/{}/project/{}/stack/{}/z/{}/box/{},{},{},{},{}/tiff-image?name=z{}.tif" + render_args = self.args['render'] + + for k,(xc,yc) in enumerate(zip(xx.ravel(),yy.ravel())): + + i,j=np.unravel_index(k,(rows,cols)) + #print i,k,xc,yc + tform1 = renderapi.transform.AffineModel(B0=-fov_width/2, + B1=fov_height/2, + labels=['middle_center']) + tform2 = renderapi.transform.AffineModel(B0=xc+(fov_width/2), + B1=yc+(fov_height/2), + labels=['to_global']) + tform3 = renderapi.transform.AffineModel(labels=['identity']) + tforms = [tform1,tform2,tform3] + + outstack = stack + "_%d"%k + tilespecs = [] + #tform_id = 'boxtform_{}_{}_{}'.format( stack,i,j ) + #tform_list = renderapi.transform.TransformList([tform1,tform2,tform3], + # transformId=) + #ref_tform = renderapi + + for z in zvalues: + layout = renderapi.tilespec.Layout(sectionId="%2.1f"%z, + cameraId="virtual", + 
scopeId="heir_{}".format(self.args['output_split_k']), + imageRow=i, + imageCol=j, + rotation=0, + stageX=xc, + stageY=yc) + imageUrl = template.format(render_args['host'], + render_args['port'], + render_args['owner'], + render_args['project'], + stack, + int(z), + xc, + yc, + np.int(fov_width), + np.int(fov_height), + level_scale, + int(z)) + + mml = renderapi.tilespec.MipMapLevel(level,imageUrl=imageUrl) + tilespec= renderapi.tilespec.TileSpec( + tileId="{}_{}_{}_{}_{}".format(outstack,i,j,k,z), + mipMapLevels=[mml], + z=z, + tforms=tforms, + layout=layout, + maxint=255, + minint=0, + width = int(fov_width), + height = int(fov_height) + ) + + tilespecs.append(tilespec) + + #print json.dumps(tilespecs[0].to_dict(),indent=4) + renderapi.stack.create_stack(outstack,render=self.render) + #print renderapi.utils.renderdumps(tilespecs,indent=4) + renderapi.client.import_tilespecs(outstack,tilespecs, render=self.render) + renderapi.stack.set_stack_state(outstack,'COMPLETE',render=self.render) + + + + + # out_d ={ + # 'output_stacks':output_stacks, + # 'output_split_k':self.args['output_split_k'], + # 'rows':rows, + # 'columns':cols + # } + # self.output(out_d) + +if __name__ == '__main__': + mod = SplitStack(input_data = example_input) + mod.run() diff --git a/renderapps/hierarchical_alignment/split_stack_2.py b/renderapps/hierarchical_alignment/split_stack_2.py new file mode 100644 index 0000000..5225f29 --- /dev/null +++ b/renderapps/hierarchical_alignment/split_stack_2.py @@ -0,0 +1,183 @@ +if __name__ == "__main__" and __package__ is None: + __package__ = "renderapps.heirarchical_alignment.split_stack" +import renderapi +from ..module.render_module import RenderModule, RenderParameters +from argschema.fields import InputFile, InputDir, Str, Float, Int, List +import numpy as np +import argschema +import json +example_input = { + "render": { + "host": "render-dev-eric.neurodata.io", + "port": 8080, + "owner": "flyTEM", + "project": "trautmane_test", + 
"client_scripts": "/Users/forrestcollman/RenderStack/render/render-ws-java-client/src/main/scripts" + }, + "input_stack": "rough_tiles", + "output_split_k":5, + "output_size":1024, + "pool_size": 5 +} + +# output_json = { +# "output_stacks":["stack_1_0","stack_1_1","stack_1_2"], +# "output_split_level":3, +# "output_rows":1, +# "output_columns"3 +# } + + +# example_input = { +# "render": { +# "host": "ibs-forrestc-ux1", +# "port": 8080, +# "owner": "M246930_Scnn1a", +# "project": "M246930_Scnn1a_4", +# "client_scripts": "/var/www/render/render-ws-java-client/src/main/scripts" +# }, +# "input_stacks": ['stack_1_0','stack_1_1','stack_1_2','stack_1_3','stack_5','stack_6','stack_7','stack_8','stack_9'] +# "output_split_level":5, +# "pool_size": 20 +# } + +# output_json= +# { +# "stacks_to_split":['stack_1','stack_2','stack_7'], +# "stacks_to_copy":['stack_3','stack_4','stack_5','stack_6','stack_8','stack_9'] +# } + +class SplitStackParams(RenderParameters): + input_stack = Str(required=True, description='Input stack') + output_split_k = Int(required=True, description='Maximum number of rows/columns of split stack outputs') + output_size = Int(required=True, description='approximate size of split tiles, one of height or width will be this') + pool_size = Int(required=False,default=20,description='size of pool for parallel processing (default=20)') + +class SplitStackOutputParams(argschema.schemas.DefaultSchema): + output_stacks=List(Str,required=True, description = 'stacks this split into') + rows = Int(required=True,description="number of rows this module split the input stack into") + cols = Int(required=True, description="number of columns this modules split the input stack into") + output_split_k = Int(required=True, description="output split level... 
repeated from input") + +class SplitStack(RenderModule): + default_schema=SplitStackParams + + def run(self): + stack = self.args['input_stack'] + bounds = renderapi.stack.get_stack_bounds(stack,render=self.render) + zvalues = renderapi.stack.get_z_values_for_stack(stack, render=self.render) + width = bounds['maxX']-bounds['minX'] + height = bounds['maxY']-bounds['minY'] + max_dim = np.max([width,height]) + + fov = max_dim/self.args['output_split_k'] + + rows = int(np.round(height/fov)) + cols = int(np.round(width/fov)) + + fov_height = height/rows + fov_width = width/cols + scale = self.args['output_size']/fov + scale = np.min([1.0,scale]) + level = np.int(np.ceil(np.log2(1/scale))) + level_scale = 1.0/np.power(2,level) + print 'level',level + print 'level_scale',level_scale + + + #scale=1.0 + print 'scale',scale + print 'fov_hw',fov_height,fov_width + + x = np.arange(bounds['minX'],bounds['maxX'],fov_width) + y = np.arange(bounds['minY'],bounds['maxY'],fov_height) + print x,y + print len(x),len(y),cols,rows + xx,yy = np.meshgrid(x,y) + fov_ds_width=int(np.round(fov_width)*scale) + fov_ds_height=int(np.round(fov_height)*scale) + + fov_scale = 1.0*fov_width/fov_ds_width + template = "http://{}:{}/render-ws/v1/owner/{}/project/{}/stack/{}/z/{}/box/{},{},{},{},{}/tiff-image?name=z{}.tif" + render_args = self.args['render'] + + for k,(xc,yc) in enumerate(zip(xx.ravel(),yy.ravel())): + + i,j=np.unravel_index(k,(rows,cols)) + #print i,k,xc,yc + tform0 = renderapi.transform.AffineModel(M00=fov_scale, + M11=fov_scale, + labels=['scale']) + tform1 = renderapi.transform.AffineModel(B0=-fov_width/2, + B1=fov_height/2, + labels=['middle_center']) + tform2 = renderapi.transform.AffineModel(B0=xc+(fov_width/2), + B1=yc+(fov_height/2), + labels=['to_global']) + tform3 = renderapi.transform.AffineModel(labels=['identity']) + tforms = [tform0,tform1,tform2,tform3] + + outstack = stack + "_%d_v2"%k + tilespecs = [] + #tform_id = 'boxtform_{}_{}_{}'.format( stack,i,j ) + #tform_list 
= renderapi.transform.TransformList([tform1,tform2,tform3], + # transformId=) + #ref_tform = renderapi + + for z in zvalues: + layout = renderapi.tilespec.Layout(sectionId="%2.1f"%z, + cameraId="virtual", + scopeId="heir_{}".format(self.args['output_split_k']), + imageRow=i, + imageCol=j, + rotation=0, + stageX=xc, + stageY=yc) + imageUrl = template.format(render_args['host'], + render_args['port'], + render_args['owner'], + render_args['project'], + stack, + int(z), + xc, + yc, + int(np.round(fov_width)), + int(np.round(fov_height)), + scale, + int(z)) + + mml = renderapi.tilespec.MipMapLevel(0,imageUrl=imageUrl) + tilespec= renderapi.tilespec.TileSpec( + tileId="{}_{}_{}_{}_{}".format(outstack,i,j,k,z), + mipMapLevels=[mml], + z=z, + tforms=tforms, + layout=layout, + maxint=255, + minint=0, + width = fov_ds_width, + height = fov_ds_height + ) + + tilespecs.append(tilespec) + + #print json.dumps(tilespecs[0].to_dict(),indent=4) + renderapi.stack.create_stack(outstack,render=self.render) + #print renderapi.utils.renderdumps(tilespecs,indent=4) + renderapi.client.import_tilespecs(outstack,tilespecs, render=self.render) + renderapi.stack.set_stack_state(outstack,'COMPLETE',render=self.render) + + + + + # out_d ={ + # 'output_stacks':output_stacks, + # 'output_split_k':self.args['output_split_k'], + # 'rows':rows, + # 'columns':cols + # } + # self.output(out_d) + +if __name__ == '__main__': + mod = SplitStack(input_data = example_input) + mod.run() diff --git a/renderapps/intensity_correction/__init__.py b/renderapps/intensity_correction/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/renderapps/intensity_correction/apply_deconv_zoned.py b/renderapps/intensity_correction/apply_deconv_zoned.py new file mode 100644 index 0000000..ba3e20b --- /dev/null +++ b/renderapps/intensity_correction/apply_deconv_zoned.py @@ -0,0 +1,232 @@ +if __name__ == "__main__" and __package__ is None: + __package__ = "renderapps.intensity_correction.apply_deconv_zoned" 
+import os +import renderapi +from ..module.render_module import RenderModule, RenderParameters +from argschema.fields import InputFile, InputDir, Str, Float, Int, Bool +from functools import partial +import numpy as np +from skimage.morphology import disk +import cv2 +import tifffile +from deconvtools import deconvlucy, deconvblind + +example_input = { + "render": { + "host": "ibs-forrestc-ux1", + "port": 8080, + "owner": "Forrest", + "project": "M246930_Scnn1a_4_f1", + "client_scripts": "/var/www/render/render-ws-java-client/src/main/scripts" + }, + "input_stack": "Flatfieldcorrected_3_Synapsin", + "output_stack": "Deconv_zoned_3_Synapsin", + "output_directory": "/nas/data/M246930_Scnn1a_4_f1/processed/Deconv_zoned", + "z_index": 0, + "pool_size": 20, + "psf_file": "/nas/data/M246930_Scnn1a_4_f1/processed/psfs/psf_Synapsin.tif", + "num_iter": 20, + "bgrd_size": 20, + "scale_factor": 4 +} + +class ApplyDeconvZonedParams(RenderParameters): + input_stack = Str(required=True, + description='Input stack') + output_stack = Str(required=True, + description='Output stack') + output_directory = Str(required=True, + description='Directory for storing Images') + z_index = Int(required=True, + description='z value for section') + pool_size = Int(required=False, default=20, + description='size of pool for parallel processing (default=20)') + psf_file = InputFile(required=True, + description='path to psf file') + num_iter = Int(required=True, default=20, + description='number of iterations (default=20)') + bgrd_size = Int(required=False, default=20, + description='size of rolling ball (default=20)') + scale_factor = Int(required=False, default=1, + description='scaling factor (default=1)') + close_stack = Bool(required=False, default=False, + description="whether to close stack or not") + +def getImage(ts): + d = ts.to_dict() + img = tifffile.imread(d['mipmapLevels'][0]['imageUrl']) + img = img.astype(float) + return img + +def getPSF(filename): + psf = 
tifffile.imread(filename) + psf = psf.astype(float) + psf = psf/np.sum(psf) + return psf + +def bloc_deconv(img, psf, num_iter, blocksize=[128,128], blockoverlap=[10,10], + deconv='lucy'): + print 'blocksize {}, blockoverlap {}, deconv {}, num iter {}'\ + .format(blocksize, blockoverlap, deconv, num_iter) + imgsize = img.shape + + # calculate number of blocks + num_blocks = int((imgsize[0] + blocksize[0])/blocksize[0]*(imgsize[1] + + blocksize[1])/blocksize[1]) + b0 = range(0, imgsize[0] + blocksize[0], blocksize[0]) + b1 = range(0, imgsize[1] + blocksize[1], blocksize[1]) + + # divide image into blocks + blocks, idx = create_blocks(img, imgsize, num_blocks, b0, b1, blocksize, blockoverlap) + + # apply deconvolution to each block + blocks_dec = [] + if deconv == 'lucy': + psf_calc = [] + if len(psf.shape) > 2: + for k in range(num_blocks): + blocks_dec.append(deconvlucy(blocks[k], psf[k,:,:], num_iter)) + else: + for k in range(num_blocks): + blocks_dec.append(deconvlucy(blocks[k], psf, num_iter)) + else: + psf_init = np.ones((21,21)) + psf_calc = np.zeros((num_blocks, psf_init.shape[0], psf_init.shape[1])) + for k in range(num_blocks): + blocks_dec_k, psf_calc[k,:,:] = deconvblind(blocks[k], psf_init, num_iter) + blocks_dec.append(blocks_dec_k) + + # put blocks back together using average blending + img_dec = deblock(blocks_dec, imgsize, blocksize, num_blocks, b0, b1, blockoverlap, idx) + return img_dec, psf_calc + +def create_blocks(img, imgsize, num_blocks, b0, b1, blocksize, blockoverlap): + #divide image into blocks + # create block indices + idx = np.zeros((num_blocks,4), dtype=np.int) + k = 0 + for n in b1: + for m in b0: + idx[k, 0] = m - m/blocksize[0]*blockoverlap[0] # block's top edge + idx[k, 1] = m - m/blocksize[0]*blockoverlap[0] + blocksize[0] #block's bottom edge + idx[k, 2] = n - n/blocksize[1]*blockoverlap[1] # block's left edge + idx[k, 3] = n - n/blocksize[1]*blockoverlap[1] + blocksize[1] # block's right edge + if m == imgsize[0]: + idx[k, 1] = 
m #block's bottom edge + if n == imgsize[1]: + idx[k, 3] = n #block's bottom edge + k = k + 1 + + #create blocks + blocks = [] + for k in range(num_blocks): + blocks.append(img[idx[k,0]:idx[k,1], idx[k,2]:idx[k,3]]) + return blocks, idx + +def deblock(blocks, imgsize, blocksize, num_blocks, b0, b1, blockoverlap, idx): + # put blocks back together using average blending + mask = np.ones(imgsize) + #take into account masked 3 edge pixels on each block + for m in b0[1:]: + mask[m - m/blocksize[0]*blockoverlap[0] + 3:m - m/blocksize[0]*blockoverlap[0] + + blockoverlap[0] - 3, :] = 0.5 + for n in b1[1:]: + mask[:, n - n/blocksize[1]*blockoverlap[1] + 3:n - n/blocksize[1]*blockoverlap[1] + + blockoverlap[1] - 3] = 0.5 + for m in b0[1:]: + mask[m - m/blocksize[0]*blockoverlap[0] + 3:m - m/blocksize[0]*blockoverlap[0] + + blockoverlap[0] - 3, + n - n/blocksize[1]*blockoverlap[1] + 3:n - n/blocksize[1]*blockoverlap[1] + + blockoverlap[1] - 3] = 0.25 + #mask 3 edge pixels on each block except image borders + for k in range(num_blocks): + if idx[k,0]!=0: + blocks[k][0:3,:] = 0 + if idx[k,1]!=imgsize[0]: + blocks[k][-3:,:] = 0 + if idx[k,2]!=0: + blocks[k][:,0:3] = 0 + if idx[k,3]!=imgsize[1]: + blocks[k][:,-3:] = 0 + img_new = np.zeros(imgsize) + for k in range(num_blocks): + img_new[idx[k,0]:idx[k,1], idx[k,2]:idx[k,3]]=\ + img_new[idx[k,0]:idx[k,1], idx[k,2]:idx[k,3]]+\ + blocks[k]*mask[idx[k,0]:idx[k,1], idx[k,2]:idx[k,3]] + return img_new + +def process_tile(num_iter, bgrd_size, scale_factor, dirout, stackname, input_ts): + img = getImage(input_ts) + + #subtract background + if bgrd_size == 50: + img1 = cv2.morphologyEx(img,cv2.MORPH_TOPHAT,disk(bgrd_size)) + + if not bgrd_size == 0: + img = cv2.morphologyEx(img,cv2.MORPH_TOPHAT,disk(20)) + + #apply deconvolution + img_dec, psf_calc = bloc_deconv(img, psf=None, num_iter=10, blocksize=[128,128], + blockoverlap=[10,10], deconv='blind') + if bgrd_size == 50: + img = img1 + + img_dec, psf_calc = bloc_deconv(img, psf_calc, 
num_iter, blocksize=[128,128], + blockoverlap=[10,10], deconv='lucy') + + img_dec = img_dec/scale_factor + img_dec[img_dec > 65535] = 65535 + +# if not os.path.exists(dirout): +# os.makedirs(dirout) + d = input_ts.to_dict() + [head,tail] = os.path.split(d['mipmapLevels'][0]['imageUrl']) + outImage = "%s/%s_%04d_%s"%(dirout, stackname, input_ts.z,tail) + tifffile.imsave(outImage, np.uint16(img_dec)) + + output_ts = input_ts + d = output_ts.to_dict() + for i in range(1,len(d['mipmapLevels'])): + del d['mipmapLevels'][i] + d['mipmapLevels'][0]['imageUrl'] = outImage + output_ts.from_dict(d) + return output_ts + +class ApplyDeconvZoned(RenderModule): + def __init__(self,schema_type=None,*args,**kwargs): + if schema_type is None: + schema_type = ApplyDeconvZonedParams + super(ApplyDeconvZoned,self).__init__(schema_type=schema_type,*args,**kwargs) + + def run(self): + + #get tilespecs + Z = self.args['z_index'] + inp_tilespecs = renderapi.tilespec.get_tile_specs_from_z( + self.args['input_stack'], Z, render=self.render) + + if not os.path.exists(self.args['output_directory']): + os.makedirs(self.args['output_directory']) + #deconvolve each tilespecs and return tilespecs + #render=self.render + #psf = getPSF(self.args['psf_file']) + + mypartial = partial(process_tile, self.args['num_iter'], + self.args['bgrd_size'], self.args['scale_factor'], + self.args['output_directory'], self.args['output_stack']) + with renderapi.client.WithPool(self.args['pool_size']) as pool: + tilespecs = pool.map(mypartial,inp_tilespecs) + + #upload to render + renderapi.stack.create_stack( + self.args['output_stack'],cycleNumber=2,cycleStepNumber=1, + render=self.render) + renderapi.client.import_tilespecs( + self.args['output_stack'],tilespecs,render=self.render, + close_stack=self.args['close_stack']) +# renderapi.stack.set_stack_state( +# self.args['output_stack'],"COMPLETE",render=self.render) + +if __name__ == "__main__": + mod = 
ApplyDeconvZoned(input_data=example_input,schema_type=ApplyDeconvZonedParams) + mod.run() diff --git a/renderapps/intensity_correction/apply_deconvolution.py b/renderapps/intensity_correction/apply_deconvolution.py new file mode 100644 index 0000000..0584ba0 --- /dev/null +++ b/renderapps/intensity_correction/apply_deconvolution.py @@ -0,0 +1,134 @@ +if __name__ == "__main__" and __package__ is None: + __package__ = "renderapps.intensity_correction.apply_deconvolution" +import os +import renderapi +from ..module.render_module import RenderModule, RenderParameters +from argschema.fields import InputFile, InputDir, Str, Float, Int, Bool +from functools import partial +import numpy as np +from skimage.morphology import disk +import cv2 +import tifffile +from deconvtools import deconvlucy + +example_input = { + "render": { + "host": "ibs-forrestc-ux1", + "port": 8080, + "owner": "6_ribbon_expts", + "project": "M335503_Ai139_smallvol", + "client_scripts": "/var/www/render/render-ws-java-client/src/main/scripts" + }, + "input_stack": "Flatfieldcorrected_1_GFP", + "output_stack": "Deconvolved_1_GFP", + "output_directory": "/nas2/data/M335503_Ai139_smallvol/processed/Deconvolved", + "z_index": 400, + "pool_size": 20, + "psf_file": "/nas2/data/M335503_Ai139_smallvol/processed/psfs/psf_GFP.tif", + "num_iter": 20, + "bgrd_size": 50, + "scale_factor": 1 +} + +class ApplyDeconvParams(RenderParameters): + input_stack = Str(required=True, + description='Input stack') + output_stack = Str(required=True, + description='Output stack') + output_directory = Str(required=True, + description='Directory for storing Images') + z_index = Int(required=True, + description='z value for section') + pool_size = Int(required=False, default=20, + description='size of pool for parallel processing (default=20)') + psf_file = InputFile(required=True, + description='path to psf file') + num_iter = Int(required=True, default=20, + description='number of iterations (default=20)') + bgrd_size = 
Int(required=False, default=20, + description='size of rolling ball (default=20)') + scale_factor = Int(required=False, default=1, + description='scaling factor (default=1)') + close_stack = Bool(required=False, default=False, + description="whether to close stack or not") + +def getImage(ts): + d = ts.to_dict() + img = tifffile.imread(d['mipmapLevels'][0]['imageUrl']) + img = img.astype(float) + return img + +def getPSF(filename): + psf = tifffile.imread(filename) + psf = psf.astype(float) + psf = psf/np.sum(psf) + return psf + +#def subtract_bgrd(img, bgrd_size): +# if not bgrd_size == 0: +# img = cv2.morphologyEx(img,cv2.MORPH_TOPHAT,disk(bgrd_size)) + +def process_tile(psf, num_iter, bgrd_size, scale_factor, dirout, stackname, input_ts): + img = getImage(input_ts) + + #subtract background + if not bgrd_size == 0: + img = cv2.morphologyEx(img,cv2.MORPH_TOPHAT,disk(bgrd_size)) + + #apply deconvolution + img_dec = deconvlucy(img, psf, num_iter) + + img_dec = img_dec/scale_factor + img_dec[img_dec > 65535] = 65535 + + if not os.path.exists(dirout): + os.makedirs(dirout) + d = input_ts.to_dict() + [head,tail] = os.path.split(d['mipmapLevels'][0]['imageUrl']) + outImage = "%s/%s_%04d_%s"%(dirout, stackname, input_ts.z,tail) + tifffile.imsave(outImage, np.uint16(img_dec)) + + output_ts = input_ts + d = output_ts.to_dict() + for i in range(1,len(d['mipmapLevels'])): + del d['mipmapLevels'][i] + d['mipmapLevels'][0]['imageUrl'] = outImage + output_ts.from_dict(d) + return output_ts + +class ApplyDeconv(RenderModule): + def __init__(self,schema_type=None,*args,**kwargs): + if schema_type is None: + schema_type = ApplyDeconvParams + super(ApplyDeconv,self).__init__(schema_type=schema_type,*args,**kwargs) + + def run(self): + + #get tilespecs + Z = self.args['z_index'] + inp_tilespecs = renderapi.tilespec.get_tile_specs_from_z( + self.args['input_stack'], Z, render=self.render) + + #deconvolve each tilespecs and return tilespecs + #render=self.render + psf = 
getPSF(self.args['psf_file']) + + mypartial = partial(process_tile, psf, self.args['num_iter'], + self.args['bgrd_size'], self.args['scale_factor'], + self.args['output_directory'], self.args['output_stack']) + with renderapi.client.WithPool(self.args['pool_size']) as pool: + tilespecs = pool.map(mypartial,inp_tilespecs) + + #upload to render + renderapi.stack.create_stack( + self.args['output_stack'],cycleNumber=2,cycleStepNumber=1, + render=self.render) + renderapi.client.import_tilespecs( + self.args['output_stack'],tilespecs,render=self.render, + close_stack=self.args['close_stack']) +# renderapi.stack.set_stack_state( +# self.args['output_stack'],"COMPLETE",render=self.render) + +if __name__ == "__main__": + mod = ApplyDeconv(input_data=example_input,schema_type=ApplyDeconvParams) + mod.run() diff --git a/renderapps/intensity_correction/deconvtools.py b/renderapps/intensity_correction/deconvtools.py new file mode 100644 index 0000000..7d099cc --- /dev/null +++ b/renderapps/intensity_correction/deconvtools.py @@ -0,0 +1,222 @@ +# -*- coding: utf-8 -*- +""" +Created on Mon Jan 18 18:05:26 2016 + +@author: olgag +""" +# this module contains all functions used to apply deconvolution to image + +import numpy as np +from scipy.fftpack import fftn, ifftn + +def psf2otf(psf, otf_size): + # calculate otf from psf with size >= psf size + + if psf.any(): # if any psf element is non-zero + # pad PSF with zeros up to image size + pad_size = ((0,otf_size[0]-psf.shape[0]),(0,otf_size[1]-psf.shape[1])) + psf_padded = np.pad(psf, pad_size, 'constant') + + # circularly shift psf + psf_padded = np.roll(psf_padded, -int(np.floor(psf.shape[0]/2)), axis=0) + psf_padded = np.roll(psf_padded, -int(np.floor(psf.shape[1]/2)), axis=1) + + #calculate otf + otf = fftn(psf_padded) + # this condition depends on psf size + num_small = np.log2(psf.shape[0])*4*np.spacing(1) + if np.max(abs(otf.imag))/np.max(abs(otf)) <= num_small: + otf = otf.real + else: # if all psf elements are zero + otf = 
np.zeros(otf_size) + return otf + +def otf2psf(otf, psf_size): + # calculate psf from otf with size <= otf size + + if otf.any(): # if any otf element is non-zero + # calculate psf + psf = ifftn(otf) + # this condition depends on psf size + num_small = np.log2(otf.shape[0])*4*np.spacing(1) + if np.max(abs(psf.imag))/np.max(abs(psf)) <= num_small: + psf = psf.real + + # circularly shift psf + psf = np.roll(psf, int(np.floor(psf_size[0]/2)), axis=0) + psf = np.roll(psf, int(np.floor(psf_size[1]/2)), axis=1) + + # crop psf + psf = psf[0:psf_size[0], 0:psf_size[1]] + else: # if all otf elements are zero + psf = np.zeros(psf_size) + return psf + +def deconvlucy(image, psf, num_iter, weight=None, subsmpl=1): + # apply Richardson-Lucy deconvolution to image + + # calculate otf from psf with the same size as image + otf = psf2otf(psf, np.array(image.shape)*subsmpl) + + # create list to be used for iterations + data = [image, 0, [0, 0]] + + # create indexes taking into account subsampling + idx = [np.tile(np.arange(0,image.shape[0]), (subsmpl,1)).flatten(1), + np.tile(np.arange(0,image.shape[1]), (subsmpl,1)).flatten(1)] + + if weight is None: + weight = np.ones(image.shape) # can be input parameter + # apply weight to image to exclude bad pixels or for flat-field correction + image_wtd = np.maximum(weight*image, 0) + data[0] = data[0].take(idx[0], axis=0).take(idx[1], axis=1) + weight = weight.take(idx[0], axis=0).take(idx[1], axis=1) + # normalizing constant + norm_const = np.real(ifftn(otf.conj()*fftn(weight))) + np.sqrt(np.spacing(1)) + + if subsmpl !=1: + vec = np.zeros(len(image.shape)*2, dtype=np.int) + vec[np.arange(0,len(image.shape)*2,2)] = image.shape + vec[vec==0] = subsmpl + + # iterations + alpha = 0 # acceleration parameter + for k in range(num_iter): + if k > 2: + alpha = np.dot(data[2][0].T,data[2][1])\ + /(np.dot(data[2][1].T,data[2][1]) + np.spacing(1)) + alpha = max(min(alpha,1),0) # 0 2: + alpha_img = np.dot(data_img[2][0].T,data_img[2][1])\ + 
/(np.dot(data_img[2][1].T,data_img[2][1]) + np.spacing(1)) + alpha_img = max(min(alpha_img,1),0) # 00: + dst_pts = np.vstack(dst_pts_list) + p_pts = np.vstack(p_pts_list) + final_tform = Transform() + final_tform.estimate(p_pts,dst_pts) + tsp.tforms=tsp.tforms[0:num_local_transforms]+[final_tform] + if setz == True: + tsp.z = tsq.z + tilespecs_out.append(tsp) + except IndexError as e: pass except StopIteration as e: @@ -97,6 +113,7 @@ def fit_transforms_by_pointmatch(render, # print p_pts_global # if k==1: # break + return tilespecs_out @@ -120,17 +137,19 @@ def run(self): self.args['dst_stack'], self.args['matchcollection'], self.args['num_local_transforms'], + self.args['setz'], Transform) outstack = self.args['output_stack'] self.render.run(renderapi.stack.delete_stack, outstack) self.render.run(renderapi.stack.create_stack, outstack) - + sv=self.render.run(renderapi.stack.get_stack_metadata, self.args['dst_stack']) + renderapi.client.import_tilespecs_parallel(outstack,tilespecs, render=self.render, close_stack=True) - + self.render.run(renderapi.stack.set_stack_metadata,outstack, sv) self.render.run(renderapi.stack.set_stack_state, outstack, state='COMPLETE') diff --git a/renderapps/registration/make_synthetic_cross_stack_point_matches.py b/renderapps/registration/make_synthetic_cross_stack_point_matches.py index f0b7b75..2664242 100644 --- a/renderapps/registration/make_synthetic_cross_stack_point_matches.py +++ b/renderapps/registration/make_synthetic_cross_stack_point_matches.py @@ -11,13 +11,14 @@ example_json = { "render": { "host": "ibs-forrestc-ux1", - "port": 8080, + "port": 8988, "owner": "Forrest", - "project": "M247514_Rorb_1", + "project": "M246930_Scnn1a_4_f1", "client_scripts": "/pipeline/render/render-ws-java-client/src/main/scripts" }, - "tile_pair_file": "/nas3/data/M247514_Rorb_1/processed/tilepairfiles/DAPI1_DAPI3_tilepairs.json", - "matchcollection": "M247514_Rorb_1_DAPI3_TO_DAPI1" + "tile_pair_file": 
"/nas/data/M246930_Scnn1a_4_f1/processed/tilepairfiles/REG_DAPI1_DAPI2_tilepairs.json", + "matchcollection": "M246930_Scnn1a_4_f1_DAPI2_TO_DAPI1", + "local_transforms": 0 } @@ -30,7 +31,7 @@ class MakeSyntheticCrossStackPointMatchesParameters(RenderParameters): description = 'number of points in one axis of grid points') matchcollection = Str(required=True, description= 'match collection to save point matches into') - + local_transforms = Int(required=False,default=0,description='how many transforms are local and the point matches should be written down in coordinates after this local transform is applied') def define_local_grid(ts, num_points): xvals = np.linspace(0, ts.width - 1, num=num_points, endpoint=True, dtype=np.float64) @@ -40,52 +41,81 @@ def define_local_grid(ts, num_points): xy = np.vstack([xx.ravel(), yy.ravel()]).T return xy +def define_synthetic_point_matches(tsp,tsq,grid_size=8,local_transforms=0): + src_points = define_local_grid(tsp,grid_size) + src_points_global = renderapi.transform.estimate_dstpts(tsp.tforms,src_points) + + if local_transforms>0: + src_points = renderapi.transform.estimate_dstpts(tsp.tforms[0:local_transforms], + src_points) + dst_points = src_points_global + for tform in reversed(tsq.tforms[local_transforms:]): + dst_points = tform.inverse_tform(dst_points) + good_xmin=dst_points[:,0]>0 + good_ymin=dst_points[:,1]>0 + good_xmax=dst_points[:,0]0 - good_ymin=dst_points[:,1]>0 - good_xmax=dst_points[:,0]0: +# src_points = renderapi.transform.estimate_dstpts(tsp.tforms[0:local_transforms], +# src_points) +# dst_points = src_points_global +# for tform in reversed(tsq.tforms[local_transforms:]): +# dst_points = tform.inverse_tform(dst_points) +# good_xmin=dst_points[:,0]>0 +# good_ymin=dst_points[:,1]>0 +# good_xmax=dst_points[:,0]=0) and (maxz >=0): + print "sub z exist" + for i in range(minz,maxz): + if i in allzvalues: + #if range(minz, maxz) in allzvalues: + zvalues.append(i) + print 'added new z' + else: + print " this z value 
does not exist:" + str(i) + + else: + #print zvalues + zvalues = allzvalues + print zvalues + + + mypartial = partial(process_z,render,input_stack,mask_file) + with renderapi.client.WithPool(self.args['pool_size']) as pool: + jsonFilePaths = pool.map(mypartial, zvalues) + + print len(jsonFilePaths) + + if (self.args['input_stack'] != output_stack): + self.render.run(renderapi.stack.clone_stack,input_stack, output_stack) + print "cloned stack" + self.render.run(renderapi.client.import_jsonfiles_parallel,output_stack, jsonFilePaths) + self.render.run(renderapi.stack.set_stack_state,output_stack,state='COMPLETE') + print "updated stack" + + #renderapi.client.import_tilespecs_parallel(output_stack,newts,render=self.render) + #sv = renderapi.stack.get_stack_metadata(input_stack, render=self.render) + #renderapi.stack.set_stack_metadata(output_stack,sv, render=self.render) + #renderapi.stack.set_stack_state(output_stack,'COMPLETE', render=self.render) + +if __name__ == "__main__": + mod = ApplyMask(input_data= example_parameters) + mod.run() diff --git a/renderapps/stack/merge_session_stacks.py b/renderapps/stack/merge_session_stacks.py new file mode 100644 index 0000000..da70cba --- /dev/null +++ b/renderapps/stack/merge_session_stacks.py @@ -0,0 +1,80 @@ +from renderapps.module.render_module import RenderParameters, RenderModule +import argschema +from functools import partial +import renderapi +import os + +example_parameters = { + "render":{ + "host": "ibs-forrestc-ux1", + "port": 80, + "owner": "Forrest", + "project": "M246930_Scnn1a_4_f1", + "client_scripts": "/pipeline/allenrender/render-ws-java-client/src/main/scripts" + }, + "base_stack":"Stitched_1_DAPI_1", + "channel_stacks":{ + "DAPI1":"Deconvolved_1_DAPI_1", + "DAPI1_FF":"Stitched_1_DAPI_1", + "PSD95":"Deconvolved_1_PSD95", + "GluN1_FF":"Flatfieldcorrected_1_PSD95", + "VGlut1":"Deconvolved_1_VGlut1", + "VGlut1_FF":"Flatfieldcorrected_1_VGlut1", + "Gephyrin":"Deconvolved_1_Gephyrin", + 
"Gephyrin_FF":"Flatfieldcorrected_1_Gephyrin", + }, + "output_stack":"REG_STI_DCV_FF_Session1" +} + +class MergeSessionStacksParameters(RenderParameters): + base_stack = argschema.fields.Str(required=True,description="stack to base tilespecs from") + channel_stacks = argschema.fields.Dict(required=True,description="channelname:stack dictionary to find mipmap levels by frame") + output_stack = argschema.fields.Str(required=True,description="output stack to save tilespecs in") + +#define a standard function for making a json file from render +#returning the filepath to that json, and a list of the framenumbers +def get_tilespecs_and_framenumbers(render,stack,z): + tilespecs = render.run(renderapi.tilespec.get_tile_specs_from_z,stack,z) + def get_framenumber(filepath): + return int(os.path.split(filepath)[1].split('_F')[-1][0:4]) + framenumbers = [get_framenumber(ts.ip.get(0)['imageUrl']) for ts in tilespecs] + return tilespecs,framenumbers + +def process_z(render,base_stack, channel_stacks,output_stack,z): + #use the function to make jsons for aligned and input stacks + base_tilespecs,base_framenumbers = get_tilespecs_and_framenumbers(render, base_stack, z) + + for channelname,channelstack in channel_stacks.items(): + channel_tilespecs,channel_framenumbers = get_tilespecs_and_framenumbers(render, channelstack, z) + for bts,bfn in zip(base_tilespecs,base_framenumbers): + cts = next(t for t,fn in zip(channel_tilespecs,channel_framenumbers) if fn==bfn) + channel = renderapi.channel.Channel(name=channelname,ip=cts.ip,maxIntensity=cts.maxint) + if bts.channels is None: + bts.channels = [] + if 'DAPI' in channelname: + bts.channels = [channel]+bts.channels + else: + bts.channels.append(channel) + renderapi.client.import_tilespecs(output_stack,base_tilespecs,render=render) + +class MergeSessionStacks(RenderModule): + default_schema = MergeSessionStacksParameters + + def run(self): + zvalues = renderapi.stack.get_z_values_for_stack(mod.args['base_stack'],render=self.render) 
+ my_partial = partial(process_z, + self.render, + self.args['base_stack'], + self.args['channel_stacks'], + self.args['output_stack']) + renderapi.stack.create_stack(self.args['output_stack'],render=self.render) + with renderapi.client.WithPool(20) as pool: + pool.map(my_partial,zvalues) + #for z in zvalues: + # my_partial(z) + # break + renderapi.stack.set_stack_state(self.args['output_stack'],'COMPLETE',render=self.render) + +if __name__ == '__main__': + mod=MergeSessionStacks(input_data=example_parameters,args=[]) + mod.run() \ No newline at end of file diff --git a/renderapps/stack/set_stack_state.py b/renderapps/stack/set_stack_state.py new file mode 100644 index 0000000..9c27902 --- /dev/null +++ b/renderapps/stack/set_stack_state.py @@ -0,0 +1,41 @@ +import renderapi +from ..module.render_module import RenderModule, RenderParameters +from argschema.fields import Str, Int, List + +example_json = { + "render": { + "host": "ibs-forrestc-ux1", + "port": 8080, + "owner": "6_ribbon_experiments", + "project": "M335503_Ai139_smallvol", + "client_scripts" : "/pipeline/render/render-ws-java-client/src/main/scripts" + }, + "stack": "Rough_Aligned_Deconv_1_PSD95", + "state": "COMPLETE" +} + +class SetStackStateParameters(RenderParameters): + stack = Str(required=False,default="", + description= 'stack to change state of') + state = Str(required=True, description = "State to set: LOADING or COMPLETE") + +class SetStackState(RenderModule): + def __init__(self, schema_type=None, *args, **kwargs): + if schema_type is None: + schema_type = SetStackStateParameters + super(SetStackState, self).__init__( + schema_type=schema_type, *args, **kwargs) + + def run(self): + + if self.args['stack'] == "": + allstacks = renderapi.render.get_stacks_by_owner_project(render=self.render) + else: + allstacks = [self.args['stack'] ] + for stack in allstacks: + renderapi.stack.set_stack_state(stack, self.args['state'], + render=self.render) + +if __name__ == "__main__": + mod = 
SetStackState(input_data=example_json) + mod.run() diff --git a/renderapps/stack/squeeze_stack.py b/renderapps/stack/squeeze_stack.py index 767d092..f639f41 100644 --- a/renderapps/stack/squeeze_stack.py +++ b/renderapps/stack/squeeze_stack.py @@ -1,10 +1,11 @@ if __name__ == "__main__" and __package__ is None: - __package__ = "renderapps.materialize.make_downsample_image_stack" + __package__ = "renderapps.stack.squeeze_stack" import json import os import renderapi from ..module.render_module import RenderModule,RenderParameters -from json_module import InputFile,InputDir,OutputDir +#from json_module import InputFile,InputDir,OutputDir +from argschema.fields import Str, Int import marshmallow as mm from functools import partial import glob @@ -36,13 +37,13 @@ } class SqueezeStackParameters(RenderParameters): - input_stack = mm.fields.Str(required=True, + input_stack = Str(required=True, metadata={'description':'input stacks'}) - output_stack = mm.fields.Str(required=True, + output_stack = Str(required=True, metadata={'description':'output stack to name'}) - output_directory = mm.fields.Str(required=True, + output_directory = Str(required=True, metadata={'description':'input stacks'}) - pool_size = mm.fields.Int(required=False,default=20, + pool_size = Int(required=False,default=20, metadata={'description':'number of parallel threads to use'}) def process_z(stack,render,output_directory,Z): diff --git a/renderapps/stitching/detect_and_drop_stitching_mistakes.py b/renderapps/stitching/detect_and_drop_stitching_mistakes.py index 4b19857..a42f82d 100644 --- a/renderapps/stitching/detect_and_drop_stitching_mistakes.py +++ b/renderapps/stitching/detect_and_drop_stitching_mistakes.py @@ -81,7 +81,8 @@ def process_section(render,prestitchedStack, poststitchedStack, outputStack, dis Gpos[i]=((ts.minX+ts.maxX)/2,(ts.minY+ts.maxY)/2) #loop over edges in the graph - for p,q in G.edges(): + edges = G.edges() + for p,q in edges.keys(): # need keys with new changes in networkx #p 
and q are now indices into the tilespecs, and labels on the graph nodes #assuming the first transform is the right one, and is only translation @@ -107,7 +108,7 @@ def process_section(render,prestitchedStack, poststitchedStack, outputStack, dis #get the largest connected component of G Gc = max(nx.connected_component_subgraphs(G), key=len) #use it to pick out the good post stitch tilspecs that remain in the graph - ts_good_json = [post_tilespecs[node].to_dict() for node in Gc.nodes_iter()] + ts_good_json = [post_tilespecs[node].to_dict() for node in Gc.nodes()] #changed from nodes_iter to nodes for new networkx #formulate a place to save them jsonfilepath = os.path.join(jsonDirectory,'%s_z%04.0f.json'%(outputStack,z)) #dump the json to that location diff --git a/renderapps/synapse_detection/__init__.py b/renderapps/synapse_detection/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/renderapps/synapse_detection/evaluate_synapse_detection.py b/renderapps/synapse_detection/evaluate_synapse_detection.py new file mode 100644 index 0000000..d2ec2c3 --- /dev/null +++ b/renderapps/synapse_detection/evaluate_synapse_detection.py @@ -0,0 +1,436 @@ +import os +from functools import partial +from argschema import ArgSchema, ArgSchemaParser +import argschema +import marshmallow as mm +from renderapps.TrakEM2.AnnotationJsonSchema import AnnotationFile, NumpyArray +import json +import pandas as pd +from rtree import index +from shapely import geometry +import numpy as np +import pprint as pp +#import pdb +example_json = { + "EM_annotation_json":"/nas3/data/M247514_Rorb_1/annotation/m247514_Site3Annotation_MN_global_v2.json", + "LM_annotation_json":"/nas3/data/M247514_Rorb_1/processed/Site3AlignStacks/glut_synapses46a_global.json", + "EM_boundary_json":"/nas3/data/M247514_Rorb_1/annotation/m247514_Site3Annotation_MN_bb_Site3Align2_global.json", + 
"EM_metadata_csv":"https://docs.google.com/spreadsheets/d/1cV6BuQosLgjZX1e2l9EZHTOSLZGlDETKjlv2-wjpeIk/export?gid=607779864&format=csv", + "EM_inclass_column":"GABA", + "EM_inclass_invert":True, + "EM_not_synapse_column":"NotSynapse", + "output_json":"/nas3/data/M247514_Rorb_1/processed/Site3AlignStacks/glut_synapses45a_evaluation.json" + } +# example_json = { +# "EM_annotation_json":"/nas3/data/M247514_Rorb_1/annotation/m247514_Take2Site4Annotation_MN_Take2Site4global.json", +# "LM_annotation_json":"/nas3/data/M247514_Rorb_1/processed/Take2Site4AlignStacks/glut_synapses15a_global.json", +# "EM_metadata_csv":"https://docs.google.com/spreadsheets/d/1zp3mApWRo5xyg1tqOSt7Ddp0LC7xWIFaFbsxZrk73bQ/export?gid=0&format=csv", +# "EM_inclass_column":"GABA", +# "EM_inclass_invert":True, +# "EM_not_synapse_column":"NotSynapse", +# "output_json":"/nas3/data/M247514_Rorb_1/processed/Take2Site4AlignStacks/glut_synapses15a_evaluation.json" +# } + + +class EvaluateSynapseDetectionParameters(ArgSchema): + EM_annotation_json = argschema.fields.InputFile(required=True, + description='file path to EM annotation file') + LM_annotation_json = argschema.fields.InputFile(required=True, + description='file path to LM annotation results') + EM_boundary_json = argschema.fields.InputFile(required=True, + description='file path to boundary area list') + EM_metadata_csv = argschema.fields.Url(required=True, + description='file path to EM metadata csv') + EM_inclass_column = argschema.fields.Str(required=True, + description="name of column in metadata that indicates whether synapse is gabaergic") + EM_inclass_conditions=argschema.fields.NumpyArray(required=False,default=[1], + description="what values of EM_inclass_column are considered in class") + EM_inclass_invert = argschema.fields.Boolean(required=False,default=False,description="whether to invert the class columns") + EM_not_synapse_column = argschema.fields.Str(required=True, + description="name of column that indicates whether this 
annotation should be ignored") + edge_distance = argschema.fields.Int(required=False, default = 100, + description="""distance from the edge of the bounding box in world coordinates + (same as global_path units) for annotation to be considered edge""") + edge_min_sections = argschema.fields.Int(required=False,default=4, + description="synapses occuring in fewer than this many sections and bordering the first or last section will be considered to be edge cases") + +class EvaluateSynapseDetectionOutput(mm.Schema): + LM_per_EM = argschema.fields.NumpyArray(dtype=np.float,required=True, + description="list of fraction of EM synapses with 0,1, or 2+ synapses over them") + EM_per_LM = argschema.fields.NumpyArray(dtype=np.float,required=True, + description ="list of fraction of LM synapses with 0,1, or 2+ EM synapses over them") + missed_EM = argschema.fields.List(argschema.fields.Str, required=True, + description= "list of EM synapses oids for which there were no LM detections") + split_EM = argschema.fields.List(argschema.fields.Str, required=True, + description= "list of EM synapses oids for which there were LM detections") + correct_EM = argschema.fields.List(argschema.fields.Str, required=True, + description= "list of EM synapses oids for which there were exactly one LM detections") + false_pos_LM = argschema.fields.List(argschema.fields.Str, required=True, + description= "list of LM synapses oids for which there were no EM synapses") + merge_LM = argschema.fields.List(argschema.fields.Str, required=True, + description= "list of LM synapses oids for which there were more than one EM synapses") + correct_LM = argschema.fields.List(argschema.fields.Str, required=True, + description= "list of LM synapses oids for which there were more exactly one EM synapses") + +def load_annotation_file(annotation_path): + """function to read an annotation file from disk + + Parameters + ---------- + annotation_path: str + path to annotation file on disk + + Returns + ------- + 
list[dict]: + A list of dictionaries following the AnnotationFile schema that contains the annotations + """ + with open(annotation_path,'r') as fp: + annotation_d = json.load(fp) + schema = AnnotationFile() + annotations,errors = schema.load(annotation_d) + if len(errors)>0: + print(errors) + assert(len(errors)==0) + return annotations["area_lists"] + +def get_bounding_box_of_al(al): + """a function to return a bounding box of an annotation + + Parameters + ---------- + al: dict + an arealist following the AreaList schema in AnnotationFile + + Returns: + tuple: + (minX,minY,minZ,maxX,maxY,maxZ) tuple of bounding box + """ + Nareas = len(al['areas']) + mins = np.zeros((Nareas,2)) + maxs = np.zeros((Nareas,2)) + zvalues = [] + for i,area in enumerate(al['areas']): + gp = area['global_path'] + mins[i,:] = np.min(gp,axis=0) + maxs[i,:] = np.max(gp,axis=0) + zvalues.append(area['z']) + gmin = np.min(mins,axis=0) + gmax = np.max(maxs,axis=0) + minX = gmin[0] + minY = gmin[1] + maxX = gmax[0] + maxY = gmax[1] + minZ = np.min(zvalues) + maxZ = np.max(zvalues) + return (minX,minY,minZ,maxX,maxY,maxZ) + +def get_annotation_bounds_df(annotations): + """function to get a pandas dataframe of annotation bounds from a list of annotations + + Parameters + ---------- + annotations: list[dict] + a list of annotation dictionaries that follow to the AreaList schema + + Returns + ------- + pandas.DataFrame: + A data frame containing the following columns 'oid','minX','minY','minZ','maxX','maxY','maxZ' + """ + ds=[] + for al in annotations: + (minX,minY,minZ,maxX,maxY,maxZ)=get_bounding_box_of_al(al) + ds.append({ + 'oid':al['oid'], + 'minX':minX, + 'minY':minY, + 'minZ':minZ, + 'maxX':maxX, + 'maxY':maxY, + 'maxZ':maxZ, + }) + df = pd.DataFrame(ds) + return df + +def get_bounding_box_of_annotations(annotations): + """a function to get the overall bounding box surrounding all the annotations + + Parameters + ---------- + annotations: list[dict] + a list of annotation dictionaries that 
follow to the AreaList schema + + Returns: + tuple: + (minX,minY,minZ,maxX,maxY,maxZ) tuple of bounding box that contains all annotations + """ + df = get_annotation_bounds_df(annotations) + ann_minX=df.min().minX + ann_minY=df.min().minY + ann_maxX=df.max().maxX + ann_maxY=df.max().maxY + ann_minZ=df.min().minZ + ann_maxZ=df.max().maxZ + return (ann_minX,ann_minY,ann_minZ,ann_maxX,ann_maxY,ann_maxZ) + +def is_annotation_near_edge(al,EM_boundaries, + distance=100,min_edge_sections=4): + """function to test if annotation is near the 'edge' of a dataset + + Parameters + ---------- + al: dict + annotation dictionary + EM_boundaries: dict + em boundary dictionary + distance: int + x,y distance from edge to be considered near edge (default 100) + min_edge_section: int + if annotation is in fewer than these number of sections + and touches z border of dataset it will be considered in edge (default 4) + + Returns + ------- + bool: + True/False if this annotation is near edge + """ + + ann_boundary_zs = np.array([alb['areas'][0]['z'] for alb in EM_boundaries]) + ann_minZ = np.min(ann_boundary_zs) + ann_maxZ = np.max(ann_boundary_zs) + zvals=np.unique(np.array([area['z'] for area in al['areas']])) + if len(zvals)ann_maxZ: + #print('out z max fail',zvals) + return True + + #pdb.set_trace() + for area in al['areas']: + try: + boundary_area = next(alb['areas'][0] for alb in EM_boundaries if alb['areas'][0]['z']==area['z']) + has_boundary =True + except: + has_boundary = False + if has_boundary: + boundary = geometry.Polygon(boundary_area['global_path']) + b2 = boundary.buffer(-distance) + poly1 = geometry.Polygon(area['global_path']) + try: + if(not b2.contains(poly1)): + #print('area failed',area['global_path'],boundary_area['global_path']) + return True + except: + print(area['global_path']) + assert False + + + #print('pass',al['oid']) + return False + +def get_edge_annotations(annotations,EM_boundaries, + distance=100, + min_edge_sections=4): + """function to get list of 
True/False values of whether annotation is near the 'edge' of a dataset + + Parameters + ---------- + annotations: dict + annotation dictionary + EM_boundaries: dict + em boundary dictionary + distance: int + x,y distance from edge to be considered near edge (default 100) + min_edge_section: int + if annotation is in fewer than these number of sections + and touches z border of dataset it will be considered in edge (default 4) + + Returns + ------- + list[bool]: + list of True/False if annotations are near edge + """ + is_edge = np.zeros(len(annotations),np.bool) + for k,al in enumerate(annotations): + is_edge[k]=is_annotation_near_edge(al, + EM_boundaries, + distance=distance, + min_edge_sections=min_edge_sections) + return is_edge + + +def get_index(name='LM_index'): + """function to get a spatial index, removing existing one if it exists + + Parameters + ---------- + name: str + name of index + + Returns + ------- + rtree.Index: + new index ready to be filled + """ + + dataname = '{}.dat'.format(name) + indexname = '{}.idx'.format(name) + if os.path.isfile(dataname): + os.remove(dataname) + if os.path.isfile(indexname): + os.remove(indexname) + p = index.Property() + p.dimension=3 + return index.Index(name,properties = p) + +def insert_annotations_into_index(index,annotations): + """function to insert annotations into rtree index + + Parameters + ---------- + index: rtree.index + spatial index to insert + annotations: list[dict] + list of annotations following area_list schema in AnnotationFile schema + + Returns + ------- + list: + a list of (minX,minY,minZ,maxX,maxY,maxZ) tuples containing bounds of annotations + """ + + bound_list=[] + for i,al in enumerate(annotations): + bounds = get_bounding_box_of_al(al) + bound_list.append(bounds) + index.insert(i,bounds) + return bound_list + +def do_annotations_overlap(al1,al2): + """function to test of two annotations overlap + + Parameters + ---------- + al1: dict + AreaList dictionary that follows schema in 
AnnotationJsonSchema.AnnotationFile + al2: dict + AreaList dictionary that follows schema in AnnotationJsonSchema.AnnotationFile + + Returns + ------- + bool: + True/False whether they overlap + """ + + for area2 in al2['areas']: + poly2 = geometry.Polygon(area2['global_path']) + for area1 in al1['areas']: + if int(area1['z'])==int(area2['z']): + poly1 = geometry.Polygon(area1['global_path']) + if poly1.intersects(poly2): + return True,area1['z'] + return False,None + +def annotations_to_df(annotations): + dl = [] + for al in annotations: + for a in al['areas']: + a.update({'oid':al['oid'],'id':al['id']}) + mins=np.min(a['global_path'],axis=0) + maxs=np.max(a['global_path'],axis=0) + a['minX']=mins[0] + a['minY']=mins[1] + a['maxX']=maxs[0] + a['maxY']=maxs[1] + dl.append(a) + return pd.DataFrame(dl) + +def get_overlap_matrix(good_annotations, LM_annotations, EM_index, LM_bounds): + overlap_matrix = np.zeros((len(good_annotations),len(LM_annotations)),np.bool) + j=0 + for i,alLM in enumerate(LM_annotations): + res=EM_index.intersection(LM_bounds[i]) + for k in res: + alEM=good_annotations[k] + overlaps,zsection = do_annotations_overlap(alLM,alEM) + if overlaps: + overlap_matrix[k,i]=True + return overlap_matrix + +class EvaluateSynapseDetection(ArgSchemaParser): + """Module for evaluating synapse detection results given EM ground truth""" + default_schema = EvaluateSynapseDetectionParameters + default_output_schema = EvaluateSynapseDetectionOutput + + def run(self): + pp.pprint(self.args) + EM_annotations = load_annotation_file(self.args['EM_annotation_json']) + LM_annotations = load_annotation_file(self.args['LM_annotation_json']) + df = pd.read_csv(self.args['EM_metadata_csv'], index_col=0,skiprows=1,dtype={'oid':np.object}) + good_rows = (~(np.uint8(df[self.args['EM_not_synapse_column']])>0.0)) + in_class_rows = np.isin(df[self.args['EM_inclass_column']],self.args['EM_inclass_conditions']) + if self.args['EM_inclass_invert']: + in_class_rows = ~in_class_rows + 
good_rows = good_rows & in_class_rows + good_df=df[good_rows] + good_annotations = [al for al in EM_annotations if np.sum(good_df.oid.str.match(al['oid']))>0] + + EM_boundaries = load_annotation_file(self.args['EM_boundary_json']) + + #(ann_minX,ann_minY,ann_minZ,ann_maxX,ann_maxY,ann_maxZ) = get_bounding_box_of_annotations(good_annotations) + + print("number of good rows",good_df.shape) + + print("len of good_annotations",len(good_annotations)) + + EM_edge=get_edge_annotations(good_annotations,EM_boundaries, + distance=self.args['edge_distance'], + min_edge_sections=self.args['edge_min_sections']) + + LM_edge=get_edge_annotations(LM_annotations,EM_boundaries, + distance=self.args['edge_distance'], + min_edge_sections=self.args['edge_min_sections']) + + LM_index=get_index('LM_index') + LM_bounds=insert_annotations_into_index(LM_index,LM_annotations) + EM_index = get_index('EM_index') + EM_bounds=insert_annotations_into_index(EM_index,good_annotations) + + overlap_matrix = get_overlap_matrix(good_annotations, LM_annotations, EM_index, LM_bounds) + + bins = np.arange(0,4) + LM_per_EM = np.sum(overlap_matrix,axis=1) + EM_per_LM = np.sum(overlap_matrix,axis=0) + LM_per_EM_counts,edges = np.histogram(LM_per_EM[EM_edge==False],bins=bins,normed=True) + EM_per_LM_counts,edges = np.histogram(EM_per_LM[LM_edge==False],bins=bins,normed=True) + print("EM_per_LM",EM_per_LM_counts) + print("LM_per_EM",LM_per_EM_counts) + print('lm edge detections:',np.sum(LM_edge)) + print('em edge annotations',np.sum(EM_edge)) + print('LM detections:',len(LM_edge)) + d= {} + d['EM_per_LM']=EM_per_LM_counts + d['LM_per_EM']=LM_per_EM_counts + d['missed_EM']= [al['oid'] for k,al in enumerate(good_annotations) if (EM_edge[k]==False) and (LM_per_EM[k]==0)] + d['split_EM']= [al['oid'] for k,al in enumerate(good_annotations) if (EM_edge[k]==False) and (LM_per_EM[k]>1)] + d['correct_EM']= [al['oid'] for k,al in enumerate(good_annotations) if (EM_edge[k]==False) and (LM_per_EM[k]==1)] + 
d['false_pos_LM']= [al['oid'] for k,al in enumerate(LM_annotations) if (LM_edge[k]==False) and (EM_per_LM[k]==0)] + d['merge_LM']= [al['oid'] for k,al in enumerate(LM_annotations) if (LM_edge[k]==False) and (EM_per_LM[k]>1)] + d['correct_LM']= [al['oid'] for k,al in enumerate(LM_annotations) if (LM_edge[k]==False) and (EM_per_LM[k]==1)] + print ('missed EM: ', d['missed_EM']) + self.output(d) + + + +if __name__ == '__main__': + mod = EvaluateSynapseDetection(input_data= example_json) + mod.run() diff --git a/renderapps/synapse_detection/evaluate_synapse_detection2.py b/renderapps/synapse_detection/evaluate_synapse_detection2.py new file mode 100644 index 0000000..0ac6323 --- /dev/null +++ b/renderapps/synapse_detection/evaluate_synapse_detection2.py @@ -0,0 +1,445 @@ +import os +from functools import partial +from argschema import ArgSchema, ArgSchemaParser +import argschema +import marshmallow as mm +from renderapps.TrakEM2.AnnotationJsonSchema import AnnotationFile, NumpyArray +import json +import pandas as pd +from rtree import index +from shapely import geometry +import numpy as np +import pprint as pp +#import pdb +example_json = { + "EM_annotation_json":"/nas3/data/M247514_Rorb_1/annotation/m247514_Site3Annotation_MN_global_v2.json", + "LM_annotation_json":"/nas3/data/M247514_Rorb_1/processed/Site3AlignStacks/glut_synapses46a_global.json", + "EM_boundary_json":"/nas3/data/M247514_Rorb_1/annotation/m247514_Site3Annotation_MN_bb_Site3Align2_global.json", + "EM_metadata_csv":"https://docs.google.com/spreadsheets/d/1cV6BuQosLgjZX1e2l9EZHTOSLZGlDETKjlv2-wjpeIk/export?gid=607779864&format=csv", + "EM_inclass_column1":"GABA", + "EM_inclass_column2":"tdTom", + "EM_inclass_invert":False, + "EM_inclass_conditions1":[1], + "EM_inclass_conditions2":[2,3], + "EM_not_synapse_column":"NotSynapse", + "output_json":"/nas3/data/M247514_Rorb_1/processed/Site3AlignStacks/glut_synapses45a_evaluation.json" + } +# example_json = { +# 
"EM_annotation_json":"/nas3/data/M247514_Rorb_1/annotation/m247514_Take2Site4Annotation_MN_Take2Site4global.json", +# "LM_annotation_json":"/nas3/data/M247514_Rorb_1/processed/Take2Site4AlignStacks/glut_synapses15a_global.json", +# "EM_metadata_csv":"https://docs.google.com/spreadsheets/d/1zp3mApWRo5xyg1tqOSt7Ddp0LC7xWIFaFbsxZrk73bQ/export?gid=0&format=csv", +# "EM_inclass_column":"GABA", +# "EM_inclass_invert":True, +# "EM_not_synapse_column":"NotSynapse", +# "output_json":"/nas3/data/M247514_Rorb_1/processed/Take2Site4AlignStacks/glut_synapses15a_evaluation.json" +# } + + +class EvaluateSynapseDetectionParameters(ArgSchema): + EM_annotation_json = argschema.fields.InputFile(required=True, + description='file path to EM annotation file') + LM_annotation_json = argschema.fields.InputFile(required=True, + description='file path to LM annotation results') + EM_boundary_json = argschema.fields.InputFile(required=True, + description='file path to boundary area list') + EM_metadata_csv = argschema.fields.Url(required=True, + description='file path to EM metadata csv') + EM_inclass_column1 = argschema.fields.Str(required=True, + description="name of column in metadata that indicates whether synapse is gabaergic") + EM_inclass_column2 = argschema.fields.Str(required=True, + description="name of column in metadata that indicates whether synapse is gabaergic") + EM_inclass_conditions1=argschema.fields.NumpyArray(required=False,default=[1], + description="what values of EM_inclass_column are considered in class") + EM_inclass_conditions2=argschema.fields.NumpyArray(required=False,default=[1], + description="what values of EM_inclass_column are considered in class") + EM_inclass_invert = argschema.fields.Boolean(required=False,default=False,description="whether to invert the class columns") + EM_not_synapse_column = argschema.fields.Str(required=True, + description="name of column that indicates whether this annotation should be ignored") + edge_distance = 
argschema.fields.Int(required=False, default = 100, + description="""distance from the edge of the bounding box in world coordinates + (same as global_path units) for annotation to be considered edge""") + edge_min_sections = argschema.fields.Int(required=False,default=4, + description="synapses occuring in fewer than this many sections and bordering the first or last section will be considered to be edge cases") + +class EvaluateSynapseDetectionOutput(mm.Schema): + LM_per_EM = argschema.fields.NumpyArray(dtype=np.float,required=True, + description="list of fraction of EM synapses with 0,1, or 2+ synapses over them") + EM_per_LM = argschema.fields.NumpyArray(dtype=np.float,required=True, + description ="list of fraction of LM synapses with 0,1, or 2+ EM synapses over them") + missed_EM = argschema.fields.List(argschema.fields.Str, required=True, + description= "list of EM synapses oids for which there were no LM detections") + split_EM = argschema.fields.List(argschema.fields.Str, required=True, + description= "list of EM synapses oids for which there were LM detections") + correct_EM = argschema.fields.List(argschema.fields.Str, required=True, + description= "list of EM synapses oids for which there were exactly one LM detections") + false_pos_LM = argschema.fields.List(argschema.fields.Str, required=True, + description= "list of LM synapses oids for which there were no EM synapses") + merge_LM = argschema.fields.List(argschema.fields.Str, required=True, + description= "list of LM synapses oids for which there were more than one EM synapses") + correct_LM = argschema.fields.List(argschema.fields.Str, required=True, + description= "list of LM synapses oids for which there were more exactly one EM synapses") + +def load_annotation_file(annotation_path): + """function to read an annotation file from disk + + Parameters + ---------- + annotation_path: str + path to annotation file on disk + + Returns + ------- + list[dict]: + A list of dictionaries following the 
AnnotationFile schema that contains the annotations + """ + with open(annotation_path,'r') as fp: + annotation_d = json.load(fp) + schema = AnnotationFile() + annotations,errors = schema.load(annotation_d) + if len(errors)>0: + print(errors) + assert(len(errors)==0) + return annotations["area_lists"] + +def get_bounding_box_of_al(al): + """a function to return a bounding box of an annotation + + Parameters + ---------- + al: dict + an arealist following the AreaList schema in AnnotationFile + + Returns: + tuple: + (minX,minY,minZ,maxX,maxY,maxZ) tuple of bounding box + """ + Nareas = len(al['areas']) + mins = np.zeros((Nareas,2)) + maxs = np.zeros((Nareas,2)) + zvalues = [] + for i,area in enumerate(al['areas']): + gp = area['global_path'] + mins[i,:] = np.min(gp,axis=0) + maxs[i,:] = np.max(gp,axis=0) + zvalues.append(area['z']) + gmin = np.min(mins,axis=0) + gmax = np.max(maxs,axis=0) + minX = gmin[0] + minY = gmin[1] + maxX = gmax[0] + maxY = gmax[1] + minZ = np.min(zvalues) + maxZ = np.max(zvalues) + return (minX,minY,minZ,maxX,maxY,maxZ) + +def get_annotation_bounds_df(annotations): + """function to get a pandas dataframe of annotation bounds from a list of annotations + + Parameters + ---------- + annotations: list[dict] + a list of annotation dictionaries that follow to the AreaList schema + + Returns + ------- + pandas.DataFrame: + A data frame containing the following columns 'oid','minX','minY','minZ','maxX','maxY','maxZ' + """ + ds=[] + for al in annotations: + (minX,minY,minZ,maxX,maxY,maxZ)=get_bounding_box_of_al(al) + ds.append({ + 'oid':al['oid'], + 'minX':minX, + 'minY':minY, + 'minZ':minZ, + 'maxX':maxX, + 'maxY':maxY, + 'maxZ':maxZ, + }) + df = pd.DataFrame(ds) + return df + +def get_bounding_box_of_annotations(annotations): + """a function to get the overall bounding box surrounding all the annotations + + Parameters + ---------- + annotations: list[dict] + a list of annotation dictionaries that follow to the AreaList schema + + Returns: + tuple: 
+ (minX,minY,minZ,maxX,maxY,maxZ) tuple of bounding box that contains all annotations + """ + df = get_annotation_bounds_df(annotations) + ann_minX=df.min().minX + ann_minY=df.min().minY + ann_maxX=df.max().maxX + ann_maxY=df.max().maxY + ann_minZ=df.min().minZ + ann_maxZ=df.max().maxZ + return (ann_minX,ann_minY,ann_minZ,ann_maxX,ann_maxY,ann_maxZ) + +def is_annotation_near_edge(al,EM_boundaries, + distance=100,min_edge_sections=4): + """function to test if annotation is near the 'edge' of a dataset + + Parameters + ---------- + al: dict + annotation dictionary + EM_boundaries: dict + em boundary dictionary + distance: int + x,y distance from edge to be considered near edge (default 100) + min_edge_section: int + if annotation is in fewer than these number of sections + and touches z border of dataset it will be considered in edge (default 4) + + Returns + ------- + bool: + True/False if this annotation is near edge + """ + + ann_boundary_zs = np.array([alb['areas'][0]['z'] for alb in EM_boundaries]) + ann_minZ = np.min(ann_boundary_zs) + ann_maxZ = np.max(ann_boundary_zs) + zvals=np.unique(np.array([area['z'] for area in al['areas']])) + # NOTE(review): the z-edge tests below were reconstructed — the source was corrupted here (text between '<' and '>' was stripped); verify against the original file + if len(zvals)<min_edge_sections: + if (np.min(zvals)==ann_minZ) or (np.max(zvals)==ann_maxZ): + return True + if np.max(zvals)>ann_maxZ: + #print('out z max fail',zvals) + return True + + #pdb.set_trace() + for area in al['areas']: + try: + boundary_area = next(alb['areas'][0] for alb in EM_boundaries if alb['areas'][0]['z']==area['z']) + has_boundary =True + except: + has_boundary = False + if has_boundary: + boundary = geometry.Polygon(boundary_area['global_path']) + b2 = boundary.buffer(-distance) + poly1 = geometry.Polygon(area['global_path']) + try: + if(not b2.contains(poly1)): + #print('area failed',area['global_path'],boundary_area['global_path']) + return True + except: + print(area['global_path']) + assert False + + + #print('pass',al['oid']) + return False + +def get_edge_annotations(annotations,EM_boundaries, + distance=100, + min_edge_sections=4): + """function to get list of True/False values of whether annotation is near the
'edge' of a dataset + + Parameters + ---------- + annotations: dict + annotation dictionary + EM_boundaries: dict + em boundary dictionary + distance: int + x,y distance from edge to be considered near edge (default 100) + min_edge_section: int + if annotation is in fewer than these number of sections + and touches z border of dataset it will be considered in edge (default 4) + + Returns + ------- + list[bool]: + list of True/False if annotations are near edge + """ + is_edge = np.zeros(len(annotations),np.bool) + for k,al in enumerate(annotations): + is_edge[k]=is_annotation_near_edge(al, + EM_boundaries, + distance=distance, + min_edge_sections=min_edge_sections) + return is_edge + + +def get_index(name='LM_index'): + """function to get a spatial index, removing existing one if it exists + + Parameters + ---------- + name: str + name of index + + Returns + ------- + rtree.Index: + new index ready to be filled + """ + + dataname = '{}.dat'.format(name) + indexname = '{}.idx'.format(name) + if os.path.isfile(dataname): + os.remove(dataname) + if os.path.isfile(indexname): + os.remove(indexname) + p = index.Property() + p.dimension=3 + return index.Index(name,properties = p) + +def insert_annotations_into_index(index,annotations): + """function to insert annotations into rtree index + + Parameters + ---------- + index: rtree.index + spatial index to insert + annotations: list[dict] + list of annotations following area_list schema in AnnotationFile schema + + Returns + ------- + list: + a list of (minX,minY,minZ,maxX,maxY,maxZ) tuples containing bounds of annotations + """ + + bound_list=[] + for i,al in enumerate(annotations): + bounds = get_bounding_box_of_al(al) + bound_list.append(bounds) + index.insert(i,bounds) + return bound_list + +def do_annotations_overlap(al1,al2): + """function to test of two annotations overlap + + Parameters + ---------- + al1: dict + AreaList dictionary that follows schema in AnnotationJsonSchema.AnnotationFile + al2: dict + AreaList 
dictionary that follows schema in AnnotationJsonSchema.AnnotationFile + + Returns + ------- + bool: + True/False whether they overlap + """ + + for area2 in al2['areas']: + poly2 = geometry.Polygon(area2['global_path']) + for area1 in al1['areas']: + if int(area1['z'])==int(area2['z']): + poly1 = geometry.Polygon(area1['global_path']) + if poly1.intersects(poly2): + return True,area1['z'] + return False,None + +def annotations_to_df(annotations): + dl = [] + for al in annotations: + for a in al['areas']: + a.update({'oid':al['oid'],'id':al['id']}) + mins=np.min(a['global_path'],axis=0) + maxs=np.max(a['global_path'],axis=0) + a['minX']=mins[0] + a['minY']=mins[1] + a['maxX']=maxs[0] + a['maxY']=maxs[1] + dl.append(a) + return pd.DataFrame(dl) + +def get_overlap_matrix(good_annotations, LM_annotations, EM_index, LM_bounds): + overlap_matrix = np.zeros((len(good_annotations),len(LM_annotations)),np.bool) + j=0 + for i,alLM in enumerate(LM_annotations): + res=EM_index.intersection(LM_bounds[i]) + for k in res: + alEM=good_annotations[k] + overlaps,zsection = do_annotations_overlap(alLM,alEM) + if overlaps: + overlap_matrix[k,i]=True + return overlap_matrix + +class EvaluateSynapseDetection(ArgSchemaParser): + """Module for evaluating synapse detection results given EM ground truth""" + default_schema = EvaluateSynapseDetectionParameters + default_output_schema = EvaluateSynapseDetectionOutput + + def run(self): + pp.pprint(self.args) + EM_annotations = load_annotation_file(self.args['EM_annotation_json']) + LM_annotations = load_annotation_file(self.args['LM_annotation_json']) + df = pd.read_csv(self.args['EM_metadata_csv'], index_col=0,skiprows=1,dtype={'oid':np.object}) + good_rows = (~(np.uint8(df[self.args['EM_not_synapse_column']])>0.0)) + in_class_rows1 = np.isin(df[self.args['EM_inclass_column1']],self.args['EM_inclass_conditions1']) + in_class_rows2 = np.isin(df[self.args['EM_inclass_column2']],self.args['EM_inclass_conditions2']) + + #if 
self.args['EM_inclass_invert']: + # in_class_rows = ~in_class_rows + good_rows = good_rows & in_class_rows1 & in_class_rows2 + good_df=df[good_rows] + good_annotations = [al for al in EM_annotations if np.sum(good_df.oid.str.match(al['oid']))>0] + + EM_boundaries = load_annotation_file(self.args['EM_boundary_json']) + + #(ann_minX,ann_minY,ann_minZ,ann_maxX,ann_maxY,ann_maxZ) = get_bounding_box_of_annotations(good_annotations) + + print("number of good rows",good_df.shape) + + print("len of good_annotations",len(good_annotations)) + + EM_edge=get_edge_annotations(good_annotations,EM_boundaries, + distance=self.args['edge_distance'], + min_edge_sections=self.args['edge_min_sections']) + + LM_edge=get_edge_annotations(LM_annotations,EM_boundaries, + distance=self.args['edge_distance'], + min_edge_sections=self.args['edge_min_sections']) + + LM_index=get_index('LM_index') + LM_bounds=insert_annotations_into_index(LM_index,LM_annotations) + EM_index = get_index('EM_index') + EM_bounds=insert_annotations_into_index(EM_index,good_annotations) + + overlap_matrix = get_overlap_matrix(good_annotations, LM_annotations, EM_index, LM_bounds) + + bins = np.arange(0,4) + LM_per_EM = np.sum(overlap_matrix,axis=1) + EM_per_LM = np.sum(overlap_matrix,axis=0) + LM_per_EM_counts,edges = np.histogram(LM_per_EM[EM_edge==False],bins=bins,normed=True) + EM_per_LM_counts,edges = np.histogram(EM_per_LM[LM_edge==False],bins=bins,normed=True) + print("EM_per_LM",EM_per_LM_counts) + print("LM_per_EM",LM_per_EM_counts) + print('lm edge detections:',np.sum(LM_edge)) + print('em edge annotations',np.sum(EM_edge)) + print('LM detections:',len(LM_edge)) + d= {} + d['EM_per_LM']=EM_per_LM_counts + d['LM_per_EM']=LM_per_EM_counts + d['missed_EM']= [al['oid'] for k,al in enumerate(good_annotations) if (EM_edge[k]==False) and (LM_per_EM[k]==0)] + d['split_EM']= [al['oid'] for k,al in enumerate(good_annotations) if (EM_edge[k]==False) and (LM_per_EM[k]>1)] + d['correct_EM']= [al['oid'] for k,al in 
enumerate(good_annotations) if (EM_edge[k]==False) and (LM_per_EM[k]==1)] + d['false_pos_LM']= [al['oid'] for k,al in enumerate(LM_annotations) if (LM_edge[k]==False) and (EM_per_LM[k]==0)] + d['merge_LM']= [al['oid'] for k,al in enumerate(LM_annotations) if (LM_edge[k]==False) and (EM_per_LM[k]>1)] + d['correct_LM']= [al['oid'] for k,al in enumerate(LM_annotations) if (LM_edge[k]==False) and (EM_per_LM[k]==1)] + print ('missed EM: ', d['missed_EM']) + self.output(d) + + + +if __name__ == '__main__': + mod = EvaluateSynapseDetection(input_data= example_json) + mod.run() diff --git a/renderapps/synapse_detection/segment_synapse_probability.py b/renderapps/synapse_detection/segment_synapse_probability.py new file mode 100644 index 0000000..26714c7 --- /dev/null +++ b/renderapps/synapse_detection/segment_synapse_probability.py @@ -0,0 +1,134 @@ +import argschema +import numpy as np +from shapely import geometry,ops +import renderapi +from skimage import measure +from skimage.morphology import watershed +from scipy import ndimage as ndi +import tifffile +from renderapps.module.render_module import RenderModule, RenderParameters +from renderapps.TrakEM2.AnnotationJsonSchema import AnnotationFile +import json + + +def make_point_box(xy,box_size): + x=xy[0] + y=xy[1] + pts = np.array([[x,y],[x+box_size,y],[x+box_size,y+box_size],[x,y+box_size]]) + return geometry.Polygon(pts) + +def make_coords_to_geom(coors,box_size): + polys = [make_point_box([c[0],c[1]],box_size) for c in coors] + return ops.cascaded_union(polys) + +def make_area_from_polygon(polygon,pixel_size,xoffset,yoffset,z): + assert(type(polygon) is geometry.Polygon) + x,y = polygon.exterior.xy + x=np.array(x) + y=np.array(y) + x*=pixel_size + x+=xoffset + y*=pixel_size + y+=yoffset + xy = np.stack([x,y]).T + d={'z':z,'global_path':xy} + return d + +def make_prop_into_contours(prop,pixel_size,xoffset=0,yoffset=0,zoffset=0): + coors = np.flip(np.copy(prop.coords),1) + zvalues = np.unique(coors[:,2]) + polys=[] + 
areas = [] + for z in zvalues: + c2 = coors[coors[:,2]==z] + multipoly = make_coords_to_geom(c2,1.0) + if type(multipoly) is geometry.MultiPolygon: + for g in multipoly.geoms: + areas.append(make_area_from_polygon(g,pixel_size,xoffset,yoffset,z+zoffset)) + else: + areas.append(make_area_from_polygon(multipoly,pixel_size,xoffset,yoffset,z+zoffset)) + + return areas + + + +def segment_syn_prop_map(syn_prop_map,threshold_high = .5, threshold_low = .05): + low_mask = syn_prop_map>threshold_low + labels = ndi.label(low_mask)[0] + props = measure.regionprops(labels,intensity_image=syn_prop_map) + for prop in props: + if(prop.max_intensity0,np.uint8) + img = 255-img + #f, ax = plt.subplots(1,1,figsize=(10,10)) + #ax.imshow(img,cmap=plt.cm.gray) + + hgram=cv2.calcHist([img],[0],mask,[256],[0,256]) + cum_dist = np.cumsum(hgram)/np.sum(hgram) + min_level = np.where(np.diff(cum_dist>sat_pix)==1)[0][0] + max_level = np.where(np.diff(cum_dist<(1.0-sat_pix))==1)[0][0] + + img = np.array(img,np.double) + if (max_level==min_level): + print 'min_level',min_level + print 'max_level',max_level + print 'on file',path_in + raise Exception("%s %f %f"%(path_in,min_level,max_level)) + + img = ((img-min_level)/(max_level-min_level))*255 + img = (img - 128)*contrast_adjust + 128 + #print 'img[2012,195]',img[2012,195] + img = np.clip(img,0,255) + #print 'img[2012,195]',img[2012,195] + img = np.array(img,np.uint8) + #print 'img[2012,195]',img[2012,195] + + img = cv2.bilateralFilter(img,3,20,20) + #clahe1 = cv2.createCLAHE(clipLimit=.5, tileGridSize=(10,10)) + clahe2 = cv2.createCLAHE(clipLimit=clip_limit, + tileGridSize=(grid_size,grid_size)) + #img = clahe1.apply(img) + img = clahe2.apply(img) + if vert_flip: + img = cv2.flip(img,0) + cv2.imwrite(path_out,img) + +def process_z(render,input_stack,sat_pix,grid_size,contrast_adjust,clip_limit,vert_flip,z): + + #get the tilespecs for this Z + tilespecs = render.run(renderapi.tilespec.get_tile_specs_from_z,input_stack,z) + + #loop over the 
tilespes adding the transform + for ts in tilespecs: + mml = ts.ip.mipMapLevels[0] + + old_url = mml.imageUrl + old_path = fix_url(old_url) + + directory,old_file = os.path.split(old_path) + + #print 'old_file',old_file + orig_tif = next(f for f in os.listdir(directory) + if ((old_file[0:-9] in f)) and ('flip' not in f) and ('mask' not in f) and (not f.endswith('bak0'))) + #print 'orig_tif',orig_tif + orig_path = os.path.join(directory,orig_tif) + + new_url = mml.imageUrl[0:-4]+'_flip.tif' + new_path = fix_url(new_url) + + filter_em_image(orig_path, + new_path, + sat_pix, + grid_size, + contrast_adjust, + clip_limit, + vert_flip) + + mml.imageUrl = new_url + ts.ip.update(mml) + #open a temporary file + tid,tfile = tempfile.mkstemp(suffix='.json') + file = open(tfile,'w') + #write the file to disk + renderapi.utils.renderdump(tilespecs,file) + os.close(tid) + #return the filepath + return tfile class FilterEMModule(RenderModule): def __init__(self,*args,**kwargs): super(FilterEMModule,self).__init__(*args, **kwargs) - def filter_em_image(self,path_in,path_out): - - img = cv2.imread(path_in,cv2.CV_8UC1) - - mask=np.array(img>0,np.uint8) - img = 255-img - #f, ax = plt.subplots(1,1,figsize=(10,10)) - #ax.imshow(img,cmap=plt.cm.gray) - sat_pix = self.args['sat_pix']/100 - - hgram=cv2.calcHist([img],[0],mask,[256],[0,256]) - cum_dist = np.cumsum(hgram)/np.sum(hgram) - min_level = np.where(np.diff(cum_dist>sat_pix)==1)[0][0] - max_level = np.where(np.diff(cum_dist<(1.0-sat_pix))==1)[0][0] - - img = np.array(img,np.double) - if (max_level==min_level): - print 'min_level',min_level - print 'max_level',max_level - print 'on file',path_in - raise Exception("%s %f %f"%(path_in,min_level,max_level)) - - img = ((img-min_level)/(max_level-min_level))*255 - img = (img - 128)*self.args['contrast_adjust'] + 128 - #print 'img[2012,195]',img[2012,195] - img = np.clip(img,0,255) - #print 'img[2012,195]',img[2012,195] - img = np.array(img,np.uint8) - #print 'img[2012,195]',img[2012,195] 
- - img = cv2.bilateralFilter(img,3,20,20) - #clahe1 = cv2.createCLAHE(clipLimit=.5, tileGridSize=(10,10)) - grid_size = self.args['clahe_size'] - clahe2 = cv2.createCLAHE(clipLimit=self.args['clahe_clip_limit'], - tileGridSize=(grid_size,grid_size)) - #img = clahe1.apply(img) - img = clahe2.apply(img) - if self.args['vert_flip']: - img = cv2.flip(img,0) - cv2.imwrite(path_out,img) - - def process_z(self,render,input_stack,z): - - #get the tilespecs for this Z - tilespecs = render.run(renderapi.tilespec.get_tile_specs_from_z,input_stack,z) - - #loop over the tilespes adding the transform - for ts in tilespecs: - mml = ts.ip.mipMapLevels[0] - - old_url = mml.imageUrl - old_path = fix_url(old_url) - - directory,old_file = os.path.split(old_path) - - #print 'old_file',old_file - orig_tif = next(f for f in os.listdir(directory) - if ((old_file[0:-9] in f)) and ('flip' not in f) and ('mask' not in f) and (not f.endswith('bak0'))) - #print 'orig_tif',orig_tif - orig_path = os.path.join(directory,orig_tif) - - new_url = mml.imageUrl[0:-4]+'_flip.tif' - new_path = fix_url(new_url) - - self.filter_em_image(orig_path,new_path) - - mml.imageUrl = new_url - ts.ip.update(mml) - #open a temporary file - tid,tfile = tempfile.mkstemp(suffix='.json') - file = open(tfile,'w') - #write the file to disk - renderapi.utils.renderdump(tilespecs,file) - os.close(tid) - #return the filepath - return tfile def run(self): #get the z values in the stack @@ -126,7 +130,14 @@ def run(self): output_stack = self.args.get('output_stack',self.args['input_stack']) #define a partial function for processing a single z value - mypartial = partial(self.process_z,self.render,self.args['input_stack']) + mypartial = partial(process_z, + self.render, + self.args['input_stack'], + self.args['sat_pix']/100, + self.args['clahe_size'], + self.args['contrast_adjust'], + self.args['clahe_clip_limit'], + self.args['vert_flip']) #mypartial(0) #get the filepaths of json files in parallel diff --git 
a/renderapps/tile/create_filtered_EM_images_2.py b/renderapps/tile/create_filtered_EM_images_2.py new file mode 100644 index 0000000..fea9c11 --- /dev/null +++ b/renderapps/tile/create_filtered_EM_images_2.py @@ -0,0 +1,153 @@ +#!/usr/bin/env python +import renderapi +from ..module.render_module import RenderModule, RenderParameters +from functools import partial +import tempfile +import os +import cv2 +import numpy as np +from argschema.fields import Str, Int, Float, Boolean + +# An example set of parameters for this module +example_parameters = { + "render":{ + "host":"ibs-forrestc-ux1", + "port":8080, + "owner":"Forrest", + "project":"M246930_Scnn1a_4_f", + "client_scripts":"/pipeline/render/render-ws-java-client/src/main/scripts" + }, + 'input_stack':'BIGEMSite4_take2Montage_fix', + 'output_stack':'BIG_EM_t2Site4_clahe', + 'pool_size':10 +} +class FilterEMParameters(RenderParameters): + input_stack = Str(required=True,description='stack to apply affine to') + output_stack = Str(required=False,description='stack to save answer into (defaults to overwriting input_stack)') + pool_size = Int(required=False,default=20,description='size of pool for parallel processing (default=20)') + sat_pix = Float(required=False,default=.2,description='percent of pixels to saturate when normalizing contrast (default .2%)') + contrast_adjust = Float(required=False,default=.85,description='constrast fraction to adjust before CLAHE (default .85)') + clahe_size = Int(required=False,default=90,description='CLAHE parameter for grid size.. 
smaller is less strong, larger is stronger (default 90)') + clahe_clip_limit = Float(required=False,default=1.5,description='clip limit for CLAHE normalization (default 1.5)') + vert_flip = Boolean(required=False,default=True,description='vertically flip the image (default True)') + +def fix_url(url): + path = url.replace('file:','') + path = path.replace('%20',' ') + return path + +class FilterEMModule(RenderModule): + def __init__(self,*args,**kwargs): + super(FilterEMModule,self).__init__(*args, **kwargs) + + + def filter_em_image(self,path_in,path_out): + + img = cv2.imread(path_in,cv2.CV_8UC1) + + mask=np.array(img>0,np.uint8) + img = 255-img + #f, ax = plt.subplots(1,1,figsize=(10,10)) + #ax.imshow(img,cmap=plt.cm.gray) + sat_pix = self.args['sat_pix']/100 + + hgram=cv2.calcHist([img],[0],mask,[256],[0,256]) + cum_dist = np.cumsum(hgram)/np.sum(hgram) + min_level = np.where(np.diff(cum_dist>sat_pix)==1)[0][0] + max_level = np.where(np.diff(cum_dist<(1.0-sat_pix))==1)[0][0] + + img = np.array(img,np.double) + if (max_level==min_level): + print 'min_level',min_level + print 'max_level',max_level + print 'on file',path_in + raise Exception("%s %f %f"%(path_in,min_level,max_level)) + + img = ((img-min_level)/(max_level-min_level))*255 + img = (img - 128)*self.args['contrast_adjust'] + 128 + #print 'img[2012,195]',img[2012,195] + img = np.clip(img,0,255) + #print 'img[2012,195]',img[2012,195] + img = np.array(img,np.uint8) + #print 'img[2012,195]',img[2012,195] + + img = cv2.bilateralFilter(img,3,20,20) + #clahe1 = cv2.createCLAHE(clipLimit=.5, tileGridSize=(10,10)) + grid_size = self.args['clahe_size'] + clahe2 = cv2.createCLAHE(clipLimit=self.args['clahe_clip_limit'], + tileGridSize=(grid_size,grid_size)) + #img = clahe1.apply(img) + img = clahe2.apply(img) + if self.args['vert_flip']: + img = cv2.flip(img,0) + cv2.imwrite(path_out,img) + + def process_z(self,render,input_stack,z): + + #get the tilespecs for this Z + tilespecs = 
render.run(renderapi.tilespec.get_tile_specs_from_z,input_stack,z) + + #loop over the tilespes adding the transform + for ts in tilespecs: + mml = ts.ip.mipMapLevels[0] + + old_url = mml.imageUrl + old_path = fix_url(old_url) + + directory,old_file = os.path.split(old_path) + + #print 'old_file',old_file + orig_tif = next(f for f in os.listdir(directory) + if ((old_file[0:-9] in f)) and ('flip' not in f) and ('mask' not in f) and (not f.endswith('bak0'))) + #print 'orig_tif',orig_tif + orig_path = os.path.join(directory,orig_tif) + + new_url = mml.imageUrl[0:-4]+'_flip.tif' + new_path = fix_url(new_url) + + self.filter_em_image(orig_path,new_path) + + mml.imageUrl = new_url + ts.ip.update(mml) + #open a temporary file + tid,tfile = tempfile.mkstemp(suffix='.json') + file = open(tfile,'w') + #write the file to disk + renderapi.utils.renderdump(tilespecs,file) + os.close(tid) + #return the filepath + return tfile + + def run(self): + #get the z values in the stack + zvalues = self.render.run(renderapi.stack.get_z_values_for_stack,self.args['input_stack']) + + #output_stack defaults to input_stack + output_stack = self.args.get('output_stack',self.args['input_stack']) + + #define a partial function for processing a single z value + mypartial = partial(self.process_z,self.render,self.args['input_stack']) + + #mypartial(0) + #get the filepaths of json files in parallel + with renderapi.client.WithPool(self.args['pool_size']) as pool: + json_files = pool.map(mypartial,zvalues) + + if self.args['input_stack'] != output_stack: + sv = renderapi.stack.get_stack_metadata(self.args['input_stack'],render=self.render) + renderapi.stack.create_stack(output_stack,render=self.render) + renderapi.stack.set_stack_metadata(output_stack,sv,render=self.render) + #import the json_files into the output stack + renderapi.client.import_jsonfiles_parallel(output_stack, + json_files, + poolsize=self.args['pool_size'], + render=self.render) + + #clean up the temp files + [os.remove(tfile) for
tfile in json_files] + + +if __name__ == '__main__': + #process the command line arguments + mod = FilterEMModule(schema_type=FilterEMParameters,input_data=example_parameters) + mod.run() diff --git a/renderapps/transfer/move_stack.py b/renderapps/transfer/move_stack.py index 134f02e..7a1d971 100644 --- a/renderapps/transfer/move_stack.py +++ b/renderapps/transfer/move_stack.py @@ -84,7 +84,7 @@ def transformURL(self, url): self.seen_urls.add(newurl) # Replace spaces with %20 -- may be needed for ImageJ url.open? - newurl.replace(" ", "%20") + newurl = newurl.replace(" ", "%20") return newurl def transform_ip(self, ip): diff --git a/requirements.txt b/requirements.txt index 34020f8..34e067c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,10 +1,9 @@ render-python requests numpy -pillow pathos tifffile -opencv-python +opencv-contrib-python shapely rtree networkx @@ -14,3 +13,7 @@ xmltodict argschema>1.15.5 boto3 botocore +pyxb +pillow>=4.3.0 +scikit-image +lxml diff --git a/test_requirements.txt b/test_requirements.txt index 9fbb414..7bcf6a4 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -1,9 +1,9 @@ -coverage==4.1 -mock==2.0.0 -pep8==1.7.0 -pytest==3.0.5 -pytest-cov==2.2.1 -pytest-pep8==1.0.6 -pytest-xdist==1.14 +coverage>=4.1 +mock>=2.0.0 +pep8>=1.7.0 +pytest +pytest-cov +pytest-pep8 +pytest-xdist flake8>=3.0.4 -pylint>=1.5.4 \ No newline at end of file +pylint>=1.5.4