\chapter{Processing} \label{cha:pro}

\begin{dquote}
  What we have is data glut.
  What we really want is the ability to manipulate the information and to reach conclusions from
  it.
  I think we are at the point where that is slipping beyond unaided humans’ abilities.
  So the real thing to be looking for is processing schemes.
  One way is automatic processing: for instance, the sort of analysis that we saw with the IBM
  Watson on Jeopardy.
  Putting that in service to humankind in fields that are suffering from data glut at least gives
  people who are in charge the ability to keep some sort of track of what is going on.

  The other great thing that we have going for us is that we have billions of very intelligent
  people out there in the world.
  With the networking that we have now, we’re beginning to see that those large populations,
  coordinating amongst themselves, are an intellectual resource that trumps all institutional
  intellectual resources and has a real possibility, if it’s supported by the proper automation, of
  creating solutions to problems, including the problem of the data glut.

  \dsignature{Vernor Vinge \cite{VingeVernor2012a}}
\end{dquote}

\clearpage

CMDS takes a unique approach to instrumental science.  %
How, then, could one go about making a data processing software package for CMDS?  %
The package has to be flexible enough to accommodate the diversity of experiments, but still solid
enough to be a foundational tool.  %

When creating a toolkit for CMDS, there are several challenges worth considering:
\begin{ditemize}
  \item Dimensionality of datasets is typically greater than two, complicating representation.
  \item Shape and dimensionality change, and relevant axes can be different from the scanned
    dimensions.  %
  \item Data can range from awkwardly large (several million pixels) to legitimately large---it is
    not always possible to store entire arrays in memory.  %
  \item There are no agreed-upon file formats for CMDS dataset storage.  %
\end{ditemize}
The biggest challenge is to find a robust definition of what constitutes a CMDS dataset.  %
Once understood, this common denominator can be enshrined into software and built upon.  %
This chapter describes WrightTools, a software package that I created to process CMDS datasets.  %

WrightTools is a software package written in Python, built using the excellent tools provided by
the scientific Python collection of packages, especially SciPy \cite{SciPy} and NumPy
\cite{OliphantTravisE2006a}.  %
WrightTools defines a universal file format that is flexible enough to encompass the diversity of
CMDS while still being entirely self-describing.  %
This file format is based on the popular binary format ``HDF5'' \cite{FolkMike2011a}, as
interfaced by the h5py Python library \cite{h5py}.  %
This format allows WrightTools to interact with arrays piece-by-piece in a fast and reliable way,
without loading entire arrays into memory.  %
This allows users to interact with legitimately large CMDS datasets without worrying about memory
overflow.  %
WrightTools takes a unique approach to representing CMDS data in array format, an approach I call
``semi-structure'', which allows for greater flexibility in representing CMDS data in different
coordinate spaces.  %

WrightTools is written to be used in scripts and at the command line.  %
It does not have any graphical components built in, except for the ability to generate plots using
matplotlib \cite{HunterJohnD2007a}.  %
Being built in this way gives WrightTools users maximum flexibility, and allows for rapid
collaborative development.  %
It also allows other software packages to use WrightTools as a ``back-end'' foundational software,
as has already been done in simulation and acquisition software created in the Wright Group.  %

\clearpage
\section{Introduction to WrightTools}  % ==========================================================

WrightTools is a moderately complex piece of software ($\sim$10,000 source lines of code), so it is
important to keep the package internally organized so that users are able to use the pieces they
need without feeling overwhelmed by the full complexity.  %
For organizational purposes, WrightTools is designed in a nested, hierarchical manner through heavy
use of object-oriented programming (see \autoref{sof:sec:oop}).  %
In this introductory section, I wish to describe the overall structure of WrightTools, without
going into too much detail.  %
In this way the reader can have some context in the sections below, where I describe some crucial
pieces of WrightTools in greater detail.  %

WrightTools is written in Python, and endeavors to have a ``pythonic'', explicit and ``natural''
application programming interface (API).  %
To use WrightTools, simply import:
\begin{codefragment}{python}
>>> import WrightTools as wt
>>> wt.__version__
'3.0.0'
\end{codefragment}
I'll discuss how WrightTools packaging, distribution, and installation work in
\autoref{pro:sec:distribution}.

\autoref{pro:tab:wt} contains a description of each top-level component within the WrightTools
package.  %
Within an interactive Python session, we could see these components using the built-in
\python{dir} function: \python{dir(wt)}.  %
There are several types of component: functions, attributes, classes, modules, and subpackages.  %
Functions are simple objects that take some input(s), do something, and return something.  %
For example, the function \python{wt.open} accepts a path to a WrightTools dataset file and
returns an opened version of that file.  %
Attributes are not interactive; they are simply pieces of attached information that can be
accessed.  %
For example, \python{wt.__version__} as used in the code fragment above.  %
Classes are instructions for construction of particular custom object types, and can be
instantiated (see \autoref{sof:sec:oop}).  %
We'll talk extensively about the five main WrightTools classes: \python{Axis},
\python{Collection}, \python{Channel}, \python{Data}, and \python{Variable}, in the coming
pages.  %
Modules are literally \bash{.py} files within WrightTools, and they themselves contain attributes,
functions, and classes.  %
Finally, subpackages are literally folders that contain several \bash{.py} files: several
modules.  %

All spectra are stored within wt5 files as multidimensional arrays.  %
Arrays are containers that store many instances of the same data type, typically numerical
datatypes.  %
These arrays have some \python{shape}, \python{size}, and
\python{dtype}.  %
In the context of WrightTools, they can contain floats, integers, complex numbers and NaNs.  %
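
For example, using NumPy directly (the shape here is arbitrary):
\begin{codefragment}{python}
>>> import numpy as np
>>> arr = np.zeros((41, 41, 23))
>>> arr.shape, arr.size, arr.dtype
((41, 41, 23), 38663, dtype('float64'))
\end{codefragment}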

WrightTools is designed around a universal ``wt5'' file format.  %
wt5 files are simply extensions of the HDF5 format, with some additional requirements applied to
their internal structure.  %
This puts wt5 files in the same category as other domain-specific hierarchical data formats (see
\autoref{sof:sec:hdf}).  %
One of the most important features of the HDF5 paradigm is the ability to access portions of a
multidimensional array at a time.  %
WrightTools takes full advantage of this, such that the WrightTools package is simply an
\emph{interface} to the data contained within the wt5 file, and arrays are not loaded into memory
until needed.  %

There are two classes which are top-level components of the WrightTools package:
\python{Collection} and \python{Data}.  %

\python{Data} is arguably the most important class, as it provides the crucial function of
interfacing to the stored multidimensional arrays that constitute the CMDS datasets.  %
\python{Data} can be instantiated directly, but typically is instantiated by helper functions
within the \python{data} subpackage, or by the \python{open} function.  %
See \autoref{pro:sec:creating_data} for more information.  %

\python{Collection} is a container class, charged with storing groups of data objects and other
collection objects---empowering users to organize their datasets into clearly structured and well
labeled hierarchies within the wt5 file.  %
See \autoref{pro:sec:collection} for more information about \python{Collection}.  %

The \python{artists} subpackage contains all of the tools needed to plot \python{Data} objects.  %
There are ``quick'' artist functions made primarily for use in interactive plotting, and a larger,
more flexible set of classes and functions that can be used to construct more elaborate figures.  %
See \autoref{pro:sec:artists} for more information.  %

The \python{fit} subpackage is an interface which endeavors to make fitting multidimensional
\python{Data} objects as easy as possible.  %
Towards this end, the \python{fit} subpackage takes a unique approach of dimensionality reduction
via fitting.  %
See \autoref{pro:sec:fit} for more information.  %

The \python{datasets} subpackage is simply a Python interface to the set of raw data that is
distributed within WrightTools.  %
\python{datasets} is not imported by default, so ``from'' syntax must be used.  %
\python{datasets} allows users to access full filepaths to the raw data, rather than returning
instances of \python{Data} or \python{Collection}.  %
\begin{codefragment}{python}
>>> from WrightTools import datasets
>>> datasets.COLORS.v0p2_d1_d2_diagonal
'.../WrightTools/datasets/COLORS/v0.2/d1_d2 diagonal.dat'
\end{codefragment}
This strategy is more flexible and allows the developers of WrightTools to write tests and examples
using datasets that are guaranteed to be on every machine.  %

The \python{diagrams} subpackage is a small set of tools used for drawing diagrams, with a focus on
diagrams commonly required by CMDS practitioners.  %
Currently \python{diagrams} can draw WMELs \cite{LeeDuckhwan1985a} and delay space labels (see
\autoref{pps:fig:delay_space} for an example).  %
\python{diagrams} interfaces well with artists since they both are built on top of matplotlib, so
it is easy for WrightTools users to draw diagrams in the same figure as other elements.  %

The \python{units} module handles all unit information, and conversion between values in different
unit systems.  %

The \python{exceptions} module defines the unique exceptions and warnings that WrightTools
raises.  %
All exceptions are children of the \python{WrightToolsException} class, and all warnings are
children of the \python{WrightToolsWarning} class.  %
In this way, users of WrightTools can easily intercept all exceptions/warnings coming from
WrightTools itself (as opposed to packages that WrightTools relies upon) when debugging their
application.  %

Finally, the \python{kit} subpackage is a small menagerie of classes and functions that are useful,
but have no other place within WrightTools.  %
Many of these are used internally throughout the rest of the program, and others are distributed to
be used by WrightTools users.  %
As examples:
\begin{ditemize}
  \item The \python{TimeStamp} class represents a moment in time, and handles conversion between
    different popular representations of time.  %
  \item The \python{INI} class is a very simple python interface to \bash{.ini} configuration
    files.  %
  \item The \python{fft} function is a friendly user interface for N-dimensional FFT operations.  %
  \item The \python{closest_pair} function finds the pair(s) of indices corresponding to the
    closest elements in an array.  %
\end{ditemize}

\begin{table}
  \begin{tabular}{c | c | l}
    & type & description \\ \hline
    \texttt{artists} & subpackage & Plotting and figure generation. \\ \hline
    \texttt{Collection} & class & Nestable container for data objects. \\ \hline
    \texttt{collection} & subpackage & Collection creation functions. \\ \hline
    \texttt{Data} & class & Central multidimensional dataset class. \\ \hline
    \texttt{data} & subpackage & Data creation functions. \\ \hline
    \texttt{datasets} & subpackage & Raw data for examples, testing. \\ \hline
    \texttt{diagrams} & subpackage & Draw diagrams. \\ \hline
    \texttt{exceptions} & module & WrightTools exception and warning types. \\ \hline
    \texttt{fit} & subpackage & Fit data objects. \\ \hline
    \texttt{kit} & subpackage & Small useful functions and classes. \\ \hline
    \texttt{open} & function & Universal wt5 file open function. \\ \hline
    \texttt{units} & module & Unit handling and conversion. \\ \hline
  \end{tabular}
  \caption[Components of WrightTools.]{
    Key components of WrightTools, lexicographically listed.
  }
  \label{pro:tab:wt}
\end{table}

I now focus on the \python{Data} class.  %
\autoref{pro:tab:data} contains a description of each key component of \python{Data}.

\python{Data} can be thought of as a container class that contains everything needed to define a
single multidimensional spectrum.  %
To do this, each data object contains several multidimensional arrays (typically 2 to 50 arrays,
depending on the kind of data).  %
There are two kinds of arrays, instances of \python{Variable} and \python{Channel}.  %
Variables are coordinate arrays that define the position of each pixel in the multidimensional
spectrum, and channels are each a particular kind of signal within that spectrum.  %
Typical variables might be \python{[labtime, w1, w2, w3, d1, d2]}, and typical channels
\python{[pmt, pyro1, pyro2, pyro3]}.  %
The data object contains attributes \python{Data.variables} and \python{Data.channels}, which are
tuples of the \python{Variable} and \python{Channel} instances contained within that instance of
\python{Data}.  %
The data object also has convenience attributes \python{variable_names} and \python{channel_names};
creation methods \python{create_channel} and \python{create_variable}; and basic manipulation
methods \python{remove_channel}, \python{remove_variable}, and \python{rename_channels}.  %
More information about channels and variables will come on the next pages.  %
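
As a brief, hypothetical sketch of this interface (all names here are illustrative, not from a
real instrument file):
\begin{codefragment}{python}
>>> data.variable_names
('labtime', 'w1', 'w2', 'w3', 'd1', 'd2')
>>> data.channel_names
('pmt', 'pyro1', 'pyro2', 'pyro3')
>>> data.rename_channels(pmt='signal')
\end{codefragment}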

Variables contain all of the information about where every piece of hardware was at each coordinate
in the multidimensional dataset, but most of the time users only want to work with data as
parameterized by a few key variables.  %
Crucially, the exact choice of parameterization may be context dependent, or multiple
parameterizations may be desirable \cite{NeffMallonNathanA2017a}.  %
Axes, instances of the WrightTools \python{Axis} class, are easy-to-use parameterized interfaces
to the variable arrays.  %
Axes do not contain any \emph{new} information; they simply contain expressions which describe how
the variable arrays are accessed when manipulating or displaying the data.  %
The \python{transform} method allows users to change these expressions.  %
The convenience attribute \python{axis_expressions} allows for quick inspection.  %
See \autoref{pro:sec:axes} for more information.  %

Besides merely allowing users to access variables and channels, the \python{Data} class allows for
manipulation and processing.  %
Many simple data processing tools are methods of \python{Channel} and \python{Variable}, and are
discussed further later.  %
The data manipulation methods that \python{Data} contains are more holistic---they are
manipulations that involve multiple variable and channel arrays.  %
\python{heal} attempts to ``fill'' holes via multidimensional interpolation.  %
\python{chop}, \python{collapse}, \python{split}, \python{map_variable}, and \python{zoom} change
the shape of the data object, by slicing, interpolation, or both.  %

\begin{table}
  \begin{tabular}{c | c | l}
    & type & description \\ \hline
    \texttt{collapse} & method & Collapse along one dimension in a well-defined way. \\ \hline
    \texttt{convert} & method & Convert all axes of a certain kind. \\ \hline
    \texttt{create\_channel} & method & Create a new channel. \\ \hline
    \texttt{create\_variable} & method & Create a new variable. \\ \hline
    \texttt{fullpath} & attribute & External and internal path to data. \\ \hline
    \texttt{get\_nadir} & method & Get the coordinates, in units, of the minimum of a channel. \\ \hline
    \texttt{get\_zenith} & method & Get the coordinates, in units, of the maximum of a channel. \\ \hline
    \texttt{heal} & method & Remove nans from channel using interpolation. \\ \hline
    \texttt{kind} & attribute & Instrumental origin of data. \\ \hline
    \texttt{level} & method & Subtract the average value of npts at the edge of a given axis. \\ \hline
    \texttt{map\_variable} & method & Map points of a variable to new points using linear interpolation. \\ \hline
    \texttt{natural\_name} & attribute & Natural name. \\ \hline
    \texttt{ndim} & attribute & Number of dimensions. \\ \hline
    \texttt{offset} & method & Offset one variable based on another variable's values. \\ \hline
    \texttt{print\_tree} & method & Print a pretty tree including all contents. \\ \hline
    \texttt{remove\_channel} & method & Remove a channel. \\ \hline
    \texttt{remove\_variable} & method & Remove a variable. \\ \hline
    \texttt{rename\_channels} & method & Rename (multiple) channel(s). \\ \hline
    \texttt{shape} & attribute & Shape of data. \\ \hline
    \texttt{share\_nans} & method & Share not-a-numbers between all channels. \\ \hline
    \texttt{size} & attribute & Number of pixels in entire data shape. \\ \hline
    \texttt{smooth} & method & Smooth a channel using an n-dimensional Kaiser window. \\ \hline
    \texttt{source} & attribute & File of origin. \\ \hline
    \texttt{split} & method & Split the data along a given axis, in units. \\ \hline
    \texttt{transform} & method & Transform the data. \\ \hline
    \texttt{units} & attribute & Tuple of units for each axis. \\ \hline
    \texttt{variable\_names} & attribute & Variable names. \\ \hline
    \texttt{variables} & attribute & Variable objects. \\ \hline
    \texttt{zoom} & method & Zoom the data using spline interpolation of the requested order. \\ \hline
  \end{tabular}
  \caption[Attributes and methods of Data.]{
    Key attributes and methods of data, lexicographically listed.
  }
  \label{pro:tab:data}
\end{table}

I now focus on the \python{Channel} and \python{Variable} classes.  %
These are the principal multidimensional array containers, and each instance of these classes
corresponds to exactly one multidimensional array.  %
These two classes share a large amount of functionality, and they both inherit from the parent
WrightTools \python{Dataset} class, which itself is a child of \python{h5py.Dataset}.  %
See \autoref{sof:sec:oop} to understand the concept of inheritance.  %

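As a brief demonstration of slicing (a sketch; the channel name and values are hypothetical):
\begin{codefragment}{python}
>>> data.signal.shape
(35, 11, 11)
>>> data.signal[0, :, :].shape  # reads only this 2D slice from disk
(11, 11)
\end{codefragment}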

\autoref{pro:tab:dataset} contains a description of each key component of the \python{Channel} and
\python{Variable} classes.  %
For each component, the column ``of'' indicates whether it is a shared feature (inherited from
\python{Dataset}) or unique to one of the two classes.  %
Many of these are attributes which describe the contents or behavior of these arrays.  %
\python{argmax}, \python{argmin}, \python{max}, and \python{min} are methods that make it easy to
inspect the most basic features of the array.  %
The concept of \python{null} as distinct from zero is unique to channels, and the components
\python{signed}, \python{mag}, \python{major_extent}, and \python{minor_extent} are associated
with this null concept.  %

These classes also have basic mathematical manipulation methods, such as \python{log},
\python{normalize}, and \python{symmetric_sqrt}.  %
Other operations are supported by in-place operations, as described in
\autoref{pro:sec:in_place}.  %

Channels and variables inherit from h5py, so they support partial access through slicing
(\python{__getitem__} syntax).  %
This means that, in principle, very large datasets can be processed piece-by-piece without loading
the entire array into memory simultaneously.  %
This is trivial for ``blind'' operations like taking a logarithm or normalizing, and becomes more
complex for operations like smoothing and interpolation.  %
WrightTools offers several methods that try to make it easier to process arrays piecewise.  %
\python{slices} returns a generator which yields tuples of slice objects for each chunk of the
array.  %
\python{chunkwise} accepts a function and executes it on each chunk returned by \python{slices}.  %
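
The following is a minimal sketch of this piecewise pattern (the channel name is hypothetical, and
the exact \python{chunkwise} call signature is not shown here):
\begin{codefragment}{python}
import numpy as np

# process a large channel one chunk at a time
for s in data.signal.slices():        # s is a tuple of slice objects
    chunk = data.signal[s]            # only this chunk is read into memory
    data.signal[s] = np.log10(chunk)  # write the processed chunk back
\end{codefragment}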

\begin{table}
  \begin{tabular}{c | c | c | l}
    & type & of & description \\ \hline
    \texttt{argmax} & method & both & Index of maximum, ignoring nans. \\ \hline
    \texttt{argmin} & method & both & Index of minimum, ignoring nans. \\ \hline
    \texttt{chunkwise} & method & both & Execute a function for each chunk in the dataset. \\ \hline
    \texttt{clip} & method & both & Clip values outside of a desired range. \\ \hline
    \texttt{convert} & method & both & Convert units, writing to disk. \\ \hline
    \texttt{full} & attribute & both & Content array expanded to full shape. \\ \hline
    \texttt{fullpath} & attribute & both & External and internal path to dataset. \\ \hline
    \texttt{label} & attribute & variable & Label to appear in subscript. \\ \hline
    \texttt{log} & method & both & Take the log of the entire dataset, with choice of base. \\ \hline
    \texttt{log10} & method & both & Take the base 10 log of the entire dataset. \\ \hline
    \texttt{log2} & method & both & Take the base 2 log of the entire dataset. \\ \hline
    \texttt{mag} & method & channel & Alias for \texttt{major\_extent}. \\ \hline
    \texttt{major\_extent} & attribute & channel & Maximum deviation from null. \\ \hline
    \texttt{max} & method & both & Maximum, ignoring nans. \\ \hline
    \texttt{min} & method & both & Minimum, ignoring nans. \\ \hline
    \texttt{minor\_extent} & attribute & channel & Minimum deviation from null. \\ \hline
    \texttt{natural\_name} & attribute & both & Natural name. \\ \hline
    \texttt{normalize} & method & channel & Zero out null, divide such that mag is 1. \\ \hline
    \texttt{null} & attribute & channel & Null value. \\ \hline
    \texttt{parent} & attribute & both & Parent data object. \\ \hline
    \texttt{points} & attribute & both & Squeezed content array. \\ \hline
    \texttt{signed} & attribute & channel & Flag to indicate data is signed. \\ \hline
    \texttt{slices} & method & both & Returns a generator yielding tuple of slice objects. \\ \hline
    \texttt{symmetric\_root} & method & both & Take the root, propagating sign. \\ \hline
    \texttt{trim} & method & channel & Remove outliers using statistical tests. \\ \hline
  \end{tabular}
  \caption[Attributes and methods of Channel and Variable.]{
    Key attributes and methods of channel and variable, lexicographically listed.
  }
  \label{pro:tab:dataset}
\end{table}

I now focus on the \python{Axis} class.  %
\autoref{pro:tab:axis} contains a description of each key component of the \python{Axis} class.  %

Axes are ways to organize data as functions of particular variables (and combinations thereof).  %
The \python{Axis} class does not directly contain the respective arrays---it merely refers to the
associated variables.  %
The flexibility of this association is one of the main new features in WrightTools 3.  %

Axis expressions are simple human-friendly strings made up of numbers and variable
\python{natural_name}s.  %
Given 5 variables with names \python{['w1', 'w2', 'wm', 'd1', 'd2']}, example valid expressions
include \python{'w1'}, \python{'w1=wm'}, \python{'w1+w2'}, \python{'2*w1'}, \python{'d1-d2'}, and
\python{'wm-w1+w2'}.  %
Axes can be directly indexed / sliced into using \python{__getitem__}, and they support many of the
``numpy-like'' attributes.  %

Axes need not be one-dimensional.
In fact, axes must have the same dimensionality as their parent \python{Data}.  %
The loosening of the one-dimensional axis requirement is what makes WrightTools data not fully
structured, but ``semi-structured''.

\autoref{pro:sec:axes} describes the \python{Axis} class in greater detail.  %

\begin{table}
  \begin{tabular}{c | c | l}
    & type & description \\ \hline
    \texttt{full} & attribute & Content array expanded to full shape. \\ \hline
    \texttt{label} & attribute & Label to appear in subscript. \\ \hline
    \texttt{natural\_name} & attribute & Natural name. \\ \hline
    \texttt{ndim} & attribute & Number of dimensions. \\ \hline
    \texttt{points} & attribute & Squeezed content array. \\ \hline
    \texttt{shape} & attribute & Shape of axis. \\ \hline
    \texttt{size} & attribute & Number of pixels in axis. \\ \hline
    \texttt{units} & attribute & Units of axis. \\ \hline
    \texttt{variables} & attribute & List of variables owned by axis. \\ \hline
    \texttt{convert} & method & Convert the axis to a different set of units. \\ \hline
    \texttt{min} & method & Get the axis minimum. \\ \hline
    \texttt{max} & method & Get the axis maximum. \\ \hline
  \end{tabular}
  \caption[Attributes and methods of Axis.]{
    Key attributes and methods of axis, lexicographically listed.
  }
  \label{pro:tab:axis}
\end{table}

\section{Creating a data object} \label{pro:sec:creating_data}  % =================================

WrightTools data objects are capable of storing arbitrary multidimensional spectra, but how can we
actually get data into WrightTools?  %
If you start with a wt5 file, the answer is easy: \python{wt.open(<filepath>)}.  %
But what if you have data that was written using some other software?  %
WrightTools offers data conversion functions (``from'' functions) that do the hard work of creating
data objects from other files.  %
These from-functions are as parameter-free as possible, which means they recognize details like
shape and units from each specific file format without manual user intervention.  %

The most important thing about from-functions is that they are extensible: more from-functions can
easily be added as needed.  %
This modular approach to data creation means that individuals who want to use WrightTools for new
data sources can simply add one function to unlock the capabilities of the entire package as
applied to their data.  %

\autoref{pro:tab:from_functions} contains the currently supported from-functions in
WrightTools.  %

\subsubsection{Discover dimensions}

Certain older Wright Group file types (COLORS and KENT) are particularly difficult to import using
a parameter-free from-function.  %
There are two problems:
\begin{ditemize}
  \item Dimensionality limitation to individual files (1D for KENT, 2D for COLORS).
  \item Lack of self-describing metadata (headers).
\end{ditemize}
The way that WrightTools handles data creation for these file-types deserves special discussion.  %

First, WrightTools contains hardcoded column information for each filetype.  %
Data from Kent Meyer's ``picosecond control'' software had consistent columns over the lifetime of
the software, so only one dictionary is needed to store these correspondences.  %
Schuyler Kain's ``COLORS'' software used at least 7 different formats, and unfortunately
these format types were not fully documented \cite{KainSchuyler2017a}.  %
WrightTools attempts to guess the COLORS data format by counting the number of columns.  %

Because these file-types are dimensionality limited, there are many acquisitions that span
multiple files.  %
COLORS offered an explicit queue manager which allowed users to repeat the same 2D scan (often a
Wigner scan) many times at different coordinates in non-scanned dimensions.  %
ps\_control scans were done more manually.  %
To account for this problem of a single acquisition spanning multiple files, the functions
\python{from_COLORS} and \python{from_KENT} optionally accept \emph{lists} of filepaths.  %
Inside the function, WrightTools simply appends the arrays from all given files into one long array
with many more rows.  %

The final and most challenging problem of parameter-free importing for these filetypes is
\emph{dimensionality recognition}.  %
Because the files contain no metadata, the shape and coordinates of the original acquisition must
be guessed by simply inspecting the columnar arrays.  %
In general, this problem can become very hard.  %
Luckily, each of these previous instrumental software packages was only used on one instrument with
limited flexibility in acquisition type, so it is possible to make educated guesses for almost all
acquisitions.  %

The function \python{wt.kit.discover_dimensions} handles the work of dimensionality recognition for
both COLORS and ps\_control arrays.  %
This function may be used for more filetypes in the future.  %
Roughly, the function does the following:
\begin{denumerate}
  \item Remove dimensions containing nan(s).
  \item Find which dimensions are equal (within tolerance), condense into single dimensions.
  \item Find which dimensions are scanned (move beyond tolerance).
  \item For each scanned dimension, find how many unique (outside of tolerance) points were taken.
  \item Linearize each scanned dimension between the smallest and largest unique point.
  \item Return scanned dimension names, column indices, and points.
\end{denumerate}
The \python{from_COLORS} and \python{from_KENT} functions then linearly interpolate each row in the
channels onto the grid defined by \python{discover_dimensions}.  %
This interpolation uses \python{scipy.interpolate.griddata}, which in turn relies upon the C++
library Qhull.  %
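
A minimal sketch of this interpolation pattern (all variable names and values are hypothetical):
\begin{codefragment}{python}
import numpy as np
from scipy.interpolate import griddata

# scattered columnar data: one row per acquired pixel
rng = np.random.default_rng(0)
w1_col = rng.uniform(6500, 7500, 500)
d2_col = rng.uniform(-100, 200, 500)
signal_col = np.exp(-((w1_col - 7000) / 200) ** 2)
# regular grid, as would be returned by discover_dimensions
grid_w1, grid_d2 = np.meshgrid(np.linspace(6500, 7500, 51),
                               np.linspace(-100, 200, 23), indexing='ij')
points = np.stack([w1_col, d2_col], axis=1)
signal = griddata(points, signal_col, (grid_w1, grid_d2), method='linear')
\end{codefragment}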

This strategy can be copied in the future if other non-self-describing data sources are added into
WrightTools.  %

\begin{table}
  \begin{tabular}{c | l}
    function & data source \\ \hline
    \texttt{wt.collection.from\_CARY} & Cary brand spectrometers. \\ \hline
    \texttt{wt.data.from\_COLORS} & COLORS. \cite{KainSchuyler2017a} \\ \hline
    \texttt{wt.data.from\_JASCO} & JASCO brand spectrometers. \\ \hline
    \texttt{wt.data.from\_KENT} & ps\_control. \cite{MeyerKentA2004a} \\ \hline
    \texttt{wt.data.from\_PyCMDS} & PyCMDS (\autoref{cha:acq}). \\ \hline
    \texttt{wt.data.from\_OceanOptics} & Ocean Optics brand spectrometers. \\ \hline
    \texttt{wt.data.from\_shimadzu} & Shimadzu brand spectrometers. \\ \hline
    \texttt{wt.data.from\_Tensor27} & Tensor 27 FT-IR. \\ \hline
  \end{tabular}
  \caption{
    WrightTools data import functions.
  }
  \label{pro:tab:from_functions}
\end{table}

\section{Collections} \label{pro:sec:collection}  % ===============================================

The WrightTools \python{Collection} class is a container class meant to organize the contents of
the wt5 file.  %
It can contain other collection instances and data objects.  %
Conceptually, it behaves like a folder in a traditional file-system.  %
\python{wt.Collection} is a child of \python{h5py.Group} \cite{h5py}.  %

The primary attributes and methods of \python{Collection} are
\begin{ditemize}
  \item attribute \python{item_names}
  \item attribute \python{fullpath}
  \item method \python{print_tree}
\end{ditemize}
% TODO: finish adding attributes and methods

Collections are useful because they allow WrightTools users to ``carry around'' several associated
data objects in the same file.  %
For example, a publication might contain several experiments on the same sample.  %
Collections allow such experiments to be organized in a hierarchical way.  %
The hierarchy of contents that a collection contains can be easily visualized using the
\python{print_tree} method.  %
As an example, consider the following collection instance, which contains some experiments
performed on neat carbon tetrachloride.  %
\begin{codefragment}{python}
>>> import WrightTools as wt
>>> root = wt.open('CCl4.wt5')
>>> root.print_tree()
CCl4 (/tmp/0tze7b8a.wt5)
├── 0: delay (111,)
│   ├── axes: d1 (fs)
│   └── channels: ai0, ai1, ai2, ai3
└── 1: frequency
    ├── 0: delay_0 (51, 51)
    │   ├── axes: w2 (eV), w1=wm (eV)
    │   └── channels: ai0, ai1, ai2, ai3, ai4, mc
    └── 1: delay_200 (18, 20)
        ├── axes: w1=wm (eV), w2 (eV)
        └── channels: ai0, ai1, ai2, ai3
\end{codefragment}
Looking at the output of \python{print_tree}, we can see that this collection (named \python{CCl4})
contains the following:
\begin{denumerate}
  \item A data object ``\python{delay}'', shape \python{(111,)}.
  \item A collection object ``\python{frequency}'', containing two 2D data objects.
    \begin{denumerate}
      \item A data object ``\python{delay_0}'', shape \python{(51, 51)}.
      \item A data object ``\python{delay_200}'', shape \python{(18, 20)}.
    \end{denumerate}
\end{denumerate}
Since this is all contained in one file, a user of WrightTools can easily manage all three
associated datasets.  %
Upon simple inspection it is obvious that two of the datasets are 2D frequency-frequency scans
while one is a 1D delay slice.  %

Like \python{Channel}, \python{Data} and \python{Variable}, \python{Collection} supports adding
arbitrary metadata through the \python{attrs} dictionary.  % TODO: cite
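
For example (a sketch; the key and value are hypothetical):
\begin{codefragment}{python}
>>> root.attrs['sample'] = 'CCl4'
>>> root.attrs['sample']
'CCl4'
\end{codefragment}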

\subsection{From directory}  % --------------------------------------------------------------------

The \python{wt.collection.from_directory} function can be used to automatically import all of the
data sources in an entire directory tree.  %
It returns a WrightTools collection with the same internal structure as the directory tree, but
with WrightTools data objects in the place of raw data source files.  %
Users can configure which files are routed to which from-function.  %

% TODO (also document on wright.tools)

\section{Visualizing a data object} \label{pro:sec:artists}  % ====================================

After importing and manipulating data, one typically wants to create a plot.  %
The artists sub-package contains everything users need to plot their data objects.  %
This includes both ``quick'' artists, which generate simple plots as quickly as possible, and a
full figure layout toolkit that allows users to generate full publication quality figures.  %
It also includes ``specialty'' artists which are made to perform certain common plotting
operations.  %

Currently the artists sub-package is built on top of the wonderful matplotlib library
\cite{HunterJohnD2007a}.  %
In the future, other libraries (e.g. Mayavi \cite{Mayavi}) may be incorporated.  %

\subsection{Strategies for 2D visualization}  % ---------------------------------------------------

Representing two-dimensional data is an important capability for WrightTools, so some special
discussion about how such representations work is warranted.  %
WrightTools data is typically very structured, with values recorded at a grid of positions.  %
To represent two-dimensional data, then, WrightTools needs to map the values onto a color axis.  %
When doing this mapping, choosing an appropriate colormap and interpolation type is crucial.  %

\subsubsection{Colormap}

For the purposes of this discussion, there are two types of colormaps: qualitative and
perceptual.  %
Qualitative colormaps have random orderings of color.  %
They are best used to represent unordered things, and they typically have high dynamic range.  %
Perceptual colormaps are monotonic in lightness, and are best at representing ordered information
(like signal levels in MR-CMDS) \cite{LiuYang2018a}.  %

Historically the Wright Group has used a qualitative colormap for all plotting.  %
\autoref{pro:fig:cmaps} shows the red, green, and blue components of four different colormaps.  %
The black line is the net lightness of each color (larger value means lighter color).  %
Below each figure is a gray-scale representation of the corresponding colormap.  %
The r, g, and b components are scaled according to human perception.  % TODO: values, from where
The traditional Wright Group colormap (derived from jet) is shown first.  %
Following are two perceptual colormaps: cubehelix from \textcite{GreenDA2009a}, and viridis, the
new matplotlib default.  %
WrightTools uses the algorithm from \textcite{GreenDA2009a} to define a custom cubehelix colormap
with good perceptual properties and familiar Wright Group coloration.  %
Its lightness is not perfectly linear, as it is in viridis and cubehelix, but it is at least
monotonic.  %

\autoref{pro:fig:cmap_comparison} compares the new (top row) and old (bottom row) colormaps in full
color and greyscale.  %
While the old colormap shows a greater dynamic range, the new colormap is more perceptually
uniform.  %
In my opinion, users should use logarithmic scaling of an appropriate base if more dynamic range is
required, rather than using a non-perceptual colormap.  %

\subsubsection{Interpolation type}

WrightTools data is defined at discrete points, but an entire 2D surface must be defined in order
to render a fully colored plot.  %
Defining this surface requires \emph{interpolation}, and there are various strategies that have
different advantages and disadvantages.  %
Choosing the wrong type of interpolation can be misleading.  %

\autoref{pro:fig:fill_types} represents different kinds of plot-type interpolation.  %
Each is labeled according to the corresponding matplotlib method call.  %
In the multidimensional spectroscopy community, the most popular forms of interpolation are
``contourf'' and ``contour'', both based on Delaunay triangulation.  %
In \autoref{pro:fig:fill_types} the edges of the Delaunay triangles are drawn for clarity.  %
Such interpolation methods result in \emph{smoother} looking spectra, but they can look strange and
cause visual artifacts.  %
``pcolor'' is a much more direct approach that results in \emph{blocky} but honest two-dimensional
plots.  %

\begin{figure}
  \includegraphics[scale=0.5]{"processing/wright_cmap"}
  \includegraphics[scale=0.5]{"processing/cubehelix_cmap"}
  \includegraphics[scale=0.5]{"processing/viridis_cmap"}
  \includegraphics[scale=0.5]{"processing/default_cmap"}
  \caption[Colormap lightness.]{
    Comparison of four different colormaps.
    R, G, and B components are each shown.
    Black line is human-perceived lightness.
  }
  \label{pro:fig:cmaps}
\end{figure}

\begin{figure}
  \includegraphics[width=\textwidth]{"processing/cmap_comparison"}
  \caption[Colormap comparison.]{
    Comparison between colormaps in plotting two-dimensional data.
    Right column is full-color, and left column is lightness.
    The ``new'' (top) colormap is evenly spaced in terms of lightness, while the ``old'' colormap
    has dramatic, misleading changes at the transition in and out of green.
  }
  \label{pro:fig:cmap_comparison}
\end{figure}

\begin{figure}
  \includegraphics[width=\textwidth]{"processing/fill_types"}
  \caption{
    Interpolation types in 2D plotting.
  }
  \label{pro:fig:fill_types}
\end{figure}

\subsection{Quick}  % -----------------------------------------------------------------------------

To facilitate easy visualization of data, WrightTools offers ``quick'' artist functions which
quickly generate 1D or 2D representations.  %
These functions are designed to produce good representations by default, but they do have certain
keyword arguments to make popular customizations easy.  %
These are particularly useful functions within the context of REPLs and auto-generated plots in
acquisition software.  %

Default outputs of \python{wt.artists.quick1D} and \python{wt.artists.quick2D} are shown in
\autoref{pro:fig:quick1D} and \autoref{pro:fig:quick2D}, respectively.  %
The full script used to create each image is included in the Figures.  %
Note that the actual quick functions are each one-liners, and that the supplied keyword arguments
are necessary only because the images are being saved (not typical for users in interactive
mode).  %

Perhaps the most powerful feature of \python{quick1D} and \python{quick2D} is their ability to
treat higher-dimensional datasets by automatically generating multiple figures.  %
When handing a dataset of higher dimensionality to these artists, the user may choose which axes
will be plotted against using keyword arguments.  %
Any axis not plotted against will be iterated over such that an image will be generated at each
coordinate in that axis.  %
Users may also provide a dictionary with entries of the form
\python{{axis_name: [position, units]}} to choose a single coordinate along non-plotted axes.  %
These functionalities are derived from \python{wt.Data.chop}, discussed further in
\autoref{pro:sec:chop}.  %
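
As a sketch, assuming a 3D data object with axes \python{'wm'}, \python{'w2'}, and \python{'w1'}:
\begin{codefragment}{python}
>>> wt.artists.quick2D(data, 'wm', 'w1', at={'w2': [1580, 'wn']})
\end{codefragment}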

\begin{figure}
  \includegraphics[width=0.5\textwidth]{"processing/quick1D 000"}
  \includepython{"processing/quick1D.py"}
  \caption[quick1D example.]{
    Minimal example of the \python{quick1D} function.
  }
  \label{pro:fig:quick1D}
\end{figure}

\begin{figure}
  \includegraphics[width=0.5\textwidth]{"processing/quick2D 000"}
  \includepython{"processing/quick2D.py"}
  \caption[quick2D example.]{
    Minimal example of the \python{quick2D} function.
  }
  \label{pro:fig:quick2D}
\end{figure}

% TODO: signed data (with and without dynamic_range=True)

% \subsection{API}  % -------------------------------------------------------------------------------

% The artists sub-package offers a thin wrapper on the default matplotlib object-oriented figure
% creation API.  %
% The wrapper allows WrightTools to add the following capabilities on top of matplotlib:
% \begin{ditemize}
%   \item More consistent multi-axes figure layout.
%   \item Ability to plot data objects directly.
% \end{ditemize}
% Each of these is meant to lower the barrier to plotting data.  %
% Without going into every detail of matplotlib figure generation capabilities, this section
% introduces the unique strategy that the WrightTools wrapper takes.  %

% % \subsection{Gotchas}  % -------------------------------------------------------------------------

% TODO: mention gotcha of apparently narrowing linewidths with wigners (how to READ colormaps)

\section{Variables and channels}  % ===============================================================

Data objects are made up of many component channels and variables, each array having the same
dimensionality as its parent data.  %
This strategy allows for maximal flexibility in data representation, but it can be overly expensive
if certain arrays do not actually change against all of the dimensions.  %
This is often especially true with variables, which typically correspond to scannable hardware that
may not have even moved across some (or any) dimensions.  %
To avoid unnecessarily large arrays, WrightTools allows Channels and Variables to have different
sizes than the parent data.  %
As an example, consider the following object.
\begin{codefragment}{python}
>>> import WrightTools as wt; from WrightTools import datasets
>>> data = wt.data.from_COLORS(datasets.COLORS.v2p1_MoS2_TrEE_movie)
>>> data.print_tree()
MoS2 (/tmp/qhg_1b3l.wt5)
├── axes
│   ├── 0: w2 (nm) (41, 1, 1)
│   ├── 1: w1=wm (nm) (1, 41, 1)
│   └── 2: d2 (fs) (1, 1, 23)
├── variables
│   ├── 0: w2 (nm) (41, 1, 1)
│   ├── 1: w1 (nm) (1, 41, 1)
│   ├── 2: wm (nm) (1, 41, 1)
│   ├── 3: d2 (fs) (1, 1, 23)
│   ├── 4: w3 (nm) (1, 1, 1)
│   ├── 5: d0 (fs) (1, 1, 1)
│   └── 6: d1 (fs) (1, 1, 1)
└── channels
    ├── 0: ai0 (41, 41, 23)
    ├── 1: ai1 (41, 41, 23)
    ├── 2: ai2 (41, 41, 23)
    ├── 3: ai3 (41, 41, 23)
    ├── 4: ai4 (41, 41, 23)
    └── 5: mc (41, 41, 23)
\end{codefragment}
Note that this is the primary dataset discussed in \autoref{cha:mx2}.  %
The shape of this data object is \python{(41, 41, 23)}, but none of the variables have that full
shape.  %
From a quick inspection, one can see that \python{w1} and \python{wm} were scanned together, while
\python{w2} and \python{d2} were the other two dimensions.  %
\python{w3}, \python{d0}, and \python{d1} were not moved at all, yet their coordinates are still
propagated.  %

\section{Axes} \label{pro:sec:axes}  % ============================================================

Axes are the primary interface to coordinate positions in WrightTools.  %
Axes are \emph{not} arrays, although they do behave like arrays.  %
They are merely \emph{interfaces} into the information stored in one or more variables.  %

Each axis has an expression, like \python{'w1'}, \python{'d1=d2'}, \python{'2*w3'} or
\python{'w1+w2-wm'}.  %
These expressions describe an unambiguous mathematical operation involving one or more
variables.  %
The axis computes these expressions on the fly when needed, giving users real flexibility over how
they would like to represent and process their results.  %

Axes, variables, and channels are array-likes, so they support slicing operations.  %
The axes have the joint shape of their component variables.  %
In addition, all three classes have \python{points} and \python{full} attributes that return the
squeezed and broadcasted array, respectively.  %

One of the coolest features enabled by this approach to axes is \emph{transformation}.  %
\autoref{pro:fig:transform} shows how easy it is to transform data from one coordinate system into
another.  %
Transforming is essentially free, since axes compute coordinates on the fly.  %
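
A minimal sketch (the axis names are hypothetical):
\begin{codefragment}{python}
>>> data.axis_expressions
('w1', 'w2')
>>> data.transform('w1', 'w2-w1')
>>> data.axis_expressions
('w1', 'w2-w1')
\end{codefragment}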

\begin{figure}
  \includegraphics[width=0.8\textwidth]{"processing/fringes_transform"}
  \includepython{"processing/fringes_transform.py"}
  \caption{
    Example using transform.
  }
  \label{pro:fig:transform}
\end{figure}

\section{Math}  % =================================================================================

Now that we know the basics of how the WrightTools \python{Data} class stores data, it's time to do
some data manipulation.  %
Let's start with some elementary algebra.  %

\subsection{In-place operators} \label{pro:sec:in_place}  % ---------------------------------------

In Python, operators are symbols that carry out some computation.  %
Consider the following:
\begin{codefragment}{python, label=pro:lst:array_addition}
>>> import numpy as np
>>> a = np.array([4, 5, 6])
>>> b = np.array([-1, -2, -3])
>>> c = a + b
>>> c
array([3, 3, 3])
\end{codefragment}
Here, \python{a} and \python{b} are operands and \python{+} is an operator.  %
When used in this simple way, operators typically create and return a \emph{new} object in the
computer's memory.  %
We can verify this by using Python's built-in \python{id} function on the objects created in
\ref{pro:lst:array_addition}.  %
\begin{codefragment}{python}
>>> id(a), id(b), id(c)
(139712529580400, 139712333712320, 139712333713040)
\end{codefragment}
This is usually fine, but sometimes the operands are large objects that take a lot of memory to
store.  %
In other cases operators are used millions of times such that, used as above, millions of new
arrays will be created.  %

One way to avoid these problems is to use \emph{in-place} operators.  %
Using a slightly different syntax, one can tell Python to overwrite one of the operands with the
new value. %
Continuing from \ref{pro:lst:array_addition}:
\begin{codefragment}{python, label=pro:lst:in_place_addition}
>>> a += b
>>> a
array([3, 3, 3])
\end{codefragment}
No output \python{c} array was created, so no additional memory footprint is needed in
\ref{pro:lst:in_place_addition}.  %
Since WrightTools channels and variables are typically large arrays, and since these arrays are
stored on disk inside of a larger file, WrightTools requires the use of in-place operators for all
normal math.  %
Currently WrightTools supports addition (\python{+=}), multiplication (\python{*=}),
power (\python{**=}), subtraction (\python{-=}), and division (\python{/=}).  %
As an example, consider dividing a channel by a specific factor:
\begin{codefragment}{python}
>>> import WrightTools as wt; from WrightTools import datasets
>>> data = wt.data.from_JASCO(datasets.JASCO.PbSe_batch_1)
data.created at /tmp/tdyvfxu8.wt5::/
  range: 2500.0 to 700.0 (nm)
  size: 1801
>>> data.signal
<WrightTools.Channel 'signal' at /tmp/tdyvfxu8.wt5::/signal>
>>> data.signal.min(), data.signal.max()
(0.10755, 1.58144)
>>> data.signal /= 2
>>> data.signal.min(), data.signal.max()
(0.053775, 0.79072)
\end{codefragment}
Variables also support in-place operators.  %

\subsection{Clip}  % ------------------------------------------------------------------------------

Clip allows users to exclude values outside of a certain range.  %
Excluded values can be replaced with a user-specified value or with not-a-number.  %
This can be particularly useful in cases like fitting.  %
See \autoref{pro:sec:fit} for an example.  %

It is also useful when noise in a certain region of a spectrum obscures useful data, which is
particularly true for normalized and signed data.  %
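
A minimal sketch (the channel name and keyword names are assumptions):
\begin{codefragment}{python}
>>> data.signal.clip(min=0, max=1)  # values outside [0, 1] become nan
\end{codefragment}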

\subsection{Symmetric root}  % --------------------------------------------------------------------

Homodyne- and heterodyne-detected data must be scaled appropriately for comparison.  %
Much of the data that we collect in the Wright Group is homodyne detected, so it goes as $N^2$.  %
To compare with the majority of other experiments, including basic linear experiments like
absorption and Raman spectroscopy, one needs to plot on the ``amplitude level'', that is,
$\mathsf{amplitude=\sqrt{signal}}$.  %

Due to things like leveling, chopping, baseline subtraction, and simple noise, even homodyne
detected data typically include negative numbers.  %
Symmetric root treats these values as cleanly as possible by applying the same relative scaling to
positive and negative values, and keeping the sign of each pixel, as the following code sketch
shows.  %
\begin{codefragment}{python}
import numpy as np

def symmetric_root(value):
    return np.sign(value) * np.sqrt(np.abs(value))
\end{codefragment}

For generality, \python{wt.Channel.symmetric_root} accepts any root as an argument.  %
The default is 2, for the common case of going from intensity scaling to amplitude scaling.  %

Any other power can be applied to a channel using the in-place \python{**=} syntax.  %
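
For example (the channel name is hypothetical):
\begin{codefragment}{python}
>>> data.signal.symmetric_root(2)  # intensity level to amplitude level
>>> data.signal **= 2              # and back to intensity level
\end{codefragment}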

\subsection{Log}  % -------------------------------------------------------------------------------

The method \python{wt.Channel.log} applies logarithmic scaling to a channel.  %
The base of the log is settable by keyword argument, with a default of $\me$.  %
There are also methods \python{wt.Channel.log10} and \python{wt.Channel.log2}, which accept no
keyword arguments.  %
These may be slightly faster than \python{channel.log(base=10)} and
\python{channel.log(base=2)}.  %
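
For example (the channel name is hypothetical):
\begin{codefragment}{python}
>>> data.signal.log(base=10)  # same scaling as data.signal.log10()
\end{codefragment}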

% \subsection{Level}  % -----------------------------------------------------------------------------

% TODO: figure from wright.tools

\subsection{Trim}  % ------------------------------------------------------------------------------

Trim uses statistical treatment to find and remove outliers from a dataset.  %
It is useful in cases where the naive strategy employed by \python{wt.Channel.clip} is not
sufficient, and when preparing for fitting.  %
See \autoref{pro:fig:outlier} for an example of outlier detection.  %

Currently \python{trim} only supports one statistical treatment: the z-test.  %
Z-testing compares each pixel to its multidimensional neighborhood of pixels.  %
If the pixel is more than $n$ standard deviations outside of the neighborhood mean (using the
neighborhood standard deviation) it is either masked, replaced with \python{np.nan}, or replaced
with the neighborhood mean.  %
All outliers are found before any outliers are modified, so the algorithm is not directional.  %
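
The core comparison can be sketched for a single pixel as follows (\python{pixel},
\python{neighborhood}, and \python{n} are illustrative names, not the method's API):
\begin{codefragment}{python}
import numpy as np

def is_outlier(pixel, neighborhood, n=3):
    # flag the pixel if it lies more than n standard deviations
    # from the mean of its multidimensional neighborhood
    return np.abs(pixel - neighborhood.mean()) > n * neighborhood.std()
\end{codefragment}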

\python{wt.Channel.trim} can easily be enhanced with other statistical methods as needed.  %
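A sketch of a typical call, assuming \python{trim} accepts a neighborhood size, a z-score factor,
and a replacement strategy:  %
\begin{codefragment}{python}
# flag pixels more than 3 standard deviations from their 5x5 neighborhood
data.signal.trim([5, 5], factor=3, replace='nan')
\end{codefragment}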

\subsection{Smooth}  % ----------------------------------------------------------------------------

\python{wt.Channel.smooth} essentially passes the channel through a low-pass filter.  %
It does this by convolving the channel with an n-dimensional Kaiser–Bessel window.  %

% TODO: define Kaiser window
% TODO: citations
% TODO: motivate use of Kaiser window over other choices

Smoothing is a highly destructive process and can be very dangerous if used unthinkingly.  %
However, it can be useful when noisy data are collected at high resolution.  %
By taking many more pixels than required to capture the relevant spectral or temporal features, one
can confidently smooth the collected data in post-processing to achieve clean results.  %
This strategy is similar to that employed in time-domain CMDS, where a low-pass filter is applied
to the very high resolution raw data.  %
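As a sketch, assuming \python{smooth} accepts a window size in pixels:  %
\begin{codefragment}{python}
# convolve the channel with a Kaiser window spanning 5 pixels per dimension
data.signal.smooth(5)
\end{codefragment}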

\begin{figure}
  \includegraphics[width=0.8\textwidth]{"processing/outlier"}
  \caption{
    Outlier detection using a z-test.
  }
  \label{pro:fig:outlier}
\end{figure}

% TODO: figure: example of smoothed data

\section{Dimensionality manipulation}  % ==========================================================

WrightTools offers several strategies for reducing the dimensionality of a data object.  %
Also consider using the fit sub-package described in \autoref{pro:sec:fit}.  %

\subsection{Chop} \label{pro:sec:chop}  % ---------------------------------------------------------

Chop is one of the most important methods of the data object, although it is typically not called
directly by users of WrightTools.  %
Chop takes n-dimensional data and ``chops'' it into all of its lower-dimensional components.  %
Consider a 3D dataset in \python{('wm', 'w2', 'w1')}.  %
This dataset can be chopped into its component 2D \python{('wm', 'w1')} spectra.  %
\begin{codefragment}{python, label=test_label}
>>> import WrightTools as wt; from WrightTools import datasets
>>> data = wt.data.from_PyCMDS(datasets.PyCMDS.wm_w2_w1_000)
data created at /tmp/lzyjg4au.wt5::/
  axes ('wm', 'w2', 'w1')
  shape (35, 11, 11)
>>> chopped = data.chop('wm', 'w1')
chopped data into 11 piece(s) in ('wm', 'w1')
>>> chopped.chop000
<WrightTools.Data 'chop000' ('wm', 'w1') at /tmp/935c2v5a.wt5::/chop000>
\end{codefragment}
\python{chopped} is a collection containing 11 data objects: \python{chop000, chop001 ...
  chop010}.  %
Note that, by default, the collection is made at the root level of a new tempfile.  %
An optional keyword argument \python{parent} allows users to specify the destination for this new
collection.   %
These lower-dimensional data objects can then be used in plotting routines, fitting routines, etc.  %

By default, chop returns \emph{all} of the lower dimensional slices.  %
Considering the same data object from \autoref{test_label}, we can choose to get all 1D wm
slices.  %
\begin{codefragment}{python}
>>> chopped = data.chop('wm')
chopped data into 121 piece(s) in ('wm',)
>>> chopped.chop000
<WrightTools.Data 'chop000' ('wm',) at /tmp/pqkbc0qr.wt5::/chop000>
\end{codefragment}

If desired, users may use the \python{at} keyword argument to specify a particular coordinate in
the un-retained dimensions.  %
For example, suppose that you want to plot the data from \autoref{test_label} as a wm, w1 plot at
w2 = 1580 wn.  %
\begin{codefragment}{python}
>>> chopped = data.chop('wm', 'w1', at={'w2': [1580, 'wn']})[0]
chopped data into 1 piece(s) in ('wm', 'w1')
>>> chopped
<WrightTools.Data 'chop000' ('wm', 'w1') at /tmp/_yhrdprp.wt5::/chop000>
>>> chopped.w2.points
array([1580.0])
\end{codefragment}
% Note the [0]...  % TODO
% This same syntax used in artists...  % TODO

\subsection{Collapse}  % --------------------------------------------------------------------------

\python{wt.Data.collapse} reduces the dimensionality of the data object by exactly 1 using some
mathematical operation.  %
Currently supported methods are integrate, average, sum, max, and min, with integrate as
default.  %
Collapsing a dataset is a very simple and powerful method of dimensionality reduction.  %
It allows users to inspect the net dependency along a set of axes, without being opinionated about
the coordinate in other dimensions.  %
It can also be used as a method of noise reduction.  %
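As a minimal sketch (the axis and method names here are assumptions for illustration):  %
\begin{codefragment}{python}
# reduce a 3D dataset to 2D by averaging over the 'd2' axis
collapsed = data.collapse('d2', method='average')
\end{codefragment}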

\subsection{Split}  % -----------------------------------------------------------------------------

\python{wt.Data.split} is not a proper method of dimensionality reduction, but it is a crucial tool
for interacting with the dimensionality of a data object.  %
\python{split} allows users to access a portion of the dataset.  %
The most common use-case is certainly in fitting operations.  %
In population spectroscopies like transient absorption and transient grating it has become typical
to take three-dimensional ``movies'' in \python{('w1', 'w2', 'd2')}, where \python{w1} is a probe,
\python{w2} is a pump, and \python{d2} is a population delay.  %
It can be informative to fit each \python{d2} trace to a model (often a single exponential), but
such a fit will not describe the signal well through zero delay and for positive \python{d2}
values (into the coherence pathways).  %
\python{data.split('d2', 0.)} will return two data objects, one for the positive delays and one
for the negative.  %
You can then pass the data object with only population response into your fitting routine.  %
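A minimal sketch (which index holds the population side depends on the delay sign convention):  %
\begin{codefragment}{python}
# split at zero delay and keep only one side for fitting
population = data.split('d2', 0.)[0]
\end{codefragment}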

\subsection{Join}  % ------------------------------------------------------------------------------

Like \python{split}, \python{wt.data.join} is not a method of dimensionality reduction.  %
It is also not a method of the \python{Data} class; it is a bare function.  %
Join accepts multiple data objects and attempts to join them together.  %
To do this, the variable and channel names must agree.  %
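A minimal sketch (the data object names are hypothetical):  %
\begin{codefragment}{python}
# join two overlapping scans into a single data object
joined = wt.data.join([scan_blue, scan_red])
\end{codefragment}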

\begin{figure}
  \includegraphics[width=\textwidth]{"processing/join_example"}
  \caption{
    Join example.
  }
  \label{pro:fig:join_example}
\end{figure}

\section{Fitting} \label{pro:sec:fit}  % ==========================================================

Like the rest of WrightTools, the \python{fit} sub-package is made to play as nicely as possible
with high-dimensional data.  %
WrightTools uses fitting as a method of dimensionality reduction.  %
For example, consider a three-dimensional \python{('w1', 'w2', 'd2')} ``movie'', where \python{d2}
is a population delay that can be well approximated by a single exponential decay with offset.  %
Rather than attempt to visualize \python{w1, w2} at some specific value of \python{d2}, it can be
powerful to instead consider the parameters (amplitude, offset, and time constant) of an
exponential fit at each \python{w1, w2} coordinate.  %
On a more practical note, this kind of slice-by-slice dimensionality reduction via fitting can
greatly simplify automated instrumental calibration (see \autoref{cha:opa}).  %
WrightTools employs some simple tricks to enable these kinds of fit operations, described here.  %

% TODO: consider inserting figures that demonstrate this story (need to use wt2?)

\subsection{Function objects}  % ------------------------------------------------------------------

One challenge of slice-by-slice fitting is making a good initial guess from which to optimize.  %
It is not tractable to ask the user to provide a guess for each slice, so some kind of reasonable
automated guessing must be used.  %
WrightTools ``function'' objects are self-contained representations of a particular function.  %
As an example, consider the \python{wt.fit.Exponential} class.  %
It can be used directly, as shown in \autoref{pro:fig:fit_function}.  %

\begin{figure}
  \includegraphics[width=0.5\textwidth]{"processing/fit_function"}
  \includepython{"processing/fit_function.py"}
  \caption{
    Fitting a Gaussian.
  }
  \label{pro:fig:fit_function}
\end{figure}

\subsection{Fitter}  % ----------------------------------------------------------------------------

The \python{Fitter} class is specially made to work seamlessly with data objects.  %
Its use is best demonstrated through an example.  %

Let's load in some test data.  %
\begin{codefragment}{python}
# import
import WrightTools as wt
from WrightTools import datasets
# create
ps = datasets.COLORS.v2p1_MoS2_TrEE_movie
data = wt.data.from_COLORS(ps)
# cleanup
data.level('ai0', 'd2', -3)
data.scale()
data.convert('eV')
data.name = 'MoS2'
\end{codefragment}
This is a three-dimensional dataset:  %
\begin{codefragment}{python}
>>> data.axis_names
['w2', 'w1', 'd2']
>>> data.shape
(41, 41, 23)
\end{codefragment}
We could create an animation to see every single pixel, but we can't see everything at once that
way.  %
Instead we could imagine fitting every decay ($\tau_{21}$ trace) to an exponential.  %
Then we could plot the amplitude and time constant of that exponential decay.  %
This helps us get at subtle questions about the data.  %
Do the lineshapes narrow with time?  %
Does the redder feature decay more slowly than the bluer feature, or faster?  %

Using the \python{Fitter} class, it is easy to perform an exponential fit along each $\tau_{21}$
trace at each $\omega_1, \omega_2$ coordinate.  %
\begin{codefragment}{python}
# isolate only relevant data
data = data.split('w1', 1.75)[1].split('d2', 0)[0]
# prepare a function
function = wt.fit.Exponential()
function.limits['amplitude'] = [0, 1]
function.limits['offset'] = [0, 0]
function.limits['tau'] = [0, 2000]
# do the fit
fitter = wt.fit.Fitter(function, data, 'd2')
outs = fitter.run()
\end{codefragment}
When we call \python{fitter.run()}, every slice of the data object will be fit according to the
given function object.  %
Fitter automatically creates two new data objects when this happens.  %
\python{outs} contains the fit parameters, in this case amplitude, tau, and offset.  %
Accordingly, \python{outs} is lower-dimensional than the original data object.  %
\python{model} contains the fit evaluated at each coordinate of the original dataset, which is
really useful for inspecting the quality of the fit procedure.  %

\begin{figure}
  \includegraphics[width=0.4\textwidth]{"processing/fit_amplitude"}
  \includegraphics[width=0.4\textwidth]{"processing/fit_tau"}
  \caption{
    Fitting as dimensionality reduction.
  }
  \label{pro:fig:fitted_movie}
\end{figure}

\section{Construction, maintenance, and distribution}  % ==========================================

While WrightTools has already been useful to the work done in the Wright Group over the last three
years, the true potential of the package lies in its future.  %
WrightTools is designed in a modular way so that it can be continuously enhanced to serve an
ever-wider set of users and spectroscopies.  %
Despite its name, WrightTools is built to be used even by those outside of the Wright Group.  %
Currently WrightTools may be only 75\% of what a typical multidimensional spectroscopist needs, but
if those scientists work to enhance the package with what \emph{they} need, they may also solve
problems for others such that the usefulness of the software gradually increases.  %

In order for this dream to come true, WrightTools must be constructed and maintained using
collaborative tools, such that users feel comfortable contributing future enhancements.  %
All of the challenges to collaboration discussed in \autoref{cha:sof} certainly apply to
WrightTools, so it is important that we follow best practices now in order to make WrightTools as
maintainable and future-proof as possible.  %
To this end, this section discusses strategies that I have employed in the construction,
maintenance, and distribution of WrightTools.  %

\subsection{Licensing}  % ------------------------------------------------------------------------

As discussed in \autoref{cha:sof}, open source licenses are an important part of scientific
software development.  %
Those writing software must explicitly license their projects in order to ensure that users have
basic rights to copy, edit, and distribute code.  %
WrightTools is licensed under the hugely popular Expat/MIT license.  %
This license is incredibly permissive and puts as few restrictions as possible on the end users.  %
The following is the WrightTools license, reproduced in its entirety.  %

\begin{dquote}

  The MIT License (MIT)

  Copyright (c) 2016-2018 WrightTools Developers.

  Permission is hereby granted, free of charge, to any person obtaining a copy
  of this software and associated documentation files (the "Software"), to deal
  in the Software without restriction, including without limitation the rights
  to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  copies of the Software, and to permit persons to whom the Software is
  furnished to do so, subject to the following conditions:

  The above copyright notice and this permission notice shall be included in all
  copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
  NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
  DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

\end{dquote}

As an aside, since Python is an interpreted language the source code of a library \emph{must} be
distributed for that library to work.  %
In this way, ``closed-source Python'' is a kind of oxymoron.  %
However many Python libraries end up being interfaces to compiled code that could in theory be
closed-source.  %
The packages of the Scientific Python Stack have MIT-compatible licenses, including BSD-like
licenses.  %

\subsection{Version control}  % -------------------------------------------------------------------

As mentioned several times in \autoref{cha:sof}, having software under source control is probably
the most important recommendation in scientific software development.  %
Source control allows developers to create ``checkpoints'' for their software package that can be
returned to again and again.  %
Developers can collaborate together to edit the software by making incremental changes that are
easy to review.  %

WrightTools uses git for source control, and the package is hosted on GitHub
\cite{GitHub}.  %
As of 2018-04-08, WrightTools has 1,018 commits from seven developers, as shown in
\autoref{pro:tab:commits}.  %
In addition to simply hosting the git repository, GitHub gives us issue tracking, continuous
integration, and Zenodo support.  %

The WrightTools package has a developer-controlled version as well, following the semantic
versioning convention \cite{SemanticVersioning}.  %
The current distributed version of WrightTools is \bash{3.0.1}, with \bash{3.0.2} under active
development.  %
The wt5 file also has a semantic version, currently \bash{1.0.0}.  %
These attributes can be accessed through python: \python{wt.__version__} and
\python{wt.__wt5_version__}.  %
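For example:  %
\begin{codefragment}{python}
>>> import WrightTools as wt
>>> wt.__version__
'3.0.1'
>>> wt.__wt5_version__
'1.0.0'
\end{codefragment}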

\begin{table}
  \begin{tabular}{c | c | c | c}
    person & number of commits & lines added & lines removed \\ \hline
    Blaise Thompson & 478 & 621,918 & 507,938 \\ \hline
    Kyle Sunden & 208 & 19,293 & 9,218 \\ \hline
    Darien Morrow & 29 & 1,589 & 127 \\ \hline
    Nathan Neff-Mallon & 20 & 2,880 & 910 \\ \hline
    Kyle Czech & 5 & 1,150 & 25 \\ \hline
    Daniel Kohler & 3 & 113 & 29 \\ \hline
    Rachel Swedin & 1 & 5,197 & 0 \\ \hline
  \end{tabular}
  \caption[Commits to WrightTools.]{
    Commits to WrightTools by individual, ordered by number of commits.
    Note that datasets are included, so lines added and removed are massively inflated.
  }
  \label{pro:tab:commits}
\end{table}

\subsection{Unit tests}  % ------------------------------------------------------------------------

Maintainable code must be tested, so that future developers can use tests to ensure that they do
not break necessary functionality.  %
Unit testing is a very simple testing paradigm in which small, separate tests are written to
address each ``unit'' of the software package.  %
As an example, the following is one of WrightTools' tests:
\begin{codefragment}{python}
# part of WrightTools/tests/data/convert_data.py
def test_wigner():
    p = datasets.COLORS.v2p2_WL_wigner
    data = wt.data.from_COLORS(p)
    assert data.d1.units == 'fs'
    data.convert('ns')
    assert data.d1.units == 'ns'
    assert data.wm.units == 'nm'
    data.close()
\end{codefragment}
This test loads one of the distributed COLORS datasets and makes sure that the \python{convert}
method works as intended.  %
To do this, it uses the \python{assert} statement which raises an exception when a condition is
\python{False}.  %
This particular test is pretty humble, but there is strength in numbers: as of 2018-04-08 there are
224 unit tests within WrightTools.  %
Using the \python{pytest} framework (\bash{python setup.py test}), a programmer can run all of the
tests and receive a report on what failed and why.  %
If a future programmer unintentionally breaks \python{convert}, the above test will fail and alert
her to the unexpected side effect of her modification.  %

WrightTools uses continuous integration services to run unit tests every time a pull request is
made on GitHub.  %

\subsection{Distribution} \label{pro:sec:distribution}  % ------------------------------------------

WrightTools is on GitHub, which is a fantastic way for developers to get software onto their
computers.  %
But how does software get onto everybody else's machine?  %
Developers call this process ``distribution''.  %
Luckily for us, distribution is fairly simple within the Python ecosystem.  %
The same tools that are used to distribute hugely popular packages like numpy are also available
for anyone else, including WrightTools.  %

The Python Package Index (PyPI) is the official third party software repository for Python.  %
It is community maintained, and supported by the Python Software Foundation and The Python
Packaging Group.  %
As of 2018-04-08 PyPI hosts 134,758 Python packages, all for free.  %
WrightTools is also hosted on PyPI.  %
Every time we change our version, we ``release'' by uploading the newest version to PyPI
\cite{PyPI}.  %
pip (``pip installs packages'', ``pip installs python'', or ``preferred installer program'')
can be used to install packages directly from PyPI:  %
\begin{codefragment}{bash}
pip install WrightTools
\end{codefragment}

Conda is a multilingual package manager that handles virtual environments and dependencies, even
binary dependencies, in a hassle-free way.  %
Since the scientific Python ecosystem has so many non-Python binary dependencies, Conda is a
popular choice---especially on Windows where the necessary compilers are not typically
pre-installed.  %
Unlike pip, conda is not tied to a single repository.  %
There is the official repository, maintained by Anaconda, the company that develops conda.  %
There is also the popular conda-forge repository, which is maintained by the community via
GitHub.  %
WrightTools is on conda-forge:  %
\begin{codefragment}{bash}
conda config --add channels conda-forge
conda install WrightTools
\end{codefragment}

\subsection{Documentation}  % ---------------------------------------------------------------------

WrightTools is a piece of scripted software, and many spectroscopists may not be comfortable using
such a thing immediately.  %
To this end, it is important to have easy-to-use, searchable documentation with end-users in
mind.  %

I have built a website for WrightTools documentation at \url{http://wright.tools/}.  %
The documentation is built and hosted by Read the Docs \cite{ReadTheDocs}.  %
Both the master and development versions are built, so users of each can access documentation that
is up to date for them.  %

% \section{Future directions}  % ====================================================================

% Single variable decomposition.  %

% Usage in next-generation simulation packages.  %

% More tests.  %

% Usage by multiple groups.  %