#include <QFile>
#include <QString>
// Read the file named by fileName line by line using Qt.
QFile file(fileName);
// Open as read-only text (translates platform line endings); bail out silently on failure.
if( !file.open(QIODevice::ReadOnly | QIODevice::Text) ) return;
// readLine() returns the raw line; trimmed() strips leading/trailing whitespace, including the newline.
// NOTE(review): line is discarded each iteration — this is a template; insert per-line processing here.
while(!file.atEnd()) { QString line = file.readLine().trimmed(); }
File
System.out.println(new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(new Date(Long.parseLong(epochDateInMilliseconds))));
$ wget http://kmkeen.com/jshon/jshon.tar.gz
$ wget http://www.digip.org/jansson/releases/jansson-2.4.tar.gz
$ install jansson first
$ cd jshon-xx
$ vim Makefile
$ add -I/path/to/jansson/include to CFLAGS
$ add -L/path/to/jansson/lib to LFLAGS
$ make and copy jshon to your bin folder
$ export PATH=$PATH:/path/to/jshon/bin
$ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/path/to/jansson/lib
# examples/documentation can be found here: http://kmkeen.com/jshon/
$ cat jsonFileName | jshon -k # show keys
\newcounter
# Click Here to View the Original References
% use Latex to create a simple csv file for test
\documentclass{minimal}
\begin{filecontents*}{scientists.csv}
name,surname,age
Albert,Einstein,133
Marie,Curie,145
Thomas,Edison,165
\end{filecontents*}
% Use package csvsimple to show a simple table
\documentclass{article}
\usepackage{csvsimple}
\begin{document}
\csvautotabular{scientists.csv}
\end{document}
$ mysql -h ipOrHostName -P port -u userName -p databaseName
mysql> SELECT field1,field2 FROM table1 INTO OUTFILE 'some.csv' FIELDS TERMINATED BY ',' ENCLOSED BY '"' LINES TERMINATED BY '\n';
# - store the file on the client
$ mysql -h ipOrHostName -P port -u userName -p databaseName -e "SELECT field1,field2 FROM table1" > some.csv
$ mysql -u root -p
mysql> CREATE DATABASE mydatabasename;
mysql> CREATE USER myuser IDENTIFIED BY 'mypassword';
mysql> GRANT ALL ON mydatabasename.* TO myuser;
mysql> USE mysql;
mysql> SHOW TABLES;
mysql> DESC user;
mysql> SELECT host,user FROM user;
mysql> UPDATE user SET host='%' WHERE user='root' and host='somehost';
mysql> FLUSH PRIVILEGES;
# Read $fileName line by line; die with the OS error message if it cannot be opened.
# Uses a lexical filehandle (auto-closed when it goes out of scope) instead of the
# bareword global FILE, which leaks into every package and collides on reuse.
open(my $fh, "<", $fileName) or die "$!";
while (my $line = <$fh>)
{
    chomp $line;    # strip the trailing newline
    # process $line here
}
close($fh);
> inputData=read.csv(fileName, header=TRUE, sep=',')
> names(inputData)
> t(matrixX)
> write.csv(tableX, file=c("fileName.csv"), quote=FALSE)
> sprintf("%.2f", 0.250199129003)
[1] "0.25"
> m <- list()
> m1 <- c(1,2,3)
> m[[length(m)+1]] <- m1
> m2 <- c(5,6,7,8,9)
> m[[length(m)+1]] <- m2
> names(m) <- c("m1Name", "m2Name")
# when using the list, e.g., m1 or m2
> m1 <- m[["m1Name"]]
# Click Here to View the Original References
> length(object) # number of elements or components
> str(object) # structure of an object
> class(object) # class or type of an object
> names(object) # names
> c(object,object,...) # combine objects into a vector
> cbind(object, object, ...) # combine objects as columns
> rbind(object, object, ...) # combine objects as rows
> object # prints the object
> ls() # list current objects
> rm(object) # delete an object
> newobject <- edit(object) # edit copy and save as newobject
> fix(object) # edit in place
# Click Here to View the Original References
> a=c("a_b", "c-d")
> gsub("[_-]", "", a)
[1] "ab" "cd"
$ sed s/oldString/newString/g
$ sed -n 'startLine,endLinep' fileName
awk '{sum+=$1} END {print sum}' myNumberFile
$ date +%s
$ 1354229900
$ date +%s -d "September 18, 2012"
$ 1347940800
# convert epoch to date
$ date +"%Y-%m-%d %H:%M:%S" -d @1354229900
$ 2012-11-29 17:58:20
$ find someDir | cpio -ov > someDir.cpio
# 2. Restore (copy-in mode)
$ cpio -idv < someDir.cpio
# 3. Restore files matching some pattern (copy-in mode)
$ cpio -idv "*fileNamePattern*" < someDir.cpio
# 4. List files (output format like ls)
$ cpio -it < someDir.cpio
$ cpio -it "*fileNamePattern*" < someDir.cpio
# 5. List files (output format like ls -l)
$ cpio -itv < someDir.cpio
$ cpio -itv "*fileNamePattern*" < someDir.cpio
$ for (( c=1; c<=5; c++ ))
$ do
$ echo "Welcome $c times"
$ done
# Click Here to View the Original References
$ array=( one two three )
$ printf "%s\n" "${array[@]}"
$ for i in "${arrayName[@]}"
$ do
$ echo "$i"
$ done
$ echo $(( ( $RANDOM % MaximumNumberToGenerate ) + 1 ))
$ uniq fileName
$ for line in $(< fileName)
$ do
$ echo $line
$ done
# read a file line by line with white space (not efficient)
$ FILENAME=$1
$ nbLines=`wc -l < $FILENAME`
$ count=0
$ while [ $count -lt $nbLines ]
$ do
$ let count++
$ line=`head -n $count $FILENAME | tail -1`
$ echo "$count: $line"
$ done
$ if [ $var -lt 1000 ]; then
$ echo "$var is < 1000."
$ fi
$ find someDir -type f -name '*.cpp'
$ find someDir -type f \( -name '*.cpp' -o -name '*.h' \)
$ find someDir -type f \( -name '*.cpp' -o -name '*.h' -o -name '*.java' \)
$ cut -d , -f 1,2,3 csvFile
$ paste -d , <(cut -d , -f 1 csvFile1) <(cut -d , -f 2,5 csvFile2) > mergedCSVFile
$ sort -t , -k 2,2n -k 1,1n csvFile > csvFile_sortedByField2ThenField1
$ yes $string | head -n N
$ cut keyValuePairFileName -d , -f 1 | sort | uniq | sed 's/\(.*\)/echo "\1,`grep "^\1," keyValuePairFileName \| wc -l`"/g' | sh
$ OLDIFS=$IFS
$ IFS=$(echo -en '\n\b')
$ for line in $lines
$ do
$ execute command based on $line here
$ done
$ dateList=`grep ^date cvsLogFileName | sed 's/date:\s\+\([^;]\+\);.*/\1/g' | xargs -L1 -I {} date +%s -d "{}" | sort -n | uniq`
$ firstDate=`echo "$dateList" | head -n 1`
$ lastDate=`echo "$dateList" | tail -n 1`
$ und add @fileList database.udb
$ und analyze database.udb
# export metrics
$ und metrics database.udb
# export dependencies (file level)
$ und export -dependencies file matrix dependencyMatrix.csv database.udb
# export dependencies (class level)
$ und export -dependencies class matrix dependencyMatrix.csv database.udb
" A tab character displays as 4 columns.
set tabstop=4
" shiftwidth: columns used for each step of (auto)indent, >> and <<.
set sw=4
" softtabstop: columns inserted/removed when pressing <Tab>/<BS> in insert mode.
set sts=4
" Show absolute line numbers.
set number
" <Tab> at the start of a line inserts 'shiftwidth' worth of indent.
set smarttab
" C-like automatic indenting on new lines.
set smartindent
" Apply the relaxedgreen color scheme (relaxedgreen.vim must be in ~/.vim/colors).
:colorscheme relaxedgreen
$ download it from here.
$ mkdir -p ~/.vim/colors (if not exist)
$ copy relaxedgreen.vim to ~/.vim/colors
Odds Ratio
Hazard rate analysis
http://www.graphpad.com/support/faqid/1790
http://www.uccs.edu/lbecker/effect-size.html