Testing Amazon Web Services Codebase: DynamoDB and S3

When switching to an Amazon Web Services infrastructure, one of the main challenges is testing.

Components such as DynamoDB and S3 come in handy; however, they come with a cost.
When it comes to continuous integration, you will end up spending resources if your builds use the real Amazon components.

Some of these components have local clones that can run on your machine.

You can use DynamoDB locally.

By issuing

java -Djava.library.path=./DynamoDBLocal_lib -jar DynamoDBLocal.jar -sharedDb

you will have a local DynamoDB instance up and running.

Also, on http://localhost:8000/shell you have a DynamoDB shell (based on JavaScript) which will help you get started.

In order to connect to the local instance you need to set the endpoint on your DynamoDB client.

On Java

AmazonDynamoDBClient client = new AmazonDynamoDBClient();
client.setEndpoint("http://localhost:8000"); 

On Node.js

var AWS = require('aws-sdk');
var config = {"endpoint":"http://localhost:8000"};
var client = new AWS.DynamoDB(config);
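
As a quick sanity check you can list the tables of the local instance. This is a minimal sketch; the region and credentials are dummy values, since DynamoDB Local does not validate them:

var AWS = require('aws-sdk');

var client = new AWS.DynamoDB({
    endpoint: 'http://localhost:8000',
    region: 'us-east-1',        // dummy region
    accessKeyId: 'fake',        // dummy credentials, only used to sign the request
    secretAccessKey: 'fake'
});

client.listTables({}, function(err, data) {
    if (err) {
        console.log(err);
    } else {
        // prints the names of the tables you have created locally
        console.log(data.TableNames);
    }
});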

Another base component of Amazon Web Services is the Simple Storage Service (S3).

Luckily, Fake-S3 exists: a lightweight server that clones Amazon S3.

Installing and running fake-s3 is pretty simple

gem install fakes3
fakes3 -r /mnt/fakes3_root -p 4567

In order to connect you have to specify the endpoint

On Java

AmazonS3 client = new AmazonS3Client();
client.setEndpoint("http://localhost:8000"); 

On Node.js

var AWS = require('aws-sdk');
var config = {"endpoint":"http://localhost:8000"};
var client = new AWS.S3(config);
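
To verify that the fake S3 server responds, here is a minimal sketch that creates a bucket and uploads an object. The bucket name and content are made up, the port matches the fakes3 command above, and s3ForcePathStyle keeps bucket names out of the hostname so requests resolve against localhost:

var AWS = require('aws-sdk');

var client = new AWS.S3({
    endpoint: 'http://localhost:4567',
    s3ForcePathStyle: true,     // path-style URLs instead of bucket subdomains
    accessKeyId: 'fake',        // dummy credentials, fake-s3 does not check them
    secretAccessKey: 'fake'
});

client.createBucket({Bucket: 'test-bucket'}, function(err) {
    if (err) {
        console.log(err);
        return;
    }

    client.putObject({Bucket: 'test-bucket', Key: 'hello.txt', Body: 'hello'}, function(err, data) {
        console.log(err || data);
    });
});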

These tools will come in handy during the development phase, especially when you are getting started and want a simple example. By running them locally you avoid the overhead of permissions and configuration that comes with each component you provision on Amazon.

Async for Node.js

The async module for Node.js saves the day when it comes to synchronizing asynchronous tasks or executing them in a serial manner.

To execute tasks in order, you can use the series method.


var async = require('async');

var somethingAsynchronous = function(callback) { 
    console.log('Called something asynchronous'); 
    callback(); 
};

var somethingElseAsynchronous = function(callback) { 
    console.log('called something else asynchronous'); 
    callback() 
};

async.series([
function(callback) {
  somethingAsynchronous(function(err,result) {
    if(err) {
      callback(err);
    } else {
      callback(null,result);
    }
  });
},
function(callback) {
  somethingElseAsynchronous(function(err,result) {
    if(err) {
      callback(err);
    } else {
      callback(null,result);
    }
  });
}
],function(err,results) {
  // results is an array with the result of each task, in order
});

To execute tasks in order and use the results of previous tasks, you have to use the waterfall method.
The last function specified handles the final result of the executed tasks. If an error occurs before all of the specified tasks have executed, the remaining tasks will not execute and the last function will handle the error.


var somethingAsynchronous = function(callback) { 
    callback(null,'This is a result'); 
};

var somethingElseAsynchronous = function(firstResult,callback) { 
   callback(null,firstResult+" and this is appended");
};

async.waterfall([
  function (callback){
    somethingAsynchronous(callback);
  },
  function(result,callback) {
    somethingElseAsynchronous(result,callback);
  }
],
function(err,result) {
  console.log('The end result is: '+result);
});
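
To illustrate the error handling described above, here is a minimal sketch in which the first task fails with a made-up error; the second task never runs and the final function receives the error directly:

async.waterfall([
  function (callback){
    // hypothetical failure in the first task
    callback(new Error('something went wrong'));
  },
  function(result,callback) {
    // never reached, because the previous task passed an error
    somethingElseAsynchronous(result,callback);
  }
],
function(err,result) {
  console.log('Got error: '+err.message);
});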

The method parallel is used to execute tasks in parallel and synchronize them after their execution.


var somethingAsynchronous = function(callback) { 
    
    /*
        Asynchronous code
    */
    
    callback(null,'23'); 
};

var somethingElseAsynchronous = function(callback) { 

    /*
        Asynchronous code
    */
    
    callback(null,'sad');
};

async.parallel([
somethingAsynchronous,
somethingElseAsynchronous
],function(err,result){
  console.log('The result is '+result);
});

If you have an array of items that need to be processed asynchronously, you can use map.


var items = ['a','b','c','d'];

async.map(items,function(item,callback) {

   callback(null,'Did something asynchronous with '+item);
},
function(err,results){

  results.forEach(function(result) {
      console.log(result);
  });
});

Use Map Reduce for Tf-Idf ranking in a Node.js and MongoDB environment

When developing a document search application, one of the challenges is ordering your results according to the occurrence of the term you search for. Tf-idf is a numerical statistic that assists you in weighing the results of your search.
Tf stands for term frequency.
Idf stands for Inverse document frequency.
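
Formally, for a term t, a document d given as a list of words, and a collection of documents D, the implementation below computes

\[
\mathrm{tf}(t,d) = \frac{|\{\, w \in d : t \text{ occurs in } w \,\}|}{|d|},
\qquad
\mathrm{idf}(t,D) = \ln\left(\frac{|D|}{|\{\, d \in D : t \text{ occurs in } d \,\}|}\right)
\]

and the weight of each document is the product tf × idf.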

To get a grasp, we will first develop a plain JavaScript implementation of tf-idf as a Node.js module.

function TfIdf() {
}

TfIdf.prototype.weights = function(documents,term) {
    
    var results = []
    
    var idf = this.idf(documents,term)
    
    for(var i=0;i<documents.length;i++) {
        
        var tf = this.tf(documents[i],term)
        var tfidf = tf*idf
        var result = {weight:tfidf,doc:documents[i]}    
        
        results.push(result)
    }

    return results
}

TfIdf.prototype.tf = function(words,term) {

    var result = 0
    
    for(var i=0;i<words.length;i++) {

        var word = words[i]

        if(word.indexOf(term)!=-1) {
            result = result+1
        }    
    }

    return result/words.length
}

TfIdf.prototype.idf = function(documents,term) {

    var occurrence = 0

    for(var j=0;j<documents.length;j++) {

        var doc = documents[j]

        if(this.__wordInsideDoc(doc,term)){
            occurrence = occurrence+1
        }
    }

    if(occurrence==0) {
        return undefined
    }

    return Math.log(documents.length/occurrence)
}

TfIdf.prototype.__wordInsideDoc = function(doc,term) {
    
    for(var i=0;i<doc.length;i++) {

        var word = doc[i]

        if(word.indexOf(term)!=-1) {
            return true
        }
    }    

    return false
}

module.exports = TfIdf

The weights function accepts the documents and the term to search for.

An example follows

var TfIdf = require('./TfIdf')

var tfIdf = new TfIdf()

var docs = [["latest","sprint"],["lair","laugh","fault"],["lemma","on"]]

console.log(tfIdf.weights(docs,"la"))

The result is

[ { weight: 0.2027325540540822, doc: [ 'latest', 'sprint' ] },
  { weight: 0.27031007207210955,
    doc: [ 'lair', 'laugh', 'fault' ] },
  { weight: 0, doc: [ 'lemma', 'on' ] } ]
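
As a quick sanity check: "la" appears in two of the three documents, so idf = ln(3/2) ≈ 0.4055. In ["latest","sprint"] one of two words matches, so tf = 1/2 and the weight is 0.4055 × 0.5 ≈ 0.2027; in ["lair","laugh","fault"] two of three words match, giving 0.4055 × 2/3 ≈ 0.2703; no word in ["lemma","on"] contains "la", so its weight is 0.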

Now we shall proceed with the map reduce approach, using Node.js and MongoDB.

First we will install the mongodb driver

npm install mongodb

Then we will set up our MongoDB connection. Once initialized, if there are no records, we will populate the database for testing purposes.

var MongoClient = require('mongodb').MongoClient
Server = require('mongodb').Server

var url = 'mongodb://localhost:27017/mapreduceexample'

function TfIdfMongo() {
}

TfIdfMongo.prototype.__getConnection = function(callback) {

    var tfIdfMongo = this

    MongoClient.connect(url,function (err, connection) {
        if (err) {
            callback(err)
        } else {

            var documents = connection.collection('documents');

            documents.count({}, function (error, numOfDocs) {
                if (numOfDocs == 0) {
                    tfIdfMongo.__insertTestRecords(connection,function(err) {
                        callback(err,connection)
                    })
                } else {
                    callback(undefined,connection)
                }
            })
        }
    })
}

TfIdfMongo.prototype.__insertTestRecords = function(connection,callback) {

    var documents = connection.collection('documents');

    var latestDocuments = [
        {words:["latest","sprint"]},
        {words:["lair","laugh","fault"]},
        {words:["lemma","on"]}
    ]

    documents.insert(latestDocuments,
        function(err,result) {
            callback(err)
        })
}

This is going to be a two-phase process.
In the first phase we have to calculate the idf.
To do so we will issue a map reduce job.
The term variable has to be passed so that it can be used by the map reduce process.
In order to use a dynamic variable in map reduce we will employ the scope parameter.

TfIdfMongo.prototype.__idf = function(connection,term,callback) {

    var tfIdfMongo = this

    var documents = connection.collection('documents');

    documents.mapReduce(
        tfIdfMongo.__mapIdf,
        tfIdfMongo.__reduceIdf,
        {
            scope: {permterm:term},
            out: "tfidf_results"
        },
        function(err,results) {

            if(err) {
                callback(err)
            }

            results.findOne({},function(err,result) {

                if(err) {
                    callback(err)
                }

                if(result.value.occurrence==0) {
                    return;
                }

                var idf = Math.log(result.value.count/result.value.occurrence)

                callback(undefined,idf)
            })
        }
    )
}

TfIdfMongo.prototype.__mapIdf = function() {

    var term = permterm

    var occurrence = 0

    for (var i = 0; i < this.words.length; i++) {

        var word = this.words[i]

        if (word.indexOf(term) != -1) {

            if (occurrence <=0 ) {

                occurrence = 1
            }
        }
    }

     emit("idf", occurrence)
}

TfIdfMongo.prototype.__reduceIdf = function(key,values) {

    var result = {count:values.length,occurrence:0}

    for(var i=0;i<values.length;i++) {

        if(values[i]==1) {
            result.occurrence += 1
        }
    }

    return result
}

The result is a single number: the idf value for the searched term.

In the second phase we have to calculate the tf for each document and multiply it by the idf value calculated previously.
Map reduce will be used in this case too.
This time, through the scope parameter, we are going to pass both the term that we search for and the idf value.

TfIdfMongo.prototype.__tf = function(connection,term,idf,callback) {

    var tfIdfMongo = this

    var documents = connection.collection('documents');

    documents.mapReduce(
        tfIdfMongo.__mapTf,
        function(key,values) {

            return values
        },
        {
            scope: {permTerm:term,permIdf:idf},
            out: "tf_results"
        },
        function(err,results) {

            if(err) {
                callback(err)
            }

            results.find({},function(err,docs) {

                if(err) {
                    callback(err)
                }

                docs.toArray(function (err,documents) {
                    callback(err,documents)
                })
            })
        }
    )
}

TfIdfMongo.prototype.__mapTf = function() {

    var term = permTerm
    var idf = permIdf

    var occurrence = 0

    for(var i=0;i<this.words.length;i++) {

        var word = this.words[i]
        if (word.indexOf(term) != -1) {

            occurrence += 1
        }
    }

    var weight = idf*(occurrence/this.words.length)

    emit(this, weight)
}

We will implement the tfIdf function which combines the two previous steps.
The function takes the term that we need to search for as an argument.

var MongoClient = require('mongodb').MongoClient
Server = require('mongodb').Server

var url = 'mongodb://localhost:27017/mapreduceexample'

function TfIdfMongo() {
}

TfIdfMongo.prototype.tfIdf = function(term,callback) {

    var tfIdfMongo = this

    tfIdfMongo.__getConnection(function(err,connection) {

        if(err) {
            callback(err)
        }

        tfIdfMongo.__idf(connection,term,function(err,idf) {

            if(err) {
                callback(err)
            }

            tfIdfMongo.__tf(connection,term,idf,function(err,documents) {

                if(err) {
                    callback(err)
                }

                connection.close()

                callback(undefined,documents)

            })

        })
    })
}

TfIdfMongo.prototype.__getConnection = function(callback) {

    var tfIdfMongo = this

    MongoClient.connect(url,function (err, connection) {
        if (err) {
            callback(err)
        } else {

            var documents = connection.collection('documents');

            documents.count({}, function (error, numOfDocs) {
                if (numOfDocs == 0) {
                    tfIdfMongo.__insertTestRecords(connection,function(err) {
                        callback(err,connection)
                    })
                } else {
                    callback(undefined,connection)
                }
            })
        }
    })
}

TfIdfMongo.prototype.__insertTestRecords = function(connection,callback) {

    var documents = connection.collection('documents');

    var latestDocuments = [
        {words:["latest","sprint"]},
        {words:["lair","laugh","fault"]},
        {words:["lemma","on"]}
    ]

    documents.insert(latestDocuments,
        function(err,result) {
            callback(err)
        })

}

TfIdfMongo.prototype.__tf = function(connection,term,idf,callback) {

    var tfIdfMongo = this

    var documents = connection.collection('documents');

    documents.mapReduce(
        tfIdfMongo.__mapTf,
        function(key,values) {

            return values
        },
        {
            scope: {permTerm:term,permIdf:idf},
            out: "tf_results"
        },
        function(err,results) {

            if(err) {
                callback(err)
            }

            results.find({},function(err,docs) {

                if(err) {
                    callback(err)
                }

                docs.toArray(function (err,documents) {
                    callback(err,documents)
                })
            })
        }
    )
}

TfIdfMongo.prototype.__mapTf = function() {

    var term = permTerm
    var idf = permIdf

    var occurrence = 0

    for(var i=0;i<this.words.length;i++) {

        var word = this.words[i]
        if (word.indexOf(term) != -1) {

            occurrence += 1
        }
    }

    var weight = idf*(occurrence/this.words.length)

    emit(this, weight)
}


TfIdfMongo.prototype.__idf = function(connection,term,callback) {

    var tfIdfMongo = this

    var documents = connection.collection('documents');

    documents.mapReduce(
        tfIdfMongo.__mapIdf,
        tfIdfMongo.__reduceIdf,
        {
            scope: {permterm:term},
            out: "tfidf_results"
        },
        function(err,results) {

            if(err) {
                callback(err)
            }

            results.findOne({},function(err,result) {

                if(err) {
                    callback(err)
                }

                if(result.value.occurrence==0) {
                    return;
                }

                var idf = Math.log(result.value.count/result.value.occurrence)

                callback(undefined,idf)
            })
        }
    )
}

TfIdfMongo.prototype.__mapIdf = function() {

    var term = permterm

    var occurrence = 0

    for (var i = 0; i < this.words.length; i++) {

        var word = this.words[i]

        if (word.indexOf(term) != -1) {

            if (occurrence <=0 ) {

                occurrence = 1
            }
        }
    }

     emit("idf", occurrence)
}

TfIdfMongo.prototype.__reduceIdf = function(key,values) {

    var result = {count:values.length,occurrence:0}

    for(var i=0;i<values.length;i++) {

        if(values[i]==1) {
            result.occurrence += 1
        }
    }

    return result
}



module.exports = TfIdfMongo

Our test showcase follows

var TfIdf = require('./TfIdf')
var TfIdfMongo = require('./TfIdfMongo')

var tfIdf = new TfIdf()

var docs = [["latest","sprint"],["lair","laugh","fault"],["lemma","on"]]


console.log("The results are "+JSON.stringify(tfIdf.tfIdf(docs,"la")))

var tfIdfMongo = new TfIdfMongo()

tfIdfMongo.tfIdf("la",function(err,results) {


    console.log("The results are "+JSON.stringify(results))

})

And we get the same results for both cases.

The results are [{"weight":0.2027325540540822,"doc":["latest","sprint"]},{"weight":0.27031007207210955,"doc":["lair","laugh","fault"]},{"weight":0,"doc":["lemma","on"]}]
The results are [{"_id":{"_id":"55f46602947446bb1a7f7933","words":["latest","sprint"]},"value":0.2027325540540822},{"_id":{"_id":"55f46602947446bb1a7f7934","words":["lair","laugh","fault"]},"value":0.27031007207210955},{"_id":{"_id":"55f46602947446bb1a7f7935","words":["lemma","on"]},"value":0}]

Why should I use map reduce for this problem?

The tf-idf ranking problem involves computations that can be parallelised.
The sequential approach could be an option in other environments, but for Node.js it has many drawbacks.
Node.js is a single-threaded environment; it was not designed for heavy computational tasks.
Its magic has to do with how well it executes I/O operations.
Consider the scenario of a large data set.
While the Node.js process is busy executing time-consuming computations, incoming requests will not be served appropriately.
However, there are workarounds for solutions based on Node.js, such as spawning extra processes and implementing a way for them to communicate, as sketched below.
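
A minimal sketch of that workaround, using Node's built-in child_process module (the worker.js file and the message format are hypothetical):

// main.js
var childProcess = require('child_process');

// spawn a separate Node.js process for the heavy computation
var worker = childProcess.fork('./worker.js');

worker.on('message', function(result) {
    console.log('Result from worker: ' + JSON.stringify(result));
});

// hand the task over to the worker, keeping the main event loop free for I/O
worker.send({term: 'la'});

// worker.js
process.on('message', function(task) {
    // the heavy computation would happen here, outside the main process
    process.send({term: task.term, done: true});
});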

To sum up

Map reduce fits the ranking problem well. Not only does it take away much of the computational overhead, but also much of the implementation overhead.

Integrate Redis to a Node.js project

In this article we are going to add caching to our Node.js application using Redis.

We will install the recommended client for Node.js, as mentioned on the official Redis page.

npm install redis --save

Next we shall create our client connection

var redis = require('redis')

var hostname = '127.0.0.1'
var port = '6379'
var password = 'yourpassword'

var client = redis.createClient(port,hostname,{no_ready_check: true})

client.auth(password)

client.on('connect', function() {
        console.log('Client was connected')
})

The password provided to auth will be stashed and resent on every connect.

Let’s see some basic actions

Set actions (sadd and smembers)


client.sadd('aset', 2)
client.sadd('aset', 1)
client.sadd('aset', 5)

client.smembers('aset',function(err,reply) {
    console.log(reply)
})

Get and Set actions

client.set('akey', "This is the value")

client.get('akey',function(err,reply) {
    console.log(reply)
})

Hash value actions

client.hset('hashone', 'fieldone', 'some value');
client.hset('hashone', 'fieldtwo', 'another value');

var hash = 'hashone'

client.hkeys(hash, function (err, fields) {

    fields.forEach(function(field,i) {

        console.log('The field is '+field)

        client.hget(hash,field,function (err, value) {
            console.log('The content is '+value)
        })
    })

});

List actions

client.rpush(['mylist', 'firstItem', 'secondItem'], function(err, listsize) {
    console.log(listsize)
});

client.lrange('mylist',0,-1,function(err,values) {
    console.log(values)
})

client.lpop('mylist',function(err,value) {
    console.log('Got '+value)
})
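
Putting these commands to use for caching, a common pattern is cache-aside: check Redis first and fall back to the expensive source on a miss. Here is a minimal sketch, where loadFromDatabase is a hypothetical function and the 60-second expiry is arbitrary:

function getDocument(id, callback) {
    var key = 'doc:' + id

    client.get(key, function(err, cached) {
        if (cached) {
            // cache hit: serve the stored value
            return callback(null, JSON.parse(cached))
        }

        // cache miss: load from the real source and store it with an expiry
        loadFromDatabase(id, function(err, doc) {
            if (err) {
                return callback(err)
            }
            client.setex(key, 60, JSON.stringify(doc))
            callback(null, doc)
        })
    })
}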

 

Conclusion

The Redis client for Node.js is pretty straightforward and easy to get started.

Keep in mind that one connection is adequate. Redis is single threaded, therefore there is no need to open multiple connections.
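
A simple way to honor that is to create the client once in its own module and require that module wherever it is needed (a minimal sketch; the file name is just an example):

// redisClient.js
var redis = require('redis')

var client = redis.createClient(6379, '127.0.0.1')

module.exports = client

// anywhere else in the application
var client = require('./redisClient')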

You can refer to the client's GitHub page for more examples and usage showcases.

Why I use Node.js

It has been a while since I took up Node.js development.
My early impressions were pretty positive and after some months of Node.js development I have to say that I am amazed.

There are many reasons to continue using Node.js for my projects.

Great for applications with heavy I/O

The asynchronous nature of Node.js enables you to stay focused on your implementation. You don’t need any extra configuration, as you do with multithreaded environments. Also, long I/O operations don’t have to be dispatched to custom mechanisms, which saves you extra development cost.
Provided your application is mostly based on I/O and less on computation, chances are that Node.js will work for you.

Bootstrapping

Node.js offers one of the quickest bootstrapping experiences I have had with a programming environment. All you need is node and npm installed. There are libraries for almost everything you need and the configuration required is minimal.
Getting started with the implementation of your own Node.js extension also takes no time at all.

Set up Simplicity

All it takes to set up your project is your source code and a package.json file with your dependencies.
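
For instance, a minimal package.json could look like the following; the name and versions here are only illustrative:

{
  "name": "my-app",
  "version": "1.0.0",
  "dependencies": {
    "async": "~1.4.0",
    "redis": "~0.12.1"
  }
}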

Make use of your Javascript skills

Although I am a backend developer, I have had to write some JavaScript in the past. The same applies to other developers I know, even the most backend-focused. Learning a language is an investment. You can make more out of your JavaScript knowledge by using Node.js on your projects, provided it suits their needs.

Not another web framework

Node.js is not just another web application framework. Due to its asynchronous nature and efficiency, it can be applied to many problems. For example, it can be used as glue among the components of your infrastructure. Also, thanks to heavy development, you don’t just get a runtime environment; you get a whole ecosystem with tools that apply to a wide variety of problems.

Conclusion

Node.js is already part of the tools that I use on a daily basis. However, it should be used wisely; make sure that it fits your project’s nature.
It is really challenging to deal with callback hell, but in exchange you get a pretty promising and fast-growing ecosystem.

Mocha and Node.js

Mocha is my choice of testing framework when it comes to node.js.

We should add it as a dependency on package.json

  "dependencies": {
    "mocha": "~2.2.4",
  }

Our test would be mocha-example-test.js

var assert = require('assert');

describe('Mocha Example',function(){
    it('Plain Check',function() {
        assert(true)
    })
})

We can handle asynchronous code with the done callback

var http = require('http')

it('Asynchronous Example with http connection',function(done) {
    var request = http.request({host:"www.google.com",path:"/"},function(result) {
        result.resume() // consume the response so the 'end' event fires
        result.on('end',function() {
            done()
        })
    })

    request.on('error',function(e) {
        done(e)
    })
 
    request.end()
})

Also, we can issue actions before each test executes or before the whole test suite runs.

For example, we open a MongoDB connection before the test cases are executed.


var MongoClient = require('mongodb').MongoClient

// connection URL, assuming a local MongoDB instance (adjust the database name to your own)
var url = 'mongodb://localhost:27017/test'

var connection

before(function(done) {
    MongoClient.connect(url,function(err,db) {

        if(err) {
            done(err)
        } else {
            connection = db
            done()
        }
    })
})

Before each test case we set up the mongodb collection that will be used.

var collection
beforeEach(function() {
    collection = connection.collection('example')
})
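
Likewise, we can clean up once the whole suite has finished, for example by closing the MongoDB connection in an after hook:

after(function(done) {
    connection.close(function(err) {
        done(err)
    })
})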

In order to run a mocha test we just issue

./node_modules/mocha/bin/mocha mocha-example-test.js
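
Alternatively, we can wire mocha into npm so the suite runs with a plain npm test, by adding a scripts entry to package.json:

  "scripts": {
    "test": "./node_modules/.bin/mocha mocha-example-test.js"
  }

With that in place, npm test runs the suite.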

Set up Jenkins for Node.js

Jenkins is a great CI server with many options and plugins.
With a bit of time and configuration we can use Jenkins for our Node.js applications.

First of all, the Node.js plugin for Jenkins enables us to run Node.js scripts as a build step.


In order to execute various Node.js tasks we can use a shell build step.

The next step is to install packages using npm


The next problem is how to keep our application up and running.

To achieve this we can use forever.

Just add the latest version as a dependency inside your package.json

"dependencies": {
"forever": "0.14.1"
}

Then in the same build step we should stop any already running applications.

./node_modules/forever/bin/forever stopall

By calling forever with BUILD_ID=dontKillMe in front of it, we ensure that Jenkins will not kill our running Node.js process, which is most probably an application listening on a port.

BUILD_ID=dontKillMe ./node_modules/forever/bin/forever start bin/www

 


For unit testing I prefer the mocha framework.
xunit-file is a great reporter that transforms the results of mocha into a JUnit-style result file. Therefore we will add the mocha and xunit-file dependencies.

"dependencies": {
"mocha": "~2.2.4",
"xunit-file":"0.0.6"
}

As a build step we should run

./node_modules/.bin/mocha --recursive --reporter xunit-file

 


The xunit file produced in the workspace will be used by Jenkins to produce a JUnit report.

Therefore we should add a post build action to Jenkins that will publish the JUnit results.


Liang’s hyphenation algorithm implementation in node.js

Currently I am involved in a project that requires some string hyphenation. Initially I took a shot at it using Liang’s hyphenation algorithm (ported from a Python implementation of Liang hyphenation).
However, it could not match my needs, therefore I had to switch to the one OpenOffice uses.

Anyway, here is the JavaScript implementation as a Node.js module.


function LiangHyphenator(patterns) {

    this.tree = {}
    this.patterns = patterns

    for(var i= 0;i<patterns.length;i++) {
        var pattern = patterns[i]

        this.__insertPattern(pattern)
    }

}

LiangHyphenator.prototype.__insertPattern = function(pattern) {

    var chars = this.__clearPattern(pattern)
    var points = this.__createPoints(pattern)

    this.__addPatternToTree(points,chars)
}

LiangHyphenator.prototype.__clearPattern = function(pattern) {
    var numericsExpression = new RegExp('[0-9]','g')
    return pattern.replace(numericsExpression,'')
}

LiangHyphenator.prototype.__createPoints = function(pattern) {

    var charExpression = new RegExp('[.a-z]','g')
    var splitted = pattern.split(charExpression)

    for(var i= 0;i<splitted.length;i++) {
        if(splitted[i]==='') {
            splitted[i]=0
        } else {
            splitted[i] = parseInt(splitted[i])
        }
    }

    return splitted
}

LiangHyphenator.prototype.__addPatternToTree = function(points,chars) {
    var tree = this.tree
    for(var i=0;i<chars.length;i++) {

        var c = chars[i]
        if(!tree[c]) {
            tree[c] = {}
        }
        tree = tree[c]

    }


    tree['None'] = points
}

LiangHyphenator.prototype.hyphenateWord = function(word) {
    if(word.length<=4) {
        return [word]
    }

    var work = '.'+word.toLowerCase()+'.'

    var points = this.__createZeroArray(work.length+1)

    var tree = {}

    for(var j=0;j<work.length;j++) {

        var restWord = work.slice(j)
        tree = this.tree

        for(var i=0;i<restWord.length;i++) {
            var char = restWord[i]
            if(tree[char]) {
                tree = tree[char]
                if(tree['None']) {
                    var p = tree['None']
                    for(var pi=0;pi< p.length;pi++) {
                        points[pi+j] = Math.max(points[pi+j],p[pi])
                    }
                }
            } else {
                break
            }
        }
    }

    points[1] = 0
    points[2] = 0
    points[points.length-2] = 0
    points[points.length-3] = 0

    var pieces = ['']
    var zipped = this.__zip([word.split(''),points.slice(2)])

    for(var i=0;i<zipped.length;i++) {
        var c = zipped[i][0]
        var p = zipped[i][1]

        pieces[pieces.length-1] += c

        if(p%2!=0) {
            pieces.push('')
        }
    }

    return pieces

}

LiangHyphenator.prototype.__createZeroArray = function(size) {

    var zeroArray = []

    for(var i=0;i<size;i++) {
        zeroArray.push(0)
    }

    return zeroArray
}

LiangHyphenator.prototype.__zip = function (arrays) {
    var serial = Array.apply(null,Array(arrays[0].length)).map(function(_,i){
        return arrays.map(function(array){return array[i]})
    });

    return serial
}


module.exports = LiangHyphenator
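
A usage sketch of the module follows; the pattern list here is a tiny illustrative subset, whereas real usage would load a full Liang/TeX pattern set:

var LiangHyphenator = require('./LiangHyphenator')

// tiny illustrative subset of patterns; load the full TeX pattern set for real hyphenation
var patterns = ['hy3ph', 'he2n', 'hena4', '1na', 'n2at', '1tio', '2io']

var hyphenator = new LiangHyphenator(patterns)

// returns the word split into hyphenatable pieces
console.log(hyphenator.hyphenateWord('hyphenation'))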

Make objects with node.js

Node.js is definitely awesome, especially for an I/O-heavy application.
Being a Java developer, my first take was on making a class.
ResultExtractor.js will be the file used to define the module.

function ResultExtractor(wordToMatch) {
    this.wordToMatch = wordToMatch
}

ResultExtractor.prototype.filterRecords = function(jsonResults) {
    var filteredRecords = []

    for(var i=0;i<jsonResults.length;i++) {
        var jsonResult = jsonResults[i]
        if(this.recordContains(jsonResult))
            filteredRecords.push(jsonResult)
    }

    return filteredRecords
}

ResultExtractor.prototype.recordContains = function(jsonResult) {
    return jsonResult['word'].indexOf(this.wordToMatch)!=-1;
}

module.exports = ResultExtractor

In the file that imports the module you can use the ResultExtractor class


var ResultExtractor = require('./ResultExtractor')

var resultExtractor = new ResultExtractor('whatever')
var testData = [{'word':'whatever'},{'word':'where'},{'word':'when'}]
var filteredData = resultExtractor.filterRecords(testData)

console.log(filteredData)

That’s it!!! More posts will follow; Node.js seems really promising.

Make a simple jQuery plugin

Let’s say you want to implement a textarea that has a logging nature (this is just an example, of course there are real tools out there for JavaScript logging 😛).

You can develop a jQuery plugin.
For example

(function( $ ) {
  
    $.fn.AreaLogger = function() {
      
        //assign our element to a variable
        var mytextarea = $(this);
        
        if(this.is("textarea")==false) {
            throw { 
                name:        "Jquery Plugin Error", 
                level:       "Logger Initialization", 
                message:     "Wrong html Element, please give a textarea.", 
                htmlMessage: "Wrong html Element, please give a textarea." 
            }
        }

        //returning a 'logger' element 
        return {
            info: function(message) {
                mytextarea.val(mytextarea.val()+"INFO "+new Date().toString()+": "+message+"\n");
            }
        };
    };
})( jQuery );

On your html code you can just write

<!DOCTYPE html>
<html>
    <head>
        <meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
        <title>Jquery Plugin Test</title>
        <script src="jquery.min.js">
        </script>        
        <script src="ArreaLogger.js"></script>
        <script>
            $(document).ready(function() {            
                
                var areaLogger = $("#logarea").AreaLogger();
                    
                $("#logtest").click(function() {
                    areaLogger.info($("#test").val());
                });
            });
        </script>
    </head>
    <body>
        <input id="test"/>
        <br/>
        <textarea id="logarea"></textarea>
        <div id="wut"></div>
        <br/>
        <button id="logtest">Test</button>
    </body>
</html>

You can download jQuery from the official jQuery website.