Thursday, August 16, 2012

WCF : http vs https configuration

http vs https configuration

HTTP Web.config settings.

<system.serviceModel>
    <services>
      <service name="MyService.ServiceImplementation.MyServiceCall" behaviorConfiguration="MyService.ServiceImplementation.MyServiceCall_Behavior">
        <!-- Service Endpoints -->
          <endpoint address="" binding="basicHttpBinding" bindingConfiguration="basicHttpBindingConfig" contract="MyService.ServiceContract.IMyService" bindingNamespace="http://www.MyService.com/">
          <!--
          <endpoint address="" binding="basicHttpBinding" contract="MyService.ServiceContract.IMyService" bindingNamespace="http://www.MyService.com/">
              Upon deployment, the following identity element should be removed or replaced to reflect the
              identity under which the deployed service runs.  If removed, WCF will infer an appropriate identity
              automatically.
          -->
                    <identity>
                        <dns value="localhost"/>
                    </identity>
                </endpoint>
                <endpoint address="mex" binding="mexHttpBinding" contract="IMetadataExchange"/>
            </service>
        </services>
        <bindings>
        <wsHttpBinding>
            <binding name="wsHttpBinding" closeTimeout="00:20:00" openTimeout="00:20:00" receiveTimeout="00:20:00" sendTimeout="00:20:00" messageEncoding="Mtom">
                <readerQuotas maxDepth="2147483647" maxStringContentLength="2147483647" maxArrayLength="2147483647" maxBytesPerRead="2147483647" maxNameTableCharCount="2147483647"/>
                <!--<httpTransport maxBufferPoolSize="2147483647" maxReceivedMessageSize="2147483647" authenticationScheme="Anonymous" maxBufferSize="2147483647" transferMode="Buffered"/>-->
            </binding>
           
        </wsHttpBinding>
            <basicHttpBinding>
                <binding name="basicHttpBindingConfig" closeTimeout="00:20:00" openTimeout="00:20:00" receiveTimeout="00:20:00" sendTimeout="00:20:00" transferMode="StreamedResponse" maxReceivedMessageSize="2147483647" maxBufferSize="2147483647" >
                    <readerQuotas  maxDepth="2147483647" maxStringContentLength="2147483647" maxArrayLength="2147483647" maxBytesPerRead="2147483647" maxNameTableCharCount="2147483647"/>
                    <!--<httpTransport maxBufferPoolSize="2147483647" maxReceivedMessageSize="2147483647"  maxBufferSize="2147483647" transferMode="StreamedResponse"/>-->
                </binding>
            </basicHttpBinding>
        </bindings>
     
        <behaviors>
            <serviceBehaviors>
                <behavior name="MyService.ServiceImplementation.MyServiceCall_Behavior">
                    <!-- To avoid disclosing metadata information, set the value below to false and remove the metadata endpoint above before deployment -->
                    <serviceMetadata httpGetEnabled="true"/>
                    <!-- To receive exception details in faults for debugging purposes, set the value below to true.  Set to false before deployment to avoid disclosing exception information -->
                    <serviceDebug includeExceptionDetailInFaults="true"/>
                </behavior>
            </serviceBehaviors>
        </behaviors>
    </system.serviceModel>


HTTPS Web.config settings.


 <system.serviceModel>
    <!-- We have multiple host headers on our server -->
    <serviceHostingEnvironment multipleSiteBindingsEnabled="true">
    </serviceHostingEnvironment>
  
<bindings>
      <basicHttpBinding>
        <binding name="SecureTransport" >
          <security mode="Transport">
            <transport clientCredentialType="None"/>
          </security>
        </binding>

   
    <binding name="basicHttpBindingConfigMTOM" closeTimeout="00:20:00" openTimeout="00:20:00" receiveTimeout="00:20:00" sendTimeout="00:20:00" messageEncoding="Mtom" >
<security mode="Transport">
            <transport clientCredentialType="None"/>
          </security>

        <readerQuotas maxDepth="2147483647" maxStringContentLength="2147483647" maxArrayLength="2147483647" maxBytesPerRead="2147483647" maxNameTableCharCount="2147483647"/>
    </binding>
    <binding name="basicHttpBindingConfigTEXT" closeTimeout="00:20:00" openTimeout="00:20:00" receiveTimeout="00:20:00" sendTimeout="00:20:00" messageEncoding="Text" transferMode="StreamedResponse">
<security mode="Transport">
            <transport clientCredentialType="None"/>
          </security>
        <readerQuotas maxDepth="2147483647" maxStringContentLength="2147483647" maxArrayLength="2147483647" maxBytesPerRead="2147483647" maxNameTableCharCount="2147483647"/>               
    </binding>
      </basicHttpBinding>
    </bindings>


    <services>
      <service name="MyService.ServiceImplementation.MyServiceCall" behaviorConfiguration="MyService.ServiceImplementation.MyServiceCall_Behavior">
        <!-- Service Endpoints -->

        <endpoint address="" bindingConfiguration="basicHttpBindingConfigMTOM" binding="basicHttpBinding" contract="MyService.ServiceContract.IMyService" bindingNamespace="http://www.MyService.com" >

          <!--<endpoint address="Reporting/MyService.svc" binding="basicHttpBinding" contract="MyService.ServiceContract.IMyService">
Make the endpoint relative
                -->

          <!--
              Upon deployment, the following identity element should be removed or replaced to reflect the
              identity under which the deployed service runs.  If removed, WCF will infer an appropriate identity
              automatically.
          -->
          <identity>

            <dns value="test.MyService.com" />
          </identity>
        </endpoint>
        <endpoint address="mex" binding="mexHttpBinding" contract="IMetadataExchange" />
      </service>
    </services>


    <behaviors>
      <serviceBehaviors>
        <behavior name="MyService.ServiceImplementation.MyServiceCall_Behavior">
          <!-- To avoid disclosing metadata information, set the value below to false and remove the metadata endpoint above before deployment -->
          <serviceMetadata httpGetEnabled="false" httpsGetEnabled="True" httpsGetUrl=""/>
          <!-- To receive exception  details in faults for debugging purposes, set the value below to true.  Set to false before deployment to avoid disclosing exception information -->
          <serviceDebug includeExceptionDetailInFaults="true" />
        </behavior>
      </serviceBehaviors>
    </behaviors>

  </system.serviceModel>

Monday, May 14, 2012

SQL SERVER: DB backup Script

--EXEC BackupDBUtility @Path = 'D:\DBBackup\My\', @dbName = 'Test1'

-- Creates a timestamped .BAK backup of @DBName under @Path\@DBName\,
-- creating the target folder first if it does not already exist.
-- Example: EXEC BackupDBUtility @Path = 'D:\DBBackup\My\', @DBName = 'Test1'
Create proc BackupDBUtility(@Path VARCHAR(100), @DBName VARCHAR(20))
AS
begin
SET NOCOUNT ON;

DECLARE @Now CHAR(14);          -- current date/time in the form yyyymmddhhmmss
DECLARE @filename VARCHAR(200); -- full path and file name of the .BAK file

-- Get the current date using style 120 (yyyy-mm-dd hh:mi:ss),
-- then strip all dashes, spaces, and colons.
SELECT @Now = REPLACE(REPLACE(REPLACE(CONVERT(VARCHAR(50), GETDATE(), 120), '-', ''), ' ', ''), ':', '');

-- Build the .BAK path and file name,
-- e.g. D:\DBBackup\My\Test1\Test1_20120514093000.BAK
SELECT @filename = @Path + @DBName + '\' + @DBName + '_' + @Now + '.BAK';

-- Folder that will hold this database's backups.
SELECT @Path = @Path + @DBName;

-- Ensure the folder exists. xp_create_subdir succeeds (no-op) when the
-- directory is already there, so no existence check is needed.
-- (The previous xp_dirtree check compared the relative subdirectory names
-- it returns against the full @Path, so it could never match.)
EXEC master.dbo.xp_create_subdir @Path;

-- Take the backup.
BACKUP DATABASE @DBName TO DISK = @filename;

end

SQL SERVER : Enable xp_cmdshell

sp_configure 'xp_cmdshell', '1' 
reconfigure
GO

sp_configure 'xp_cmdshell'
GO

ASP.NET : Locking Objects

These limit the number of threads that can perform some activity or execute a section of code at a time. Exclusive locking constructs are most common — these allow just one thread in at a time, and allow competing threads to access common data without interfering with each other. The standard exclusive locking constructs are lock (Monitor.Enter/Monitor.Exit), Mutex, and SpinLock. The nonexclusive locking constructs are Semaphore, SemaphoreSlim, and the reader/writer locks.

lock(obj)
  • is a CLR construct used for intra-object thread synchronization. Ensures that only one thread can take ownership of the object's lock & enter the locked block of code. Other threads must wait till the current owner relinquishes the lock by exiting the block of code. Also it is recommended that you lock on a private member object of your class.
Monitors
  • lock(obj) is implemented internally using a Monitor. You should prefer lock(obj) because it prevents you from goofing up like forgetting the cleanup procedure. It 'idiot-proof's the Monitor construct if you will.
    Using Monitor is generally preferred over mutexes, because monitors were designed specifically for the .NET Framework and therefore make better use of resources.
Using a lock or monitor is useful for preventing the simultaneous execution of thread-sensitive blocks of code, but these constructs do not allow one thread to communicate an event to another. This requires synchronization events, which are objects that have one of two states, signaled and un-signaled, that can be used to activate and suspend threads. Mutex, Semaphores are OS-level concepts. e.g with a named mutex you could synchronize across multiple (managed) exes (ensuring that only one instance of your application is running on the machine.)

Mutex:
  • Unlike monitors, however, a mutex can be used to synchronize threads across processes. When used for inter-process synchronization, a mutex is called a named mutex because it is to be used in another application, and therefore it cannot be shared by means of a global or static variable. It must be given a name so that both applications can access the same mutex object. In contrast, the Mutex class is a wrapper to a Win32 construct. While it is more powerful than a monitor, a mutex requires interop transitions that are more computationally expensive than those required by the Monitor class.
  • Use the Semaphore class to control access to a pool of resources. Threads enter the semaphore by calling the WaitOne method, which is inherited from the WaitHandle class, and release the semaphore by calling the Release method. The count on a semaphore is decremented each time a thread enters the semaphore, and incremented when a thread releases the semaphore. When the count is zero, subsequent requests block until other threads release the semaphore. When all threads have released the semaphore, the count is at the maximum value specified when the semaphore was created. A thread can enter the semaphore multiple times..The Semaphore class does not enforce thread identity on WaitOne or Release.. programmers responsibility to not muck up. Semaphores are of two types: local semaphores and named system semaphores. If you create a Semaphore object using a constructor that accepts a name, it is associated with an operating-system semaphore of that name. Named system semaphores are visible throughout the operating system, and can be used to synchronize the activities of processes. A local semaphore exists only within your process. It can be used by any thread in your process that has a reference to the local Semaphore object. Each Semaphore object is a separate local semaphore.
 Reference:
 

Thursday, May 10, 2012

UnitTest : Moq

What?

Moq (pronounced "Mock-you" or just "Mock") is the only mocking library for .NET developed from scratch to take full advantage of .NET 3.5 (i.e. Linq expression trees) and C# 3.0 features (i.e. lambda expressions) that make it the most productive, type-safe and refactoring-friendly mocking library available. And it supports mocking interfaces as well as classes. Its API is extremely simple and straightforward, and doesn't require any prior knowledge or experience with mocking concepts.

 Checkout the QuickStart for more examples!

 

Features at a glance

Moq offers the following features:
  • Strong-typed: no strings for expectations, no object-typed return values or constraints
  • Unsurpassed VS intellisense integration: everything supports full VS intellisense, from setting expectations, to specifying method call arguments, return values, etc.
  • No Record/Replay idioms to learn. Just construct your mock, set it up, use it and optionally verify calls to it (you may not verify mocks when they act as stubs only, or when you are doing more classic state-based testing by checking returned values from the object under test)
  • VERY low learning curve as a consequence of the previous three points. For the most part, you don't even need to ever read the documentation.
  • Granular control over mock behavior with a simple MockBehavior enumeration (no need to learn what's the theoretical difference between a mock, a stub, a fake, a dynamic mock, etc.)
  • Mock both interfaces and classes
  • Override expectations: can set default expectations in a fixture setup, and override as needed on tests
  • Pass constructor arguments for mocked classes
  • Intercept and raise events on mocks
  • Intuitive support for out/ref arguments 

Installing and Setting Up Moq

Moq is maintained as a Google Code project. You can download the Moq binaries and API documentation here:
http://code.google.com/p/moq/
After you download the Moq binaries from Google Code, make sure that you unblock the archive by right-clicking the file, selecting Properties, and pushing the Unblock button (see Figure 1). If you forget to do this, you run into confusing security issues when you try to use Moq within Visual Studio.
clip_image002
The archive includes an assembly named Moq.dll. You need to add this assembly to your test project in Visual Studio. You also need to add the Moq namespace to your test classes.

What Can Be Mocked?

You can use Moq to create mocks from both interfaces and existing classes. There are some requirements on the classes. The class can’t be sealed. Furthermore, the method being mocked must be marked as virtual. You cannot mock static methods (use the adaptor pattern to mock a static method).
These limitations are the same as the limitations you face when working with Rhino Mocks. Both Moq and Rhino Mocks use proxy classes under the covers. Indeed, both frameworks derive from the same Castle DynamicProxy code base.

Mocking Methods and Properties

Imagine that you are building a database-driven web application. Imagine, for example, that you are creating an online store. You want to concentrate on writing all of the business logic for your store before doing anything else. In particular, you don’t want to devote any time to writing your data access components before writing your business components.
This is a good situation to take advantage of a Mock Object Framework. In this situation, you can create an interface that describes how you want your data access component to look. Then, you can simply mock the interface and take advantage of the mock while testing your business logic. The mock enables you to avoid writing code until you are really ready to do it.

UnitTest : Mock Framework

One way of thinking about an object-oriented program is that each object is an entity that interacts with other objects (which we will call 'collaborators') in order to do its work. This is illustrated in the following diagram:
MockObjects1
The idea behind Mock Objects is this: when unit testing an object, you replace its collaborators with 'Mock' collaborators. This is illustrated below:
MockObjects2
So how is this different from stubs? Well, whilst there are some similarities, there are also two important distinctions:
  1. Mock Objects test themselves - ie, they check that they have been called in the right manner at the right time by the object being tested. Stubs generally just return stubbed data, which can be configured to change depending on how they are called.
  2. Mock Objects are generally extremely lightweight. There are often totally different instances of them set up for every test. Stubs, on the other hand, are relatively heavyweight and are often reused between tests.

Design Implications

It's important to realise that, depending on the degree to which you use them, Mock Objects have two major implications for your software design. These are:

Everything by Interfaces

Most inter-object interaction must be done via interfaces. This is so that either real or mock implementations can be passed to objects being tested. Another way of thinking about this is that you essentially have to introduce an extra layer of indirection between an object and any collaborator that you want to mock. This extra indirection can make the code difficult to follow at first.

Dependency Injection

Generally, the collaborators of an object should be given to the object - the object should not get the collaborators itself. This is an existing design strategy known as 'dependency injection', or 'inversion of control' - perhaps it's most well-known use is in the Spring framework. I won't go into this pattern in too much detail - Martin Fowler has written a useful article about it. I found that whilst its usage could generally lead to better design, when used for testing at a very low level it can have quite major and disturbing implications on your code. I will discuss this further in the section Experiences with Mock Objects.

When Are Mock Objects Useful?

Perhaps the main benefit of using Mock Objects is that because you are replacing the collaborators of an object, you don't have to set up all of the complex state that may be required to run those collaborators.
For example, I was recently working on some business logic that took data from an XML file, processed it, and inserted it in a database. When it came to testing the intricacies of the business logic, I didn't want to have to write an automated test to painstakingly set up XML files, trigger the business logic and then carefully check the contents of the database tables. All I cared about was the the business logic interacted correctly with the XML Parser and the database layer - ie, that it made the right calls with the right parameters at the right time. So I carefully defined my interfaces with the XML parser and the database, and then plugged 'Mock' implementations of these interfaces into the business logic when I was testing it.
An important thing to emphasize about Mock Objects is that they are really just a tool that helps you to test details of an object that would have otherwise been inaccessible or impractical to test.
For example, by mocking up a JDBC driver used by a Data Access Object, you can easily simulate the driver throwing SQLExceptions, and thus test how your DAO deals with them. Simulating such conditions with a real JDBC driver in an automated test would be much more difficult. And whilst mocking up parts of a JDBC driver can seem like pain, I believe that in the long run it's less painful than trying to set up a database to induce a particular behaviour that you are trying to test.
Overall I found that when developing with Mock Objects, I spent most of my time writing lots of small, focused tests, instead of spending lots of time thinking about how I could contrive a single big test that would exercise a deeply embedded piece of functionality.
The flipside of this is that using Mock Objects does not test the integration of your units. As you would know, sometimes defects can occur during the integration of components - there can be mismatches between what collaborators expect from one another.
For example, at some stage it would be a good idea to try your unit with a real JDBC driver just in case the query that you're passing to it is garbage. Consequently, higher-level tests of your unit are still a good idea. It's just that if it gets to a point where you're spending more time writing code that is 'non-core' to the test - ie, code for setting up or tearing down the particular state required - it might be worth using Mock Objects instead.

There are many Framework available for the same.
 
 This is just a quick example; it doesn't mean much, but it shows how each framework's syntax lets you set up an expectation and a return value for a method:
  • NMock:
    IMock mock = new DynamicMock(typeof(IRequest));
    IRequest request = (IRequest)mock.MockInstance;
    mock.ExpectAndReturn("Username","Ayende");
    testedObject.HandleRequest(request);
    mock.Verify();
    EasyMock.Net:
    MockControl control = MockControl.CreateControl(typeof(IRequest));
    IRequest request = (IRequest)control.GetMock();
    control.ExpectAndReturn(request.Username,"Ayende");
    control.Replay();
    testedObject.HandleRequest(request);
    control.Verify();
    TypeMock.Net: [Update: This is the correct syntax]
    Mock requestMock = MockManager.Mock(typeof(ConcreteRequest));
    requestMock.ExpectAndReturn("Username","Ayende");
    //TypeMock.Net should take care of intercepting the calls for Username
    testedObject.HandleRequest(new ConcreteRequest());
    MockManager.Verify();
    Rhino Mocks Version 1.0:
    MockControl mockRequest = MockControl.CreateControl(typeof(IRequest));
    IRequest request = (IRequest)mockRequest.MockInstance;
    mockRequest.ExpectAndReturn(request.Username,"Ayende");
    mockRequest.Replay();
    testedObject.HandleRequest(request);
    mockRequest.Verify();
    NMock2:
    using(Mockery mocks = new Mockery())
    {
    IRequest request = (IRequest)mocks.NewMock(typeof(IRequest),"IRequest");
    Expect.Once.On(request).GetProperty("Username").Will(Return.Value("Ayende"));
    testedObject.HandleRequest(request);
    }
    Rhino Mocks Version 2.0:
    using(MockRepository mocks = new MockRepository())
    {
    IRequest request = (IRequest)mocks.CreateMock(typeof(IRequest));
    Expect.On(request).Call(request.Username).Return("Ayende");
    mocks.ReplayAll();
    testedObject.HandleRequest(request);
    }
     

Monday, May 7, 2012

WCF : DataContract Vs MessageContract

1. Comparison

Data Contracts

WCF data contracts provide a mapping function between .NET CLR types that are defined in code and XML Schemas Definitions defined by the W3C organization (www.w3c.org/) that are used for communication outside the service.
you can say “Data contract is a formal agreement between a service and a client that abstractly describes the data to be exchanged”. That is, to communicate, the client and the service do not have to share the same types, only the same data contracts. A data contract precisely defines, for each parameter or return type, what data is serialized (turned into XML) to be exchanged.

Message Contracts

Message contracts describe the structure of SOAP messages sent to and from a service and enable you to inspect and control most of the details in the SOAP header and body. Whereas data contracts enable interoperability through the XML Schema Definition (XSD) standard, message contracts enable you to interoperate with any system that communicates through SOAP.
Using message contracts gives you complete control over the SOAP message sent to and from a service by providing access to the SOAP headers and bodies directly. This allows use of simple or complex types to define the exact content of the SOAP parts.

2. Why use MessageContract when DataContract is there?

Data contracts are used to define the data structure. Messages that are simply a .NET type, lets say in form of POCO (plain old CLR object), and generate the XML for the data you want to pass.
Message contracts are preferred only when there is a need to “control” the layout of your message(the SOAP message); for instance, adding specific headers/footer/etc to a message.
Sometimes complete control over the structure of a SOAP message is just as important as control over its contents. This is especially true when interoperability is important or to specifically control security issues at the level of the message or message part. In these cases, you can create a message contract that enables you to use a type for a parameter or return value that serializes directly into the precise SOAP message that you need.

3. Why we use MessageContract to pass SOAP headers ?

Passing information in SOAP headers is useful if you want to communicate information “out of band” from the operation signature.
For instance, session or correlation information can be passed in headers, rather than adding additional parameters to operations or adding this information as fields in the data itself.
Another example is security, where you may want to implement a custom security protocol (bypassing WS-Security) and pass credentials or tokens in custom SOAP headers.
A third example, again with security, is signing and encrypting SOAP headers, where you may want to sign and/or encrypt some or all header information. All these cases can be handled with message contracts. The downside with this technique is that the client and service must manually add and retrieve the information from the SOAP header, rather than having the serialization classes associated with data and operation contracts do it for you.

When we use MessageContract ?



A very simple answer to the question is, when you need a higher level of control over the message, such as sending custom SOAP header, you then use MessageContract instead of DataContract. But in my opinion, most of the messaging needs can be catered by DataContracts.
This is the extract from MSDN : Sometimes complete control over the structure of a SOAP message is just as important as control over its contents. This is especially true when interoperability is important or to specifically control security issues at the level of the message or message part. In these cases, you can create a message contract that enables you to use a type for a parameter or return value that serializes directly into the precise SOAP message that you need.
To elaborate on the above mentioned MSDN extract, a message contract allows you to specifically control which elements will be in the SOAP header and which will be in the SOAP body, and this isn't possible using a DataContract, since DataContracts represent types. Messages are not used as types but rather as the payload a method operates on, and they are specific to the operation they are passed to and from.
This situation arises when you have a communication partner which requires a very specific format and you have to tweak your SOAP messages to match that given layout exactly. In my view, always use DataContracts unless you have to use MessageContract for a very good reason.
One of the hypothetical situation could be your communication partner would like to have a custom security header with username and hashed password. So you could have a message contract something similar to the below

// Message contract for a banking transaction. Credentials are carried in
// the SOAP header (out of band from the payload), while the transaction
// data itself is serialized into the SOAP body.
[MessageContract]
public class BankingTransaction
{
  // Serialized as SOAP header elements (custom security information).
  [MessageHeader] public string UserName;
  [MessageHeader] public string Password;
  // Serialized as SOAP body elements (the operation payload).
  [MessageBodyMember] private Account sourceAccount;
  [MessageBodyMember] private Account targetAccount;
  [MessageBodyMember] public int amount;
}
Notice on the above contract the UserName and Password are decorated with MessageHeader attribute. The generated SOAP message with WCF basicHttpBinding will look like following:
<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/">
  <s:Header>
    <Action s:mustUnderstand="1" xmlns="http://schemas.microsoft.com/ws/2005/05/addressing/none">http://tempuri.org/IMyService/BankingTransaction</Action>
    <h:Password xmlns:h="http://tempuri.org/">HashPassword</h:Password>
    <h:UserName xmlns:h="http://tempuri.org/">TestUser</h:UserName>
  </s:Header>
  <s:Body>
    <BankingTransaction xmlns="http://tempuri.org/">
      <amount>10</amount>
      <sourceAccount>1234</sourceAccount>
      <targetAccount>5678</targetAccount>
    </BankingTransaction>
  </s:Body>
</s:Envelope>

Notice the SOAP the password and username members are serialized as SOAP header and the remaining members were serialized as SOAP body.

4. Can’t mix datacontracts and messagecontracts.

Because message-based programming and parameter-based programming cannot be mixed, so you cannot specify a DataContract as an input argument to an operation and have it return a MessageContract, or specify a MessageContract as the input argument to an operation and have it return a DataContract. You can mix typed and untyped messages, but not messageContracts and DataContracts. Mixing message and data contracts will cause a runtime error when you generate WSDL from the service.

AOP : Aspect Oriented Programming Basic

What is AOP?

Aspect Oriented Programming or AOP is an interesting concept that can be applied to many of the programming problems we solve everyday. In our Visual Studio team system code we have a lot of web-services and remoting code that essentially does the following
// Illustrative method: the entry/exit tracing and the security check are
// cross-cutting concerns that AOP would factor out of the base-code.
public void MyMethod(int parameter)
{
    Trace.EnteredMethod("MyMethod", parameter); // cross-cutting: tracing on entry
    SecurityCheck();                            // cross-cutting: security
    // Bunch of processing
    Trace.ExitMethod("MyMethod");               // cross-cutting: tracing on exit
}
This is not just peculiar to our domain but is seen across different domains. In OO programming classes and methods are designed for performing specific operations and common/duplicate functionality are factored out into common classes. However, there are cross-cutting concerns that span across all classes and methods, like logging and security checks. OOP only partially solves this problem by requiring users to define separate classes for logging and security checks and requiring each class/methods needing these services to call them. AOP targets and solves this problem elegantly.
AOP divides code into base-code (code for your functionality) and a new construct called aspect. Aspect encapsulates these cross-cutting concerns using the following concepts
  • join-points: The points in the structure of base-code where the cross-cutting functionality needs to execute. This is typically when specific methods are entered or exited or properties are accessed.
  • point-cut: A logical description of join-points using some specific syntax
  • advice: additional code like logging and security check that each of these methods need to perform
The most mature AOP language is probably AspectJ which adds AOP extensions to Java. However, for this blog, I'd stick to .NET languages like AspectDNG, Aspect# and C#.


There is a lot of AOP implementation in C#, VB.net. this is some of AOP Implementations:
  • Aspect.NET
  • LOOM.NET
  • Enterprise Library 3.0 Policy Injection Application Block
  • Puzzle.NAspect
  • AspectDNG
  • Aspect#
  • Compose*
  • PostSharp
  • Seasar.NET
  • DotSpect (.SPECT)
  • The Spring.NET Framework as part of its functionality
  • Wicca and Phx.Morph
  • SetPoint
Another Article:

Index

Introduction

I just completed my second book ".NET Interview questions" and now on my third book "SQL Server 2005 Interview questions". Had a decent time in between to update my skills and be in tune with new developments. Thanks to my publisher for giving me a free pass for the Aspect Oriented Programming seminar in Mumbai, that's what has inspired me to write this article. A lot has been written about AOP (Aspect Oriented Programming) on the Web, but none of the articles cover how to implement it practically in C#.
Let's start with a small definition on AOP first:
"Aspect Oriented Programming is a methodology to separate cross cut code across different modules in a software system."
In short all the cross cut code is moved to a separate module, thus increasing more modularity and bringing in ease of maintenance. Okay, that was a theoretical definition, let's try to understand why we really need AOP when we have decent methodologies like "Object Oriented Programming" and "Procedural Oriented Programming".
"Aspect oriented programming is not introduced in order to replace OOP, but assists it to remove its short comings. I see AOP as a brother of OOP".

Every requirement is a Concern

"Software development exists because of business concerns".
Software development is nothing but addressing a collection of concerns in real life. For instance, a customer sales software application has the following concerns:
  • User should be able to add, update and delete customer related information.
  • User should be able to track sales related to customer.
  • User should have a facility to print customer and sales information.
  • User should have a facility to email customer and sales information.
Okay, now readers will be wondering what's so special about these concerns, it can easily be implemented using OOP. In the next section we will try to implement the above concerns using the OOP methodology and see why we need AOP.

Cross Cutting Concern and Tangled code

Figure 1.1 Class diagram for Customer Sales Software Application
Above is the class diagram for the Customer Sales Software Application discussed in the first section. We are trying to address the four concerns for the application: Customer Maintenance, Sales Maintenance, Printing and Sending Email. So by abiding to all laws of OOP, the above class diagram is drawn. "ClsCustomer" class is responsible for adding, updating and deleting the customer records. "ClsSales" class is responsible for maintaining sales information for a customer, you can see the link between "ClsCustomer" and "ClsSales" classes. There were also some technical concerns. Printing and sending email are addressed by "ClsPrint" and "ClsEmail" classes. Also note both classes "ClsCustomer" and "ClsSales" have a dependency relationship on both of these classes ("ClsPrint" and "ClsEmail") to achieve the technical functionality.
Now according to OOP literature the first very important thing is that every object should be independent and should be concerned only about its functionality. Example, the "ClsCustomer" should only be concerned about adding, updating and deleting customer records. The Customer class should not have responsibilities of Sales or Print class. All the objects should work using messaging to achieve certain business functionality. In the above class diagram, all the classes are collaborating to make work the complete "Customer Sales Application".
OK, now it's time to look at the implementation of the above class diagram.
Figure 1.2 Explorer look of Customer Sales Application
As dictated by the class diagram, all the classes are included in the Explorer. OK, let's look at one of the implementations, that is the customer class implementation, i.e. the "Add" method of the Customer class. Below is a paste of the "Add" method of the Customer class.
/// <summary>
/// Adds customer information, then performs the cross-cutting email and
/// print steps — the "tangled code" this article is illustrating.
/// </summary>
public void Add()
{
    // Core concern: persist the customer record.
    // (The actual database insert code would go here.)

    // Cross-cutting concern #1: send a notification email once the
    // record has been added.
    ClsEmail emailSender = new ClsEmail();
    emailSender.Send();

    // Cross-cutting concern #2: print the customer details after the
    // email has been sent.
    ClsPrint printer = new ClsPrint();
    printer.Print();
}
Okay, the "Add" method of "ClsCustomer" is doing some really heavy duty like:
  • It adds the customer data to the customer database.
  • Then it sends email using the class "ClsEmail".
  • Finally it prints the customer details using the "ClsPrint" class.
Don't you think, guys, that "ClsCustomer" is doing some really heavy work? Especially, it's doing a lot of things that are not its concern. Example: sending email and printing are not its concern at all. Also note the same implementation has to be done in the "ClsSales" class. So in the Sales class too, we have to use both the classes: "ClsEmail" and "ClsPrint". In short, the "Print" and "Email" concerns span across more than one module. Such types of concerns are called "Cross Cutting Concerns". The code over there is quite messed up as we are using lots of objects. This type of code is called "Tangled code" in AOP terminology.
Okay, so here are some observations about concerns. All software applications have two types of concerns:
  • Core / Main concern. (Example: Customer and Sales maintenance concerns).
  • Cross cut concerns. (Printing, logging, sending email etc. which spans across modules).
Now we are aware of our problem "tangled code".

Solution for Tangled code: Weaving

Simply separate the Cross cut concerns from the Core concerns. So create modules for the Cross cut and Core concerns separately and then feed both the modules to the compiler. AOP-supported compilers then compile both the modules and generate one single executable... isn't that cool, guys? Hmmm... now how do we attain that with .NET compilers? Well, until now .NET compilers have not supported actual AOP compiling. So we have to do quite a hack to attain AOP functionality in .NET. AspectJ is an AOP compiler which does AOP implementation for Java. But the way the .NET framework is architected, I hope it should not be a big deal to bring AOP into action... can you hear me, Microsoft? We trust you.
"Weaving is a process of compiling the Core and Cross cut concerns together".
Figure 1.3 AOP weaver in action
In the coming sections, we will try to see the different kinds of weavers documented for AOP. Let us see which type of weaving can we implement in C#. But for now, let's try to understand some basic terminology which you will come across again and again.

Join points, Point cuts and Advice

Join points, Point cuts and Advice are some basics which you will need to understand for AOP.
Join point is a point where a concern will cross cut the main code. Join points can be a method call, function, constructor etc. Join points are useful in identifying problem points in a code. In our customer sales application, we have two join points:
  • pobjClsEmail.Send();
  • pobjClsPrint.Print();
Point Cut tells when it should match a Joint Point. In the above example I can say to match the email join point only when it's called in conjunction with "Invoke". Example:
Invoke( pobjClsEmail.Send(); )
So I have defined my point cut with the Email joint point using the Invoke method.
Advice, when defined, decides the sequence of execution of the advice code with respect to the join point. Advice code is the code which you want to execute before or after the join point. In AOP you can specify the advice code to execute before, around or after the join point is matched.
Note: Whatever the case — before, after or around — the point cut must trigger first.
So depending on what you have specified, the advice code will execute before, after or around the join point. Like in our example, we will want to execute the send and print afterwards.

Types of AOP compilers

OK, AOP compilers come in different flavors and the type of weaving decides what type of compiler it is. There are three types of compilers, or to be specific, weaving types in AOP:
  • Compile time weaving: This type of weaving happens at the compiler level and is not supported currently in .NET. But there are other compilers which I will discuss in my next part of the AOP tutorial. In compile time, Core concern code and the Cross cut code is weaved before being compiled to MSIL code. So before the JIT compilation takes place, using .NET compilers Aspect code is compiled and fed to the main compiler. There are many third party compilers which are available which extend the .NET compiler module and implement this feature. I am sure when Microsoft implements this feature in .NET compilers......it's going to be party time guys.
  • Link time weaving: This type of compilers compile core and cross cut code after the MSIL is generated. Again this has to be done at linker level. So at this moment not supported, we will either have to use third party or wait for Microsoft's AOP compiler.
  • Run time weaving: This type of weaving is done by using the .NET runtime. In short your code detects the Core, Cross cut etc. and executes them at run time. This is supported at this moment in .NET and we will see how we can implement Run time weaving. Again many AOP gurus do argue that it is not actual AOP.... I leave that to the readers.
From my point of view, compiler is AOP featured when it has keyword support for Aspect, Join points, Point cut, Advice etc. So till then .NET guys can go around using the round about methods (which I will explain in the second part) to claim that .NET is AOP featured.
I hope I was able to explain the fundamentals of AOP. In the second part of this tutorial, we will see how we can implement the above AOP features in .NET.
OK guys, just a short note: do give me a feedback on my collection of .NET Interview questions on my website.

Microsoft : Enterprise Library 5.0

Overview

Enterprise Library consists of reusable software components that are designed to assist developers with common enterprise development challenges. It includes a collection of functional application blocks addressing specific cross-cutting concerns such as data access, logging, or validation; and wiring blocks, Unity and the Interception/Policy Injection Application Block, designed to help implement more loosely coupled testable, and maintainable software systems.
Different applications have different requirements, and you will find that not every application block is useful in every application that you build. Before using an application block, you should have a good understanding of your application requirements and of the scenarios that the application block is designed to address. Note that this release of the Enterprise Library includes a selective installer that allows you to choose which of the blocks you wish to install.
Microsoft Enterprise Library 5.0 contains the following application blocks:
  • Caching Application Block. Developers can use this application block to incorporate a cache in their applications. Pluggable cache providers and persistent backing stores are supported.
  • Cryptography Application Block. Developers can use this application block to incorporate hashing and symmetric encryption in their applications.
  • Data Access Application Block. Developers can use this application block to incorporate standard database functionality in their applications, including both synchronous and asynchronous data access and returning data in a range of formats.
  • Exception Handling Application Block. Developers and policy makers can use this application block to create a consistent strategy for processing exceptions that occur throughout the architectural layers of enterprise applications.
  • Logging Application Block. Developers can use this application block to include logging functionality for a wide range of logging targets in their applications. This release further improves logging performance.
  • Policy Injection Application Block. Powered by the Interception mechanism built in Unity, this application block can be used to implement interception policies to streamline the implementation of common features, such as logging, caching, exception handling, and validation, across a system.
  • Security Application Block. Developers can use this application block to incorporate authorization and security caching functionality in their applications.
  • Unity Application Block. Developers can use this application block as a lightweight and extensible dependency injection container with support for constructor, property, and method call injection, as well as instance and type interception.
  • Validation Application Block. Developers can use this application block to create validation rules for business objects that can be used across different layers of their applications.
Enterprise Library also includes a set of core functions, including configuration and instrumentation, and object lifecycle management. These functions are used by all other application blocks.

Design Pattern : Command patterns Implementation

The Command pattern allows a request to exist as an object. OK, let’s understand what that means. Consider the figure ‘Menu and Commands’: we have different actions depending on which menu is clicked. So depending on which menu is clicked, we pass a string which will have the action text in the action string. Depending on the action string, we will execute the action. The bad thing about the code is that it has a lot of ‘IF’ conditions, which makes the coding more cryptic.
Figure: - Menu and Commands
Command pattern moves the above action in to objects. These objects when executed actually execute the command.
As said previously, every command is an object. We first prepare individual classes for every action, i.e. exit, open, file and print. All the above actions are wrapped into classes: the Exit action is wrapped in ‘clsExecuteExit’, the open action is wrapped in ‘clsExecuteOpen’, the print action is wrapped in ‘clsExecutePrint’ and so on. All these classes are inherited from a common interface ‘IExecute’.
Figure: - Objects and Command
Using all the action classes we can now make the invoker. The main work of invoker is to map the action with the classes which have the action.
So we have added all the actions in one collection i.e. the arraylist. We have exposed a method ‘getCommand’ which takes a string and gives back the abstract object ‘IExecute’. The client code is now neat and clean. All the ‘IF’ conditions are now moved to the ‘clsInvoker’ class.

Figure: - Invoker and the clean client

Example 2

Our implementation.

Sample Code

BaseCommand is abstract base class used to define contract.

  1. public abstract class BaseCommand  
  2. {  
  3.         protected Transaction _transaction;  
  4.   
  5.         public abstract int Execute(Transaction transaction);  
  6.   
  7.         public abstract int Undo();  
  8. }  
ConcreteCommandDeposit and ConcreteCommandWithdraw are the concrete command as well as Receiver classes.

  1. public class ConcreteCommandDeposit : BaseCommand  
  2. {  
  3.         #region Command Members  
  4.   
  5.         public override int Execute(Transaction transaction)  
  6.         {  
  7.             this._transaction = transaction;  
  8.             _transaction.BalanceAmount += _transaction.Amount;  
  9.             return _transaction.BalanceAmount;  
  10.         }  
  11.   
  12.         public override int Undo()  
  13.         {  
  14.             _transaction.BalanceAmount -= _transaction.Amount;  
  15.             return _transaction.BalanceAmount;  
  16.         }  
  17.  
  18.         #endregion  
  19. }  

  1. public class ConcreteCommandWithdraw : BaseCommand  
  2. {  
  3.         #region Command Members  
  4.   
  5.         public override int Execute(Transaction transaction)  
  6.         {  
  7.             this._transaction = transaction;  
  8.             _transaction.BalanceAmount -= _transaction.Amount;  
  9.             return _transaction.BalanceAmount;  
  10.         }  
  11.   
  12.         public override int Undo()  
  13.         {  
  14.             _transaction.BalanceAmount += _transaction.Amount;  
  15.             return _transaction.BalanceAmount;  
  16.         }  
  17.  
  18.         #endregion  
  19. }  
CommandInvoker class encapsulates call to command object and used to invoke command.

  1. public class CommandInvoker  
  2. {  
  3.         private BaseCommand _Command { getset; }  
  4.          
  5.         public CommandInvoker(BaseCommand command)  
  6.         {  
  7.             this._Command = command;  
  8.         }  
  9.   
  10.         public int ExecuteCommand(Transaction param)  
  11.         {  
  12.             return _Command.Execute(param);  
  13.         }  
  14.   
  15.         public int UndoCommand()  
  16.         {  
  17.             return _Command.Undo();  
  18.         }  
  19. }  
Transaction class is just a DTO used to pass parameters to the commands. This is not a part of Command Pattern.

  1. public class Transaction  
  2. {  
  3.         public int Amount { getset; }  
  4.   
  5.         public int BalanceAmount { getset; }  
  6. }  
Following is how clients will use the command pattern to invoke commands.

  1. static void Main(string[] args)  
  2. {  
  3.             int bal = 0;  
  4.              
  5.             BaseCommand cmdDeposit = CommandFactory.GetCommand("Deposit");  
  6.   
  7.             Transaction trans1 = new Transaction();  
  8.             trans1.Amount = 1000;  
  9.   
  10.             CommandInvoker invoker1 = new CommandInvoker(cmdDeposit);  
  11.   
  12.             bal = invoker1.ExecuteCommand(trans1);  
  13.   
  14.             Console.Write("Amount deposited. Your balance is: " + bal.ToString());  
  15.   
  16.             //------------------             
  17.   
  18.             BaseCommand cmdWithdraw = CommandFactory.GetCommand("Withdraw");  
  19.   
  20.             trans1.Amount = 400;  
  21.   
  22.             CommandInvoker invoker2 = new CommandInvoker(cmdWithdraw);  
  23.   
  24.             bal = invoker2.ExecuteCommand(trans1);  
  25.   
  26.             Console.Write("Amount withdrawn. Your balance is: " + bal.ToString());  
  27.   
  28.             // -- Undo withdraw  
  29.             bal = invoker2.UndoCommand();  
  30.   
  31.             Console.Write("Withdrawal Undone. Your balance is: " + bal.ToString());  
  32.   
  33.             Console.Read();  
  34.         }  
  35. }  
CommandFactory is just a utility class. This is not a part of Command Pattern.

  1. public static class CommandFactory  
  2. {  
  3.         public static BaseCommand GetCommand(String command)  
  4.         {  
  5.             switch (command)  
  6.             {  
  7.                 case "Deposit":  
  8.                     return new ConcreteCommandDeposit();  
  9.   
  10.                 case "Withdraw":  
  11.                     return new ConcreteCommandWithdraw();  
  12.                       
  13.                 default:  
  14.                     return null;  
  15.             }  
  16.   
  17.         }  
  18. }  

Summary

Using command objects makes it easier to construct general components that need to delegate, sequence or execute method calls without the need to know the owner of the method or the method parameters.

Reference: