You will probably find a lot about depth buffers on websites dealing with shadow mapping, since rendering the scene depth from the light's perspective to a (preferably) floating-point surface / texture is one of the steps needed to achieve it.
As you can see in the screenshot below, the scene depth is rendered to a surface using a simple shader, shown in the upper left corner, and is then mapped onto the scene in a later pass.
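The surface itself is just a render target texture with a floating-point format. As a minimal sketch (the device pointer and SHADOWMAP_SIZE are placeholder names, not taken from the original code), creating and binding one in Direct3D 9 could look like this:

// Create a one-channel 32-bit float render target for the light-space depth.
// Fall back to D3DFMT_R16F if R32F is not supported by the hardware.
LPDIRECT3DTEXTURE9 pDepthTexture = NULL;
LPDIRECT3DSURFACE9 pDepthSurface = NULL;
g_pDevice->CreateTexture( SHADOWMAP_SIZE, SHADOWMAP_SIZE, 1,
                          D3DUSAGE_RENDERTARGET, D3DFMT_R32F,
                          D3DPOOL_DEFAULT, &pDepthTexture, NULL );
pDepthTexture->GetSurfaceLevel( 0, &pDepthSurface );

// Before drawing the depth pass, redirect rendering to that surface and clear
// it to white, i.e. to the maximum depth value.
g_pDevice->SetRenderTarget( 0, pDepthSurface );
g_pDevice->Clear( 0, NULL, D3DCLEAR_TARGET | D3DCLEAR_ZBUFFER,
                  D3DCOLOR_XRGB(255, 255, 255), 1.0f, 0 );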
In the shader code below, xLightWorldViewProjection is the product of the light's world, view and projection matrices, and xMaxDepth is used to normalize the depth value so that it stays below 1.0 and can be written out as a colour.
To build xLightWorldViewProjection you could do something like this:
// World matrix for the depth pass (identity, the geometry is already in world space).
D3DXMATRIX matLightWorld;
D3DXMatrixIdentity( &matLightWorld );

// View matrix looking from the light's position towards the origin.
D3DXVECTOR3 vLightPos( -2500.0f, 400.0f, -200.0f );
D3DXVECTOR3 vLookAt( 0.0f, 0.0f, 0.0f );
D3DXVECTOR3 vUp( 0.0f, 1.0f, 0.0f );
D3DXMATRIX matLightView;
D3DXMatrixLookAtLH( &matLightView, &vLightPos, &vLookAt, &vUp );

// Projection for the light; the far plane (4000.0f) is a natural value for xMaxDepth.
D3DXMATRIX matLightProjection;
D3DXMatrixPerspectiveFovLH( &matLightProjection, D3DXToRadian(45.0f),
                            (FLOAT)SCREEN_WIDTH / (FLOAT)SCREEN_HEIGHT, 1.0f, 4000.0f );

// Combined matrix that the shader receives as xLightWorldViewProjection.
D3DXMATRIX matLightWorldViewProjection = matLightWorld * matLightView * matLightProjection;
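The resulting matrix and the normalization factor then have to reach the shader. Assuming the shaders below live in an ID3DXEffect called pEffect (an assumption, not shown in the original code), passing them could look like this:

// Hand the matrix and the depth normalization factor to the effect; the
// parameter names have to match the ones declared in the .fx file.
pEffect->SetMatrix( "xLightWorldViewProjection", &matLightWorldViewProjection );
pEffect->SetFloat( "xMaxDepth", 4000.0f );   // same value as the light's far plane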

// Effect parameters set by the application.
float4x4 xLightWorldViewProjection;
float xMaxDepth;

struct SMapVertexToPixel
{
    float4 Position   : POSITION;
    float3 Position2D : TEXCOORD0;
};

struct SMapPixelToFrame
{
    float4 Color : COLOR0;
};
SMapVertexToPixel ShadowMapVertexShader( float4 inPos : POSITION )
{
    SMapVertexToPixel Output = (SMapVertexToPixel)0;

    // Transform the vertex into the light's clip space.
    Output.Position = mul( inPos, xLightWorldViewProjection );

    // Pass the transformed position on so the pixel shader can read its depth.
    Output.Position2D = Output.Position.xyz;

    return Output;
}

SMapPixelToFrame ShadowMapPixelShader( SMapVertexToPixel PSIn )
{
    SMapPixelToFrame Output = (SMapPixelToFrame)0;

    // Output the light-space depth, scaled into [0, 1], as the colour.
    Output.Color = PSIn.Position2D.z / xMaxDepth;

    return Output;
}
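To actually render the depth pass you would select the technique that compiles these two shaders and draw the scene into the floating-point surface. A rough sketch, assuming the technique is called ShadowMap in the effect file (an assumed name, not from the original), could look like this:

// Depth pass: render the scene from the light's point of view into pDepthSurface.
pEffect->SetTechnique( "ShadowMap" );
UINT numPasses = 0;
pEffect->Begin( &numPasses, 0 );
pEffect->BeginPass( 0 );
// ... draw the scene geometry here ...
pEffect->EndPass();
pEffect->End();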