Linux PCI DMA with scatter-gather list that does not raise the interrupt


Post by Anonymous »

I am developing a Linux PCI driver for a custom PCI device that contains an FPGA and a PLX bridge. The device performs DMA transfers into system memory, and my driver is responsible for passing this data on to user space. From user space I issue a command (SGLIST_CHA) that triggers the driver to allocate 10 buffers of 4096 bytes each with dma_alloc_coherent(). These buffers are cast to a custom DMA_LIST structure (with fields for PADR, LADR, SIZ and DPR) that holds the scatter-gather (SG) DMA descriptor information. The descriptor list is built and the physical address of the first descriptor is written into the device's DMADPR0 register. Then, from user space, I send ~16 KB buffers (10 times), which the driver pins with pin_user_pages() and maps with dma_map_sg(). The mapped SG entries are written into the descriptor table, and each 4K transfer is flagged with DMA_READ | ENABLE_TERM_INT, with an interrupt expected on completion.

Although the device appears to write the memory correctly, I never receive any interrupts. I use dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) because of hardware limitations with 64-bit addressing, and I call dma_sync_single_for_device() on the descriptor buffer before starting the DMA. I am trying to understand whether misuse of the Linux DMA API, improper memory synchronization, a descriptor alignment problem, or something else is causing this. Any help or guidance in resolving this interrupt problem would be greatly appreciated.
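
For reference, the sketch below shows the interrupt-enable path that, as far as I understand the PLX documentation, is needed in addition to the per-descriptor ENABLE_TERM_INT flag: the DMA channel's mode register has to route the "done" interrupt to the PCI side, and the bridge's INTCSR has to have both the PCI interrupt enable and the DMA channel 0 interrupt enable set. The register offsets, bit positions and the itp_enable_dma0_irq() helper are placeholders of my own (not taken from the driver further down) and would need to be checked against the data sheet.

Code:

#define PLX_INTCSR            0x68        /* assumed register offset */
#define PLX_DMAMODE0          0x80        /* assumed register offset */
#define INTCSR_PCI_INT_EN     (1u << 8)   /* assumed bit position */
#define INTCSR_DMA0_INT_EN    (1u << 18)  /* assumed bit position */
#define DMAMODE_DONE_INT_EN   (1u << 10)  /* assumed bit position */
#define DMAMODE_INT_TO_PCI    (1u << 17)  /* assumed bit position */

/* Route the DMA channel 0 "done" interrupt to the PCI pin and unmask it. */
static void itp_enable_dma0_irq(void __iomem *plx_regs)
{
	u32 val;

	val = readl(plx_regs + PLX_DMAMODE0);
	writel(val | DMAMODE_DONE_INT_EN | DMAMODE_INT_TO_PCI, plx_regs + PLX_DMAMODE0);

	val = readl(plx_regs + PLX_INTCSR);
	writel(val | INTCSR_PCI_INT_EN | INTCSR_DMA0_INT_EN, plx_regs + PLX_INTCSR);
}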

Please see the relevant parts of the driver below:

Code:

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/sched/signal.h>
/* plus the driver's own header providing ULONG, UCHAR, PUCHAR, MAX_DEVICES,
   MAX_LIST_COUNT, the PLX register offsets, the ioctl codes and ITP_VERSION
   (not shown in the original post) */

    DMA_LIST *pDmaListA[MAX_LIST_COUNT];
DMA_LIST *pDmaListB[MAX_LIST_COUNT];
dma_addr_t ulStartPtrA[MAX_LIST_COUNT + 1];
dma_addr_t ulStartPtrB[MAX_LIST_COUNT + 1];
dma_addr_t ulHwStartPtrA[MAX_LIST_COUNT + 1];
dma_addr_t ulHwStartPtrB[MAX_LIST_COUNT + 1];
ULONG iListCntA,iListCntB;

UCHAR* pchVirtAddr[MAX_DEVICES] = {NULL} ;
ULONG ulTotalDmaListA[MAX_DEVICES][MAX_LIST_COUNT];
ULONG ulTotalDmaListB[MAX_DEVICES][MAX_LIST_COUNT];
UCHAR uchIntChannelA[MAX_DEVICES];
UCHAR uchIntChannelB[MAX_DEVICES];
UCHAR uchIntChannelAB[MAX_DEVICES];

ULONG ulOpen[MAX_DEVICES];
ULONG ulListCntA[MAX_DEVICES],ulListCntB[MAX_DEVICES];
ULONG ulIntCntA[MAX_DEVICES],ulIntCntB[MAX_DEVICES];
ULONG ulPages_listA[MAX_LIST_COUNT],ulPages_listB[MAX_LIST_COUNT],ulPages_memA[MAX_LIST_COUNT],ulPages_memB[MAX_LIST_COUNT];
void* dma_coherent_virtA[MAX_LIST_COUNT];
dma_addr_t dma_coherent_handleA[MAX_LIST_COUNT];
void* dma_coherent_virtCHA[MAX_LIST_COUNT] ;
dma_addr_t dma_coherent_handleCHA[MAX_LIST_COUNT];
void *Base_Add;
void* ITP_Mapped_Addr[3] = {0};
unsigned long long mem_size[3];  //64 Bit
void *BaseAddress[3];
CONFIG_STRUCT stConfig;

struct cdev Struct_ITP;
struct page **pBuffId = NULL ;
struct page **pEventA;
struct page **pEventB;
struct page **pages_list_sgA[MAX_LIST_COUNT];
struct page **pages_list_sgB[MAX_LIST_COUNT];
struct page **pages_mem_sgA[MAX_LIST_COUNT];
struct page **pages_mem_sgB[MAX_LIST_COUNT];
struct scatterlist *dma_list_sgA[MAX_DEVICES][MAX_LIST_COUNT] = {{NULL}};
struct scatterlist *dma_list_sgB[MAX_DEVICES][MAX_LIST_COUNT] = {{NULL}};
struct scatterlist *dma_mem_sgA[MAX_DEVICES][MAX_LIST_COUNT]  = {{NULL}};
struct scatterlist *dma_mem_sgB[MAX_DEVICES][MAX_LIST_COUNT]  = {{NULL}};
int dma_list_sgcntA[MAX_DEVICES][MAX_LIST_COUNT],dma_list_sgcntB[MAX_DEVICES][MAX_LIST_COUNT];
int dma_mem_sgcntA[MAX_DEVICES][MAX_LIST_COUNT],dma_mem_sgcntB[MAX_DEVICES][MAX_LIST_COUNT];

ULONG ulDmaLength[MAX_DEVICES][2] ;
long lDmaRemndr[MAX_DEVICES][2] ;
int  bDevCloseStatusA[MAX_DEVICES] ;
int  bDevCloseStatusB[MAX_DEVICES] ;

typedef struct {
uint32_t  u32PADR;       // PCI Low address
uint32_t  u32LADR;       // Local Address - Card side
uint32_t  u32SIZ;        // DMA Transfer size
uint32_t  u32DPR;        // Descriptor pointer
uint32_t  u32HPADR ;    // PCI High address
uint32_t  dummy2 ;
uint32_t  dummy3;
uint32_t  dummy4 ;
} DMA_LIST;
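
/*
 * Illustrative note (not in the original driver): the low four bits of each
 * u32DPR word carry the chain flags (descriptor located in PCI space, end of
 * chain, interrupt after terminal count, transfer direction), which is why a
 * descriptor's bus address must be 16-byte aligned before it can be OR-ed
 * with PCI_DESC | DMA_READ | ENABLE_TERM_INT.  The check below only documents
 * the layout declared above; the exact descriptor format should be verified
 * against the PLX data sheet.
 */
static_assert(sizeof(DMA_LIST) == 8 * sizeof(uint32_t),
	      "DMA_LIST descriptor layout changed unexpectedly");
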
int iCount;
unsigned int irq[MAX_DEVICES];   /* IRQ numbers can exceed 255, so do not store them in a UCHAR */
UCHAR uchChannel;
PUCHAR chBuffId;
PUCHAR eventA;
PUCHAR eventB;
dev_t devnum;
#define DEBUG 1
#define __DEBUG__ 1
void* PCM_Mapped_Addr[MAX_DEVICES];
void* EEPROM_Mapped_Addr[MAX_DEVICES];
void *Base_Addr[MAX_DEVICES];
pid_t processno;
int   ITPflag[MAX_DEVICES];
ULONG   SignalNo[MAX_DEVICES][2];
ULONG   Pid[MAX_DEVICES][2],gMinor;
struct pid *pPidVal[MAX_DEVICES][2] = {{NULL}};
int   cnt=0;
struct pci_device_id devid[3];
struct pci_device_id *gDevid;
struct device* gDev[MAX_DEVICES];
int count=0,h;
dma_addr_t simple_region_start,simple_region_size;
void ITP_do_tasklet(struct tasklet_struct *unused );
//void ITP_do_tasklet(unsigned long );
DECLARE_TASKLET(ITP_tasklet, ITP_do_tasklet);
//DECLARE_TASKLET(ITP_tasklet, ITP_do_tasklet,1);
irqreturn_t ITP_isr(int ,void *);
void ITP_unallocate(struct device*,int,int);

void ITPDmaExecutionRoutine(struct scatterlist*,int,int);

static struct pci_device_id ITP[]  = {
{ PCI_DEVICE(VENDOR,DEVICE)},
{0, }
};

MODULE_DEVICE_TABLE(pci,ITP);

static int probe(struct pci_dev *dev, const struct pci_device_id *id)
{
int j,ret,status;
unsigned long  max,min,ulAddr;
int iBaseCount = 0;
ret = pci_enable_device(dev);
if(ret)
{
printk("  Error in enabling PCI device") ;
}
/* ... (a loop over the device BARs was truncated in the original post) ... */
if (dma_set_mask(&dev->dev, DMA_BIT_MASK(32)))
{
printk("DRIVER:Device No : %d DMA mask Operation is Denied\n",count);
}
else
{
printk("DRIVER:Device No : %d DMA mask Operation is Allowd : \n",count);
}
if (dma_set_coherent_mask(&dev->dev, DMA_BIT_MASK(32)))
{
if (dma_set_mask (&dev->dev, 0x07FFFFF))
printk("DRIVER:Device No : %d DMA Operation is Denied\n",count);
else
printk("DRIVER:Device No : %d DMA Operation is Allowed\n",count);
}
else
{
printk("Using fallback 32-bit DMA mask\n");
}
/*if (dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32)))
{
if (dma_set_mask (&dev->dev, 0x07FFFFF))
printk("DRIVER:Device No : %d DMA Operation is Allowed\n",count);
else
printk("DRIVER:Device No : %d DMA Operation is Denied\n",count);
}
else
{
printk("Using fallback 32-bit DMA mask\n");
}
*/

irq[count] = dev->irq;

devid[count] = *id;
gDev[count] = &dev->dev;
/*status = request_irq(irq[count],&ITP_isr,IRQF_SHARED,"PCIITP",&devid[count]);// in RHEL 8 onwards IRQF_DISABLED become obsolete in RHEL 6 //SA_INTERRUPT changes to IRQF_DISABLED
if(status)
printk("Error:IRQ Request Failed  %d\n",status);
else
{
printk("IRQ Request Succ+eded  %d %d  with SZ of DMA_LIST = %lu \n",status,irq[count],sizeof(DMA_LIST));
}*/
count++;

return 0;

}
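
/*
 * Hedged sketch (not part of the original probe() above): the usual pattern
 * for a device limited to 32-bit addressing is a single
 * dma_set_mask_and_coherent() call whose failure is treated as fatal, plus
 * pci_set_master() so the device is allowed to act as a PCI bus master.
 * The helper name is illustrative only.
 */
static int itp_setup_dma(struct pci_dev *pdev)
{
	int err;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "no usable 32-bit DMA configuration\n");
		return err;
	}

	/* required for the device to issue DMA (and MSI) cycles */
	pci_set_master(pdev);
	return 0;
}
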
void remove(struct pci_dev *dev)
{
int iBaseCnt = 0;
#ifdef DEBUG
printk("Device Remove\n");
#endif
for(iBaseCnt = 0;iBaseCnt < TOTAL_DEVICE_RESOURCE;iBaseCnt++)
{
iounmap(ITP_Mapped_Addr[iBaseCnt]);
}
}
static int ITP_open(struct inode *inode, struct file *filp)
{
int minor,status;
minor = MINOR(filp->f_path.dentry->d_inode->i_rdev); //MINOR(filp->f_dentry->d_inode->i_rdev);
if(minor == 0)
return 0;
else
minor = minor - 1;
#ifdef DEBUG
printk("Device Opened Minor: %d\n",minor);
printk("Device Opened IRQ: %d\n",irq[minor]);
#endif
if(ulOpen[minor] == 0)
{
//enable_irq(irq[minor]);
//status = request_irq(irq[minor],&ITP_isr,IRQF_SHARED | IRQF_DISABLED,"PCIITP",&devid[minor]);//in RHEL 6                                      //SA_INTERRUPT changes to IRQF_DISABLED
status = request_irq(irq[minor],&ITP_isr,IRQF_SHARED,"PCIITP",&devid[minor]);// in RHEL 8 onwards IRQF_DISABLED become obsolete in RHEL 6 //SA_INTERRUPT changes to IRQF_DISABLED
if(status)
printk("Error:IRQ Request Failed  %d\n",status);
else
{
printk("IRQ Request Succ+eded  %d %d  with SZ of DMA_LIST = %lu \n",status,irq[minor],sizeof(DMA_LIST));
}
ulDmaLength[minor][0] = 0 ;
ulDmaLength[minor][1] = 0 ;
lDmaRemndr[minor][0] = 0 ;
lDmaRemndr[minor][1] = 0 ;
bDevCloseStatusA[minor] = 0 ;  //false
bDevCloseStatusB[minor] = 0 ;
}
ulOpen[minor]++;

cnt=0;
return 0;

}
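
/*
 * Hedged sketch (illustrative, not the driver's code): for a PCI function the
 * interrupt line is normally requested once in probe() rather than in open(),
 * with the pci_dev (or a per-device private structure) as the dev_id cookie;
 * IRQF_SHARED requires that cookie to be unique so the handler and free_irq()
 * can tell the sharing devices apart.  The helper name is illustrative only.
 */
static int itp_request_irq(struct pci_dev *pdev)
{
	return request_irq(pdev->irq, ITP_isr, IRQF_SHARED, "PCIITP", pdev);
}
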
static int ITP_close(struct inode *inode, struct file *filp)
{

int minor,i;
minor = MINOR(filp->f_path.dentry->d_inode->i_rdev); //MINOR(filp->f_path.dentry->d_inode->i_rdev);
if(minor == 0)
return 0;
else
minor = minor -1;
#ifdef DEBUG
printk("Device Closed Minor: %d\n",minor);
#endif
ulOpen[minor]--;
if(ulOpen[minor] == 0)
{
bDevCloseStatusA[minor] = 1 ;          //true
bDevCloseStatusB[minor] = 1 ;
#ifdef DEBUG
printk("Stoppting Acquisition: %d\n",minor);
#endif
if(EEPROM_Mapped_Addr[minor])
{
writel( 0x1414 ,(UINT *)EEPROM_Mapped_Addr[minor] + DMACSR0/4);
}
if(PCM_Mapped_Addr[minor])
{
writel( 0 ,(UINT *)PCM_Mapped_Addr[minor] + DMA_TRANS_ENABLE0);
writel( 0 ,(UINT *)PCM_Mapped_Addr[minor] + DMA_TRANS_ENABLE1);
}

for(i = 0;i< MAX_LIST_COUNT;i++)
{
ulTotalDmaListA[minor][i] = 0;
pDmaListA[i] = NULL;
ulStartPtrA[i] = 0;
}
ulIntCntA[minor] = 0;
ulListCntA[minor] = 0;
iListCntA   = 0;
uchIntChannelA[minor] = 0;

for(i = 0;i< MAX_LIST_COUNT;i++)
{
ulTotalDmaListB[minor][i] = 0;
pDmaListB[i] = NULL;
ulStartPtrB[i] = 0;
}
ulIntCntB[minor]  = 0;
ulListCntB[minor] = 0;
iListCntB   = 0;
uchIntChannelB[minor] = 0;

ITP_unallocate(gDev[minor],minor,CH_A);
ITP_unallocate(gDev[minor],minor,CH_B);

tasklet_kill(&ITP_tasklet);
free_irq(irq[minor],&devid[minor]);

ulDmaLength[minor][0] = 0 ;
ulDmaLength[minor][1] = 0 ;
lDmaRemndr[minor][0] = 0 ;
lDmaRemndr[minor][1] = 0 ;

}
return 0;
}
ssize_t ITP_read(struct file *filp, char *buff, size_t count, loff_t *offset)
{
int minor,result = 0,i = 0; //force = 1
int err = 0;
dma_addr_t NoPages = 0,addr = 0;
dma_addr_t ulLength = 0,uloffset1 = 0,ulTotalLen = 0;
void* vPtr = NULL;
unsigned long first,last,ulOffset,uaddr;

dma_addr_t dma_handle;
minor = MINOR(filp->f_path.dentry->d_inode->i_rdev); //MINOR(filp->f_dentry->d_inode->i_rdev);
if(minor == 0)
return 0;
else
minor = minor -1;
#ifdef DEBUG
printk("Device Reading Operation  Minor: %d\n",minor);
printk("Memory size to be mapped for id %d %ld\n",uchChannel,(ULONG)count);
#endif

count = 4096;

if(uchChannel == SGLIST_CHA)
{
result = 1 ;
vPtr = dma_alloc_coherent(gDev[minor], count, &dma_handle, GFP_KERNEL);
if (!vPtr) {
printk("dma_alloc_coherent failed for data buffer\n");
return -ENOMEM;
}
dma_coherent_virtA[iListCntA] = vPtr;
dma_coherent_handleA[iListCntA] = dma_handle;
pchVirtAddr[minor] = vPtr;
ulStartPtrA[iListCntA] = dma_handle ;

#ifdef DEBUG
printk("SGLIST_CHA :  %lu pchVirtAddr %X \n",iListCntA,pchVirtAddr[minor] );
printk("Allocated: virt=%p (aligned=%d), phys=0x%llx (aligned=%d) Offset: %lu No Pages %lu",vPtr, IS_ALIGNED((unsigned long)vPtr, 16),dma_handle, !(dma_handle &  0xF),ulOffset,NoPages);

#endif
ITPDmaExecutionRoutine(dma_handle,minor,dma_list_sgcntA[minor][iListCntA]);

}

if(uchChannel == CHA)
{

result = 1 ;
count = 4096 ;
NoPages = 1 ;
ulOffset = 0 ;
vPtr = dma_alloc_coherent(gDev[minor], 4096, &dma_handle, GFP_KERNEL);
if (!vPtr)
{
printk("dma_alloc_coherent failed for data buffer\n");
return -ENOMEM;
}

dma_coherent_virtCHA[iListCntA] = vPtr;
dma_coherent_handleCHA[iListCntA] = dma_handle;
ITPDmaExecutionRoutine(dma_coherent_handleCHA[iListCntA] ,minor,dma_mem_sgcntA[minor][iListCntA]);
dma_sync_single_for_device(gDev[minor],dma_coherent_handleCHA[iListCntA] ,4096,DMA_TO_DEVICE) ;
ulDmaLength[minor][0] = count ;
lDmaRemndr[minor][0] = 0 ;
bDevCloseStatusA[minor] = 0 ;
}

ulTotalLen = 0;

return 0;
}
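
/*
 * Hedged sketch of the pin_user_pages() / dma_map_sg() path described in the
 * question; the posted ITP_read() above only shows the coherent-buffer paths.
 * Names are illustrative, error unwinding is minimal, and
 * sg_alloc_table_from_pages()/dma_map_sgtable() are used here simply as the
 * most compact way to express the mapping step.
 */
static int itp_map_user_buf(struct device *dev, unsigned long uaddr, size_t len,
			    struct sg_table *sgt, struct page ***pages_out,
			    int *npages_out)
{
	int npages = DIV_ROUND_UP(offset_in_page(uaddr) + len, PAGE_SIZE);
	struct page **pages;
	int pinned, ret;

	pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	/* the device writes into this memory, hence FOLL_WRITE */
	pinned = pin_user_pages_fast(uaddr, npages, FOLL_WRITE, pages);
	if (pinned != npages) {
		if (pinned > 0)
			unpin_user_pages(pages, pinned);
		kfree(pages);
		return pinned < 0 ? pinned : -EFAULT;
	}

	ret = sg_alloc_table_from_pages(sgt, pages, npages,
					offset_in_page(uaddr), len, GFP_KERNEL);
	if (ret)
		goto err_unpin;

	ret = dma_map_sgtable(dev, sgt, DMA_FROM_DEVICE, 0);
	if (ret) {
		sg_free_table(sgt);
		goto err_unpin;
	}

	*pages_out = pages;
	*npages_out = npages;
	return 0;

err_unpin:
	unpin_user_pages(pages, npages);
	kfree(pages);
	return ret;
}
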
void ITPDmaExecutionRoutine(struct scatterlist *sglist,int minor,int NumberOfElements)
{
uint32_t *desc ;
UCHAR ucQwrdAlign;
UCHAR uchFlag = 0;
UINT i;
int index = minor;
dma_addr_t ulNextDmaList = 0,alligned_addr ;
struct scatterlist *sgEntry ;
if(uchChannel == SGLIST_CHA)
{
uchFlag = PCI_DESC | DMA_READ | ENABLE_TERM_INT;
ulStartPtrA[iListCntA]      = dma_coherent_handleA[iListCntA] ;
ucQwrdAlign                 =  (ulStartPtrA[iListCntA] & 0x0F) ;//(UCHAR)(0x10 - (ulStartPtrA[iListCntA] & 0x0F));
if(ucQwrdAlign != 0 )
{
ucQwrdAlign = (UCHAR)(0x10 - (ulStartPtrA[iListCntA] & 0x0F));
}
alligned_addr               = ulStartPtrA[iListCntA]  + ucQwrdAlign;
ulStartPtrA[iListCntA]      = ulStartPtrA[iListCntA]  + ucQwrdAlign;
ulStartPtrA[iListCntA]      = ulStartPtrA[iListCntA] | uchFlag;
pDmaListA[iListCntA]        = (DMA_LIST*)(dma_coherent_virtA[iListCntA] + ucQwrdAlign);
iListCntA++;
if(iListCntA == MAX_LIST_COUNT)
{

writel( ulStartPtrA[0] ,(UINT *)EEPROM_Mapped_Addr[minor] + DMADPR0/4);
iListCntA = 0;
wmb();

}
}
if(uchChannel == CHA)
{
uchFlag = PCI_DESC | DMA_READ | ENABLE_TERM_INT;
i = 0 ;
ulNextDmaList      =   (ulStartPtrA[iListCntA] & 0xFFFFFFF0) ;
alligned_addr      = dma_coherent_handleCHA[iListCntA] ;
pDmaListA[iListCntA][0].u32PADR = lower_32_bits(alligned_addr) ;
pDmaListA[iListCntA][0].u32LADR = LOCAL_DEVICE_ADDRESS_A;
pDmaListA[iListCntA][0].u32SIZ  = 4096;
pDmaListA[iListCntA][0].u32HPADR  = upper_32_bits(alligned_addr) ;
pDmaListA[iListCntA][0].u32DPR    = (ulStartPtrA[iListCntA+1] & 0xFFFFFFF0) | uchFlag ;  // next address
iListCntA++;
if(iListCntA == MAX_LIST_COUNT)
{
pDmaListA[iListCntA - 1][0].u32DPR =  (ulStartPtrA[0] & 0xFFFFFFF0) | uchFlag ;
iListCntA = 0;
desc = (uint32_t*)(dma_coherent_virtA[0] ) ;
}

}

}
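
/*
 * Hedged sketch (illustrative only) of building one circular descriptor ring
 * in a single coherent allocation.  dma_alloc_coherent() already returns
 * memory that is at least page aligned, so no manual 16-byte fix-up of the
 * descriptor address is needed, and no dma_sync_single_for_device() call is
 * required for coherent memory; only the ordering of the final register
 * writes matters.  'nents' must be the value returned by dma_map_sg(), and
 * the flag macros are the driver's own.  Whether DMADPR0 itself may carry
 * more than the first-descriptor address plus the descriptor-location bit
 * should be checked against the data sheet.
 */
static u32 itp_build_ring(struct device *dev, struct scatterlist *sgl, int nents,
			  DMA_LIST **ring_out, dma_addr_t *ring_dma_out)
{
	DMA_LIST *ring;
	dma_addr_t ring_dma;
	struct scatterlist *sg;
	int i;

	ring = dma_alloc_coherent(dev, nents * sizeof(*ring), &ring_dma, GFP_KERNEL);
	if (!ring)
		return 0;

	for_each_sg(sgl, sg, nents, i) {
		dma_addr_t next = (i == nents - 1) ? ring_dma
						   : ring_dma + (i + 1) * sizeof(*ring);

		ring[i].u32PADR  = lower_32_bits(sg_dma_address(sg));
		ring[i].u32HPADR = upper_32_bits(sg_dma_address(sg));
		ring[i].u32LADR  = LOCAL_DEVICE_ADDRESS_A;
		ring[i].u32SIZ   = sg_dma_len(sg);
		/* chain flags live in the low bits of each descriptor's DPR word */
		ring[i].u32DPR   = lower_32_bits(next) |
				   PCI_DESC | DMA_READ | ENABLE_TERM_INT;
	}

	*ring_out = ring;
	*ring_dma_out = ring_dma;
	/* value to program into DMADPR0: first descriptor + location bit */
	return lower_32_bits(ring_dma) | PCI_DESC;
}
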
ssize_t ITP_write(struct file *filp, const char *buf, size_t count, loff_t *offset)
{
int minor,i=0,ret= 0 ;
UCHAR bufVal[10] ;
minor = MINOR(filp->f_path.dentry->d_inode->i_rdev); //MINOR(filp->f_dentry->d_inode->i_rdev);
if(minor == 0)
return 0;
else
minor = minor -1;
#ifdef DEBUG
printk("Device Writing Operation  Minor: %d\n",minor);
#endif
if(count > sizeof(bufVal))
count = sizeof(bufVal);
ret = copy_from_user(bufVal,(UCHAR*)buf,sizeof(UCHAR)*count) ;
if(bufVal[0] == RESET_VALUES_A)
{
for(i = 0;i< MAX_LIST_COUNT;i++)
{
ulTotalDmaListA[minor][i] = 0;
pDmaListA[i] = NULL;
ulStartPtrA[i] = 0;
}
ulIntCntA[minor] = 0;
ulListCntA[minor] = 0;
iListCntA   = 0;
uchIntChannelA[minor] = 0;
ITP_unallocate(gDev[minor],minor,CH_A);

ulDmaLength[minor][0] = 0 ;
lDmaRemndr[minor][0] = 0 ;

}
else if (bufVal[0] == RESET_VALUES_B)
{
for(i = 0;i<  MAX_LIST_COUNT;i++)
{
ulTotalDmaListB[minor][i] = 0;
pDmaListB[i] = NULL;
ulStartPtrB[i] = 0;
}
ulIntCntB[minor]  = 0;
ulListCntB[minor] = 0;
iListCntB   = 0;
uchIntChannelB[minor] = 0;
ITP_unallocate(gDev[minor],minor,CH_B);
ulDmaLength[minor][1] = 0 ;
lDmaRemndr[minor][1] = 0 ;
}
else
uchChannel = bufVal[0] ;
return 0;
}
irqreturn_t ITP_isr(int i, void *dev_id)
{

int j,minor = 0;
//struct pid *pPidVal = NULL ;
ULONG  ulChanReg,ulLocalInt=0,ulLocalInt2 = 0 ;
UCHAR  uchClrIntReg;
printk(" In ISR") ;
for(j = 0;j < count;j++)
{
/* ... (the device lookup, the DMA status register reads and a check against
   ulDmaLength[minor][0] / bDevCloseStatusA[minor] were truncated in the
   original post) ... */

if(ulIntCntA[minor] >= ulTotalDmaListA[minor][ulListCntA[minor]])
{
ulLocalInt = readl((UINT *)PCM_Mapped_Addr[minor]+ 0x20);
ulIntCntA[minor] = 0;
chBuffId[0] = (UCHAR)ulListCntA[minor];
//ret = kill_proc( Pid[minor][0], SignalNo[minor][0], 1);
//instesd of kill_proc()  sreejith

if(pPidVal[minor][0])
{
kill_pid(pPidVal[minor][0],SignalNo[minor][0], 1);
}
else
printk("ERROR: CHA Sending signal Failed\n");

//Added for Interrupt Miss

ulListCntA[minor]++;
if(ulListCntA[minor] == MAX_LIST_COUNT)
ulListCntA[minor] = 0;
}
}

return IRQ_HANDLED;
}
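
/*
 * Hedged sketch (illustrative only): with IRQF_SHARED the handler should read
 * the bridge's interrupt status, return IRQ_NONE when this device did not
 * raise the line, and clear/acknowledge the DMA channel 0 done bit before
 * returning IRQ_HANDLED; otherwise the kernel may eventually disable the line
 * as unhandled.  Offsets and bit positions are placeholders to be verified
 * against the PLX data sheet.
 */
#define PLX_INTCSR_OFF        0x68        /* assumed register offset */
#define PLX_DMACSR0_OFF       0xA8        /* assumed register offset */
#define INTCSR_DMA0_ACTIVE    (1u << 21)  /* assumed status bit */
#define DMACSR_CLEAR_INT      (1u << 3)   /* assumed clear-interrupt bit */

static irqreturn_t itp_check_and_ack(void __iomem *plx_regs)
{
	u32 intcsr = readl(plx_regs + PLX_INTCSR_OFF);

	if (!(intcsr & INTCSR_DMA0_ACTIVE))
		return IRQ_NONE;                 /* shared line: not this device */

	/* acknowledge DMA channel 0 so the level-triggered line is released */
	writeb(readb(plx_regs + PLX_DMACSR0_OFF) | DMACSR_CLEAR_INT,
	       plx_regs + PLX_DMACSR0_OFF);

	return IRQ_HANDLED;
}
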
void ITP_do_tasklet(struct tasklet_struct *unused)
{
int minor;
minor = h;
#ifdef DEBUG
printk("DRIVER:TASKLET routine %d\n",minor);
#endif
}
void simple_vma_open(struct vm_area_struct *vma)
{
printk(KERN_NOTICE "Simple VMA open, virt %lx, phys %lx\n",
vma->vm_start, vma->vm_pgoff << PAGE_SHIFT);
}

static struct vm_operations_struct simple_remap_vm_ops = {
.open  = simple_vma_open,
};

static int ITP_mmap(struct file *filp, struct vm_area_struct *vma)
{
int result;
#ifdef DEBUG
printk("DRIVER:MMAP routine size %lX\n",vma->vm_end - vma->vm_start);
#endif
result = remap_pfn_range(   vma,
vma->vm_start,
vma->vm_pgoff,
vma->vm_end - vma->vm_start,
vma->vm_page_prot
);
if(result)
return -EAGAIN;
vma->vm_ops = &simple_remap_vm_ops;
simple_vma_open(vma);
return 0;
}

void ITP_unallocate(struct device *dev,int minor,int ch)
{
int i,j;
printk("Deallocating Lists\n") ;
if(ch == CH_A)
{
for(i = 0;i < MAX_LIST_COUNT;i++)
{
if(dma_list_sgA[minor][i] && (dma_list_sgcntA[minor][i] > 0))
{
{
dma_unmap_sg(dev,dma_list_sgA[minor][i], dma_list_sgcntA[minor][i],DMA_TO_DEVICE);

kfree(dma_list_sgA[minor][i]);
dma_list_sgA[minor][i]= NULL;

dma_list_sgcntA[minor][i] = 0 ;
}
if(dma_mem_sgA[minor][i] && (dma_mem_sgcntA[minor][i]>0))
{
dma_unmap_sg(dev,dma_mem_sgA[minor][i], dma_mem_sgcntA[minor][i],DMA_FROM_DEVICE);
kfree(dma_mem_sgA[minor][i]);
dma_mem_sgA[minor][i] = NULL;

dma_mem_sgcntA[minor][i] = 0 ;
}

if (dma_coherent_virtA[i] != NULL && dma_coherent_handleA[i] != 0)
{
dma_free_coherent(gDev[minor], DMA_LIST_SIZE, dma_coherent_virtA[i], dma_coherent_handleA[i]);
dma_coherent_virtA[i] = NULL;
dma_coherent_handleA[i] = 0;
}

if(pages_list_sgA[i])
{
for(j = 0;j < ulPages_listA[i];j++)
unpin_user_page(pages_list_sgA[i][j]);
kfree(pages_list_sgA[i]);
pages_list_sgA[i] = NULL;
}
}
}
/* ... (the CH_B branch of ITP_unallocate and the start of the ioctl handler
   were truncated in the original post) ... */
}

long ITP_control(struct file *filp, unsigned int command, unsigned long argument)
{
int retVal = 0,iData,TotalCard,minor,g_iMinor_No;
ULONG ulData;
ITP_VERSION stVersion;
g_iMinor_No = MINOR(filp->f_path.dentry->d_inode->i_rdev );
minor = g_iMinor_No;
printk(" In IOCTL With Comand %X ",command) ;
if(minor == 0)
{
if(command == DRIVER_VERSION)
{
strcpy(stVersion.date,VERSION_DATE);
stVersion.version = VERSION;
retVal = copy_to_user((ITP_VERSION *)argument,&stVersion,sizeof(ITP_VERSION));
return 0;
}
TotalCard = count;
retVal = copy_to_user((int*)argument,&TotalCard ,sizeof(int));
}
else
minor = minor -1;
switch(command)
{

case ITP_TOTAL_DEVICES:
TotalCard = count;
retVal = copy_to_user((int*)argument,&TotalCard ,sizeof(int));
break;
case ITP_GET_PHYSICAL_ADDR :
retVal = copy_from_user(&ulData,(PULONG)argument,sizeof(ULONG));
iData  = (int)ulData;
ulData = (ULONG)BaseAddress[iData];
retVal = copy_to_user((PULONG)argument,&ulData ,sizeof(ULONG));
break;

case ITP_LIST_HD_CNT_A:

retVal = copy_to_user((int*)argument,&chBuffId[0] ,sizeof(UCHAR));
break ;
case ITP_LIST_HD_CNT_B:

retVal = copy_to_user((int*)argument,&chBuffId[1] ,sizeof(UCHAR));
break ;
case ITP_STOP_DEV_A:
bDevCloseStatusA[minor] = 1 ;
printk("Stop Device invoked in CHA \n") ;
break ;
case ITP_STOP_DEV_B:
bDevCloseStatusB[minor] = 1 ;
printk("Stop Device invoked in CHB \n") ;
break ;
default :
break;
}
return 0;
}

struct file_operations ITP_fops = {
.owner          = THIS_MODULE,
.read           = ITP_read,
.write          = ITP_write,
.open           = ITP_open,
.release        = ITP_close,
.unlocked_ioctl = ITP_control,
.mmap           = ITP_mmap,
};

static struct pci_driver ITP_driver = {
.name     = "PCIITP",
.id_table = ITP,
.probe    = probe,
.remove   = remove,   /* __devexit_p() no longer exists in current kernels */
};
int __init ITP_Init(void)
{
int status,i;
printk("/********** PciITP Module Init********************************/\n");
status = pci_register_driver(&ITP_driver);
if(status >= 0)
{
printk("Pci registeraion succeeded\n");
}
else
{
printk("Pci registeraion Failed\n");
}
cdev_init(&Struct_ITP,&ITP_fops);
Struct_ITP.owner = THIS_MODULE;
Struct_ITP.ops = &ITP_fops;
i = 0;
/* ... (the remainder of ITP_Init, including a per-device loop that registers
   the cdev/device nodes, and the module exit code were truncated in the
   original post) ... */
return 0;
}
