sprd_pcie: remove package in favor of the kernel built-in driver

This commit is contained in:
coolsnowwolf 2024-10-30 11:18:34 +08:00
parent b926a6f9cb
commit 2d97f3db67
56 changed files with 0 additions and 24320 deletions

View File

@ -1,46 +0,0 @@
#
# Copyright (C) 2015 OpenWrt.org
#
# This is free software, licensed under the GNU General Public License v2.
# See /LICENSE for more information.
#

# OpenWrt package Makefile for the out-of-tree sprd_pcie kernel module.
include $(TOPDIR)/rules.mk

PKG_NAME:=sprd_pcie
PKG_VERSION:=1.6
PKG_RELEASE:=1

# kernel.mk supplies LINUX_DIR/LINUX_KARCH; package.mk supplies the
# KernelPackage machinery used below.
include $(INCLUDE_DIR)/kernel.mk
include $(INCLUDE_DIR)/package.mk

# kmod-sprd_pcie package definition: ships the built module and loads it
# automatically at boot (priority 41).
define KernelPackage/sprd_pcie
  SUBMENU:=WWAN Support
  TITLE:=Kernel pcie driver for SPRD device
  FILES:=$(PKG_BUILD_DIR)/sprd_pcie.ko
  AUTOLOAD:=$(call AutoLoad,41,sprd_pcie)
endef

define KernelPackage/sprd_pcie/description
  Kernel module for register a custom pcispd platform device.
endef

# Options passed to the kernel build system for the external-module build
# (M= points Kbuild at our source copy).
MAKE_OPTS:= \
	ARCH="$(LINUX_KARCH)" \
	CROSS_COMPILE="$(TARGET_CROSS)" \
	CXXFLAGS="$(TARGET_CXXFLAGS)" \
	M="$(PKG_BUILD_DIR)" \
	$(EXTRA_KCONFIG)

# Copy the bundled sources into the build directory.
define Build/Prepare
	mkdir -p $(PKG_BUILD_DIR)
	$(CP) ./src/* $(PKG_BUILD_DIR)/
endef

# Build the module against the target kernel tree.
define Build/Compile
	$(MAKE) -C "$(LINUX_DIR)" \
		$(MAKE_OPTS) \
		modules
endef

$(eval $(call KernelPackage,sprd_pcie))

View File

@ -1,33 +0,0 @@
#
# Makefile for the sprd staging modem files
#
# Doubles as a Kbuild fragment (obj-m / ccflags-y, used when invoked by the
# kernel build system) and as a standalone Makefile (sprd_pcie / clean /
# install targets for building against the running kernel).

EXTRA_CFLAGS += -Wno-error -Wno-packed-bitfield-compat
ccflags-y += -DCONFIG_SPRD_PCIE_EP_DEVICE -DCONFIG_SPRD_SIPA -DCONFIG_SPRD_ETHERNET

obj-m += sprd_pcie.o
sprd_pcie-objs := pcie/sprd_pcie_ep_device.o pcie/pcie_host_resource.o pcie/sprd_pcie_quirks.o sipc/sipc.o sipc/sblock.o sipc/sbuf.o \
	sipc/sipc_debugfs.o sipc/smem.o sipc/smsg.o sipc/spipe.o sipc/spool.o power_manager/power_manager.o \
	sipa/sipa_core.o sipa/sipa_eth.o sipa/sipa_nic.o sipa/sipa_skb_send.o sipa/sipa_skb_recv.o sipa/sipa_dummy.o sipa/sipa_debugfs.o sipa/sipa_dele_cmn.o \
	sipa/sipa_phy_v0/sipa_fifo_irq_hal.o sipa/sipa_phy_v0/sipa_common_fifo_hal.o

PWD := $(shell pwd)

# Standalone-build defaults: current machine arch, native compiler, and the
# running kernel's build tree.
ifeq ($(ARCH),)
ARCH := $(shell uname -m)
endif
ifeq ($(CROSS_COMPILE),)
CROSS_COMPILE :=
endif
ifeq ($(KDIR),)
KDIR := /lib/modules/$(shell uname -r)/build
endif

sprd_pcie: clean
	$(MAKE) ARCH=${ARCH} CROSS_COMPILE=${CROSS_COMPILE} -C $(KDIR) M=$(PWD) modules
#cp sprd_pcie.ko /tftpboot/

clean:
	$(MAKE) ARCH=${ARCH} CROSS_COMPILE=${CROSS_COMPILE} -C $(KDIR) M=$(PWD) clean
	# Quote the pattern so the shell does not expand the glob before find
	# sees it (the unquoted form breaks when matching files exist in cwd).
	find . -name "*.o.ur-safe" | xargs rm -f

install: sprd_pcie
	sudo cp sprd_pcie.ko /lib/modules/${shell uname -r}/kernel/drivers/pci/
	sudo depmod

View File

@ -1,31 +0,0 @@
#ifndef _MDM_CTRL_H
#define _MDM_CTRL_H

/*
 * Public interface of the mcd (modem control) driver.
 *
 * The driver offers modem_ctrl_send_abnormal_to_ap() so other modules can
 * notify the AP that an error has been caught; the AP will then trigger
 * its recovery handling for that error.
 */

#include <linux/notifier.h>

/* Modem control commands / status codes shared with user space. */
enum {
	MDM_CTRL_POWER_OFF = 0,
	MDM_CTRL_POWER_ON,
	MDM_CTRL_WARM_RESET,
	MDM_CTRL_COLD_RESET,
	MDM_WATCHDOG_RESET,
	MDM_ASSERT,
	MDM_PANIC,
	MDM_CTRL_PCIE_RECOVERY,
	MDM_POWER_OFF,
	MDM_CTRL_SET_CFG
};

/* Report an abnormal modem status (watchdog/assert/panic) to the AP. */
void modem_ctrl_send_abnormal_to_ap(int status);
/* Drive the modem power/reset GPIOs according to an MDM_CTRL_* command. */
void modem_ctrl_poweron_modem(int on);
/* Arm delivery of CP abnormal events (ignored until this is called). */
void modem_ctrl_enable_cp_event(void);
/* Subscribe/unsubscribe to modem state change notifications. */
int modem_ctrl_register_notifier(struct notifier_block *nb);
void modem_ctrl_unregister_notifier(struct notifier_block *nb);
#endif

View File

@ -1,49 +0,0 @@
#ifndef _PCIE_RC_SPRD_H
#define _PCIE_RC_SPRD_H

#include <linux/platform_device.h>

/*
 * Forward declaration so the prototypes below do not implicitly declare
 * "struct pci_dev" inside a parameter list (which would be a distinct,
 * scope-local type and draws a compiler warning).
 */
struct pci_dev;

/* Link events a client may subscribe to; used as a bit mask in @events. */
enum sprd_pcie_event {
	SPRD_PCIE_EVENT_INVALID = 0,
	SPRD_PCIE_EVENT_LINKDOWN = 0x1,
	SPRD_PCIE_EVENT_LINKUP = 0x2,
	SPRD_PCIE_EVENT_WAKEUP = 0x4,
};

/*
 * Registration record for PCIe link event callbacks.
 * @events:   mask of SPRD_PCIE_EVENT_* values of interest.
 * @pdev:     the root-complex platform device.
 * @callback: invoked with the event and @data when an event fires.
 */
struct sprd_pcie_register_event {
	u32 events;
	struct platform_device *pdev;
	void (*callback)(enum sprd_pcie_event event, void *data);
	void *data;
};

/*
 * SPRD PCIe root complex (e.g. UD710 SoC) can't support PCI hotplug
 * capability. Therefore, the standard hotplug driver can't be used.
 *
 * Whenever one endpoint is plugged or powered on, the EP driver must
 * call sprd_pcie_configure_device() in order to add EP device to system
 * and probe EP driver. If one endpoint is unplugged or powered off,
 * the EP driver must call sprd_pcie_unconfigure_device() in order to
 * remove all PCI devices on PCI bus.
 *
 * return 0 on success, otherwise return a negative number.
 */
/* dummy sprd api */
static inline int sprd_pcie_configure_device(struct platform_device *pdev) { return 0; }
static inline int sprd_pcie_unconfigure_device(struct platform_device *pdev) { return 0; }
static inline void sprd_pcie_teardown_msi_irq(unsigned int irq) { }
static inline void sprd_pcie_dump_rc_regs(struct platform_device *pdev) { }
static inline int sprd_pcie_register_event(struct sprd_pcie_register_event *reg) { return 0; }
static inline int sprd_pcie_deregister_event(struct sprd_pcie_register_event *reg) { return 0; }

#ifdef CONFIG_SPRD_PCIE_AER
/*
 * FIX: this was a plain (non-static, non-inline) function definition in a
 * header, which produces "multiple definition" link errors as soon as the
 * header is included by more than one translation unit. Make it a
 * static inline stub like every other dummy above.
 */
static inline void sprd_pcie_alloc_irq_vectors(struct pci_dev *dev, int *irqs, int services) { }
#else
static inline void sprd_pcie_alloc_irq_vectors(struct pci_dev *dev, int *irqs,
					       int services)
{
}
#endif

#endif

View File

@ -1,59 +0,0 @@
#ifndef _SIPA_H_
#define _SIPA_H_

/* SIPA: Spreadtrum IP accelerator — NIC-style packet interface to the CP. */

#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/if_ether.h>

/* Events delivered to the sipa_notify_cb registered via sipa_nic_open(). */
enum sipa_evt_type {
	SIPA_RECEIVE,
	SIPA_ENTER_FLOWCTRL,
	SIPA_LEAVE_FLOWCTRL,
	SIPA_ERROR,
};

/* Callback invoked with the caller's @priv, the event, and event data. */
typedef void (*sipa_notify_cb)(void *priv, enum sipa_evt_type evt,
			       unsigned int data);

/* Hardware terminal identifiers (5-bit register field; see SIPA_TERM_MAX). */
enum sipa_term_type {
	SIPA_TERM_PCIE0 = 0x10,
	SIPA_TERM_PCIE1 = 0x11,
	SIPA_TERM_PCIE2 = 0x12,
	SIPA_TERM_CP0 = 0x4,
	SIPA_TERM_CP1 = 0x5,
	SIPA_TERM_VCP = 0x6,
	SIPA_TERM_MAX = 0x20, /* max 5-bit register */
};

/* Logical NIC slots managed by the SIPA core. */
enum sipa_nic_id {
	SIPA_NIC_BB0,
	SIPA_NIC_BB1,
	SIPA_NIC_BB2,
	SIPA_NIC_BB3,
	SIPA_NIC_BB4,
	SIPA_NIC_BB5,
	SIPA_NIC_BB6,
	SIPA_NIC_BB7,
	SIPA_NIC_BB8,
	SIPA_NIC_BB9,
	SIPA_NIC_BB10,
	SIPA_NIC_BB11,
	SIPA_NIC_MAX,
};

/* Receive one skb; *netid is set to the originating network id. */
struct sk_buff *sipa_recv_skb(int *netid, int index);
bool sipa_check_recv_tx_fifo_empty(void);
/* Open a NIC bound to @src/@netid; @cb receives sipa_evt_type events. */
int sipa_nic_open(enum sipa_term_type src, int netid,
		  sipa_notify_cb cb, void *priv);
void sipa_nic_close(enum sipa_nic_id nic_id);
/* Queue @skb for transmission towards terminal @dst. */
int sipa_nic_tx(enum sipa_nic_id nic_id, enum sipa_term_type dst,
		int netid, struct sk_buff *skb);
int sipa_nic_rx(int *netid, struct sk_buff **out_skb, int index);
int sipa_nic_rx_has_data(enum sipa_nic_id nic_id);
int sipa_nic_trigger_flow_ctrl_work(enum sipa_nic_id nic_id, int err);
u32 sipa_nic_get_filled_num(void);
void sipa_nic_restore_irq(void);
void sipa_nic_set_tx_fifo_rp(u32 rptr);
#endif

File diff suppressed because it is too large Load Diff

View File

@ -1,85 +0,0 @@
/*
* Copyright (C) 2019 Spreadtrum Communications Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __SIPC_BIG_TO_LITTLE_H
#define __SIPC_BIG_TO_LITTLE_H
//#define CONFIG_SIPC_BIG_TO_LITTLE /* sipc little */
#define BL_READB(addr) \
({ unsigned char __v = (*(volatile unsigned char *) (addr)); __v; })
#define BL_WRITEB(b,addr) (void)((*(volatile unsigned int *) (addr)) = (b))
#define BL_GETB(v) ((v))
#define BL_SETB(v, b) ((v) = (b))
#ifdef CONFIG_SIPC_BIG_TO_LITTLE
/* little 0x78563412
0x12
0x34
0x56
0x78
read:
big: 0x12345678==>0x78563412
write: 0x78563412 ===> 0x12345678*/
#define BL_READW(addr) \
({ unsigned short __t = (*(volatile unsigned short *) (addr)); \
unsigned short __v = ((__t & 0x00ff) << 8) + ((__t & 0xff00) >> 8); \
__v; })
#define BL_READL(addr) \
({ unsigned int __t = (*(volatile unsigned int *) (addr)); \
unsigned int __v = ((__t & 0x000000ff) << 24) + ((__t & 0x0000ff00) << 8) + \
((__t & 0x00ff0000) >> 8) + ((__t & 0xff000000) >> 24); \
__v; })
#define BL_WRITEW(b,addr) \
({ unsigned short __v = (((b) & 0x00ff) << 8) + (((b) & 0xff00) >> 8); \
(*(volatile unsigned short *) (addr)) = __v; })
#define BL_WRITEL(b,addr) \
({ unsigned int __v = (((b) & 0x000000ff) << 24) + (((b) & 0xff00) >> 8) + \
(((b) & 0x00ff0000) >> 8) + (((b) & 0xff000000) >> 24); \
(*(volatile unsigned int *) (addr)) = __v; })
#define BL_GETL(v) \
({unsigned int __v = (((v) & 0x000000ff) << 24) + (((v) & 0x0000ff00) << 8) + \
(((v) & 0x00ff0000) >> 8) + (((v) & 0xff000000) >> 24); \
__v; })
#define BL_SETL(v, b) \
((v) = (((b) & 0x000000ff) << 24) + (((b) & 0x0000ff00) << 8) + \
(((b) & 0x00ff0000) >> 8) + (((b) & 0xff000000) >> 24))
#define BL_GETW(v) \
({unsigned int __v = (((v) & 0x00ff) << 8) + (((v) & 0xff00) >> 8); \
__v; })
#define BL_SETW(v, b) \
((v) = (((b) & 0x00ff) << 8) + (((b) & 0xff00) >> 8))
#else
#define BL_GETW(v) v
#define BL_GETL(v) v
#define BL_SETW(v, b) ((v) = (b))
#define BL_SETL(v, b) ((v) = (b))
#define BL_READW(addr) \
({ unsigned short __v = (*(volatile unsigned short *) (addr)); __v; })
#define BL_READL(addr) \
({ unsigned int __v = (*(volatile unsigned int *) (addr)); __v; })
#define BL_WRITEW(b,addr) (void)((*(volatile unsigned short *) (addr)) = (b))
#define BL_WRITEL(b,addr) (void)((*(volatile unsigned int *) (addr)) = (b))
#endif
#endif

View File

@ -1,184 +0,0 @@
/*
* Copyright (C) 2019 Spreadtrum Communications Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/* MPM: modem power manger
* PMS: power manage source which be used to request
* a modem power manage resource.
*/
#ifndef _SPRD_MPM_H
#define _SPRD_MPM_H
/*
* MPM modem powermanger source state define,
* if in idle state, we can release
* the related resources(such as pcie) of modem.
*/
enum {
SPRD_MPM_IDLE = 0,
SPRD_MPM_BUSY
};
/*
* @sprd_pms: the power manager source data struct,
* can usd it to request wake lock or request modem resource.
*
* @name: the name of a pms.
* @data: the point of MPM.
* @multitask: whether to support multitasking, default is false.
* false, the source can only be used in single task context.
* true, the source can be used multitask context.
* @awake: whether stay awake.
* @awake_cnt: total awake times.
* @pre_awake_cnt pre_awake_cnt.
* @active_cnt: the active counter of the pms.
* @expires: the timer expires value.
* @active_lock: use for protect the active_cnt member.
* @expires_lock: use for protect expires member.
* @entry: an entry of all pms list.
* @wake_timer: used for delay release wakelock.
*/
struct sprd_pms {
const char *name;
void *data;
bool multitask;
bool awake;
unsigned int awake_cnt;
unsigned int pre_awake_cnt;
unsigned int active_cnt;
unsigned long expires;
spinlock_t active_lock;
spinlock_t expires_lock;
struct list_head entry;
struct timer_list wake_timer;
};
/**
* sprd_mpm_create - create a modem powermanger source instacnce.
*
* @dst, which mpm (PSCP, SP, WCN, etc.) will be created.
* @later_idle, will release resource later (in ms).
*/
int sprd_mpm_create(unsigned int dst,
const char *name,
unsigned int later_idle);
/**
* sprd_mpm_init_resource_ops - int resource ops for mpm.
*
* @wait_resource, used to wait request resource ready.
* @request_resource, used to request a resource
* @release_resource, used to release a resource
*/
int sprd_mpm_init_resource_ops(unsigned int dst,
int (*wait_resource)(unsigned int dst,
int timeout),
int (*request_resource)(unsigned int dst),
int (*release_resource)(unsigned int dst));
/**
* sprd_mpm_destroy - destroy a modem powermanger source instacnce.
*
* @dst, which mpm (PSCP, SP, WCN, etc.) will be destroyed.
*/
int sprd_mpm_destroy(unsigned int dst);
/**
* sprd_pms_create - init a pms,
* a module which used it to request a modem power manage resource.
* All the pms interface are not safe in multi-thread or multi-cpu.
* if you want use in multi-thread, please use the pms_ext interface.
*
* @dst, the pms belong to which mpm.
* @name, the name of this pms.
* @pms, the point of this pms.
* @multitask: support multitask.
*
* Returns: NULL failed, > 0 succ.
*/
struct sprd_pms *sprd_pms_create(unsigned int dst,
const char *name, bool multitask);
/**
* sprd_pms_destroy - destroy a pms.
*
* @pms, the point of this pms.
*/
void sprd_pms_destroy(struct sprd_pms *pms);
/**
* sprd_pms_request_resource - request mpm resource
*
* @pms, the point of this pms.
* @timeout, in ms.
*
* Returns:
* 0 resource ready,
* < 0 resoure not ready,
* -%ERESTARTSYS if it was interrupted by a signal.
*/
int sprd_pms_request_resource(struct sprd_pms *pms, int timeout);
/**
* sprd_pms_release_resource - release mpm resource.
*
* @pms, the point of this pms.
*/
void sprd_pms_release_resource(struct sprd_pms *pms);
/**
* sprd_pms_request_wakelock - request wakelock
*
* @pms, the point of this pms.
*/
void sprd_pms_request_wakelock(struct sprd_pms *pms);
/**
* sprd_pms_release_wakelock - release wakelock
*
* @pms, the point of this pms.
*/
void sprd_pms_release_wakelock(struct sprd_pms *pms);
/**
* sprd_pms_request_wakelock_period -
* request wake lock, and will auto reaslse in msec ms.
*
* @pms, the point of this pms.
* @msec, will auto reaslse in msec ms
*/
void sprd_pms_request_wakelock_period(struct sprd_pms *pms, unsigned int msec);
/**
* sprd_pms_release_wakelock_later - release wakelock later.
*
* @pms, the point of this pms.
* @msec, later time (in ms).
*/
void sprd_pms_release_wakelock_later(struct sprd_pms *pms,
unsigned int msec);
/**
* sprd_pms_power_up - just powe up, not wait result.
*
* @pms, the point of this pms.
*/
void sprd_pms_power_up(struct sprd_pms *pms);
/**
* sprd_pms_power_up - just power down,.
*
* @pms, the point of this pms.
* @immediately, whether immediately power down.
*/
void sprd_pms_power_down(struct sprd_pms *pms, bool immediately);
#endif

View File

@ -1,99 +0,0 @@
/**
 * SPRD ep device driver in host side for Spreadtrum SoCs
 *
 * Copyright (C) 2019 Spreadtrum Co., Ltd.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 of
 * the License as published by the Free Software Foundation.
 *
 * This program is used to control ep device driver in host side for
 * Spreadtrum SoCs.
 */
#ifndef __SPRD_PCIE_EP_DEVICE_H
#define __SPRD_PCIE_EP_DEVICE_H

#include <linux/interrupt.h>

/* host receive msi irq */
enum {
	PCIE_MSI_SIPC_IRQ = 0,
	PCIE_MSI_REQUEST_RES,
	PCIE_MSI_EP_READY_FOR_RESCAN,
	PCIE_MSI_RELEASE_RES,
	PCIE_MSI_SCANNED_RESPOND,
	PCIE_MSI_REMOVE_RESPOND,
	PCIE_MSI_IPA,
	PCIE_MSI_MAX_IRQ
};

/* host send doorbell irq */
enum {
	PCIE_DBELL_SIPC_IRQ = 0,
	PCIE_DBEL_EP_SCANNED,
	PCIE_DBEL_EP_REMOVING,
	PCIE_DBEL_IRQ_MAX
};

/* endpoint identifiers (only the modem EP is currently enabled) */
enum {
	PCIE_EP_MODEM = 0,
	/* PCIE_EP_WCN, */
	PCIE_EP_NR
};

/* events passed to the notify callback registered below */
enum {
	PCIE_EP_PROBE = 0,
	PCIE_EP_REMOVE,
	PCIE_EP_PROBE_BEFORE_SPLIT_BAR
};

#ifdef CONFIG_SPRD_SIPA
/* address classes accepted by sprd_ep_ipa_map() */
enum {
	PCIE_IPA_TYPE_MEM = 0,
	PCIE_IPA_TYPE_REG
};
#endif

#define MINI_REGION_SIZE 0x10000 /*64 K default */

/* Register a probe/remove notification callback for endpoint @ep. */
int sprd_ep_dev_register_notify(int ep,
				void (*notify)(int event, void *data),
				void *data);
int sprd_ep_dev_unregister_notify(int ep);
/* Attach @handler to MSI vector @irq of endpoint @ep. */
int sprd_ep_dev_register_irq_handler(int ep,
				     int irq,
				     irq_handler_t handler,
				     void *data);
int sprd_ep_dev_unregister_irq_handler(int ep, int irq);
/* Range variants: attach/detach one handler to [from_irq, to_irq]. */
int sprd_ep_dev_register_irq_handler_ex(int ep,
					int from_irq,
					int to_irq,
					irq_handler_t handler,
					void *data);
int sprd_ep_dev_unregister_irq_handler_ex(int ep,
					  int from_irq,
					  int to_irq);
int sprd_ep_dev_set_irq_addr(int ep, void __iomem *irq_addr);
/* Ring doorbell @irq on the endpoint. */
int sprd_ep_dev_raise_irq(int ep, int irq);
/* NOTE: "doolbell" typo is part of the exported name; kept for ABI. */
int sprd_ep_dev_clear_doolbell_irq(int ep, int irq);
int sprd_ep_dev_set_backup(int ep);
int sprd_ep_dev_clear_backup(int ep);
/* Map @size bytes of CPU physical memory into an EP BAR window. */
void __iomem *sprd_ep_map_memory(int ep,
				 phys_addr_t cpu_addr,
				 size_t size);
void sprd_ep_unmap_memory(int ep, const void __iomem *bar_addr);
int sprd_ep_dev_pass_smem(int ep, u32 base, u32 size);
/* SIPA module entry points, called by the EP device driver. */
int sipa_module_init(struct device *dev);
void sipa_module_exit(void);
int sipa_eth_init(void);
void sipa_eth_exit(void);
int sipa_dummy_init(void);
void sipa_dummy_exit(void);
#ifdef CONFIG_SPRD_SIPA
phys_addr_t sprd_ep_ipa_map(int type, phys_addr_t target_addr, size_t size);
int sprd_ep_ipa_unmap(int type, phys_addr_t cpu_addr);
#endif
#endif

View File

@ -1,107 +0,0 @@
/*
* Copyright (C) 2019 Spreadtrum Communications Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/* mpms: modem powermanger source */
#ifndef _SPRD_PCIE_RESOURCE_H
#define _SPRD_PCIE_RESOURCE_H
#ifdef CONFIG_SPRD_PCIE_EP_DEVICE
#include <linux/platform_device.h>
#endif
#if 0
//#undef pr_debug
//#define pr_debug pr_emerg
#undef pr_info
#define pr_info pr_emerg
#undef pr_err
#define pr_err pr_emerg
#undef dev_dbg
#define dev_dbg dev_emerg
#undef dev_info
#define dev_info dev_emerg
#undef dev_err
#define dev_err dev_emerg
#endif
#if defined(CONFIG_SPRD_PCIE_EP_DEVICE) || defined(CONFIG_PCIE_EPF_SPRD)
/*
* sprd_pcie_wait_resource
* Returns:
* 0 resource ready,
* < 0 resoure not ready,
* -%ERESTARTSYS if it was interrupted by a signal.
*/
int sprd_pcie_wait_resource(u32 dst, int timeout);
int sprd_pcie_request_resource(u32 dst);
int sprd_pcie_release_resource(u32 dst);
int sprd_pcie_resource_trash(u32 dst);
bool sprd_pcie_is_defective_chip(void);
#else
/* dummy functions */
static inline int sprd_pcie_wait_resource(u32 dst, int timeout) {return 0; }
static inline int sprd_pcie_request_resource(u32 dst) {return 0; }
static inline int sprd_pcie_release_resource(u32 dst) {return 0; }
static inline int sprd_pcie_resource_trash(u32 dst) {return 0; }
static inline bool sprd_pcie_is_defective_chip(void) {return false; }
#endif
#ifdef CONFIG_PCIE_EPF_SPRD
int sprd_pcie_resource_client_init(u32 dst, u32 ep_fun);
int sprd_register_pcie_resource_first_ready(u32 dst,
void (*notify)(void *p),
void *data);
#endif
#ifdef CONFIG_SPRD_PCIE_EP_DEVICE
int sprd_pcie_resource_host_init(u32 dst, u32 ep_dev,
struct platform_device *pcie_dev);
/*
* sprd_pcie_resource_reboot_ep
* reboot ep contains rescan ep device.
*/
void sprd_pcie_resource_reboot_ep(u32 dst);
/*
* sprd_pcie_wait_load_resource
* In case of the open the feature CONFIG_PCIE_SPRD_SPLIT_BAR,
* It has 2 times pcie scan action in host side boot process.
* After the first scan, the ep only have 2 bar can be used for
* memory map, the pcie resource is not completely ready,
* but the host can load images for ep, so we add the special api
* sprd_pcie_wait_load_resource, this api will return after
* the first scan action.
* Returns:
* 0 resource ready,
* < 0 resoure not ready,
* -%ERESTARTSYS if it was interrupted by a signal.
*/
int sprd_pcie_wait_load_resource(u32 dst);
/* Because the ep bar can only be split by ep itself,
* After all modem images be loaded, notify pcie resource
* can rescan ep now.
*/
void sprd_pcie_resource_notify_load_done(u32 dst);
#endif /* CONFIG_SPRD_PCIE_EP_DEVICE */
#endif /* _SPRD_PCIE_RESOURCE_H */

View File

@ -1,7 +0,0 @@
config SPRD_MCD
	tristate "SPRD modem power control module"
	default n
	help
	  mcd is the Spreadtrum AP/CP communication control driver;
	  it can control modem power on/off and trigger the modem events
	  assert, watchdog reset and panic.

View File

@ -1 +0,0 @@
# Kbuild fragment: always build the modem control (mcd) driver object.
obj-y += modem_ctrl.o

View File

@ -1,814 +0,0 @@
/*
* Copyright (C) 2019 Spreadtrum Communications Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/of_device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/gpio/consumer.h>
#include <linux/reboot.h>
#ifdef CONFIG_PCIE_PM_NOTIFY
#include <linux/pcie_notifier.h>
#endif
#include "../include/sprd_pcie_resource.h"
#include "../include/sipc.h"
#include "../include/mdm_ctrl.h"
/* Which SoC side this driver instance runs on: ROC1 is the AP (host),
 * ORCA is the modem side. */
enum {
	ROC1_SOC = 0,
	ORCA_SOC
};

/* Human-readable names indexed by the MDM_* status codes; reported to
 * user space via read() and uevents. */
static char *const mdm_stat[] = {
	"mdm_power_off", "mdm_power_on", "mdm_warm_reset", "mdm_cold_reset",
	"mdm_watchdog_reset", "mdm_assert", "mdm_panic"
};

/* Delays in ms taken from the modem boot/reset specification. */
#define REBOOT_MODEM_DELAY 1000
#define POWERREST_MODEM_DELAY 2000
#define RESET_MODEM_DELAY 50

/* Name of the character device exposed to user space. */
char cdev_name[] = "mdm_ctrl";

/* Per-board configuration: control GPIOs, their derived IRQ numbers,
 * the last reported modem status, and the CP-event gate flag. */
struct modem_ctrl_init_data {
	char *name;
	struct gpio_desc *gpio_poweron; /* Poweron */
	struct gpio_desc *gpio_reset; /* Reset modem */
	struct gpio_desc *gpio_preset; /* Pcie reset */
	struct gpio_desc *gpio_cpwatchdog;
	struct gpio_desc *gpio_cpassert;
	struct gpio_desc *gpio_cppanic;
	struct gpio_desc *gpio_cppoweroff;
	u32 irq_cpwatchdog;
	u32 irq_cpassert;
	u32 irq_cppanic;
	u32 irq_cppoweroff;
	u32 modem_status;
	bool enable_cp_event;
};

/* Driver instance: char device bookkeeping plus the SoC side we run on. */
struct modem_ctrl_device {
	struct modem_ctrl_init_data *init;
	int major;
	int minor;
	struct cdev cdev;
	struct device *dev;
	int soc_type;
};

/* Single global instance; IRQ handlers and the exported API rely on it. */
static struct class *modem_ctrl_class;
static struct modem_ctrl_device *mcd_dev;

/* modem control event notify chain (atomic: callbacks must not sleep) */
static ATOMIC_NOTIFIER_HEAD(modem_ctrl_chain);
/**
 * modem_ctrl_register_notifier - subscribe to modem state change events.
 * @nb: caller-supplied notifier block.
 *
 * Events are delivered through an atomic notifier chain, so callbacks run
 * in atomic context and must not sleep.
 */
int modem_ctrl_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&modem_ctrl_chain, nb);
}
EXPORT_SYMBOL(modem_ctrl_register_notifier);

/**
 * modem_ctrl_unregister_notifier - remove a previously registered notifier.
 * @nb: the block passed to modem_ctrl_register_notifier().
 */
void modem_ctrl_unregister_notifier(struct notifier_block *nb)
{
	atomic_notifier_chain_unregister(&modem_ctrl_chain, nb);
}
EXPORT_SYMBOL(modem_ctrl_unregister_notifier);
/*
 * Push the current modem status to user space as a KOBJ_CHANGE uevent with
 * two environment strings: MODEM_STAT=<numeric status> and
 * MODEM_EVENT=<name from mdm_stat[]>.
 */
static void send_event_msg(struct kobject *kobj)
{
	char *msg[3];
	char buff[100];
	char mbuff[100];

	memset(mbuff, 0, sizeof(mbuff));
	if (!mcd_dev || !mcd_dev->init || !kobj)
		return;
	snprintf(buff, sizeof(buff), "MODEM_STAT=%d",
		 mcd_dev->init->modem_status);
	snprintf(mbuff, sizeof(mbuff), "MODEM_EVENT=%s",
		 mdm_stat[mcd_dev->init->modem_status]);
	msg[0] = buff;
	msg[1] = mbuff;
	msg[2] = NULL; /* env list must be NULL-terminated */
	kobject_uevent_env(kobj, KOBJ_CHANGE, msg);
	dev_dbg(mcd_dev->dev, "send uevent to userspace\n");
}
/*
 * CP watchdog line fired. Ignored until modem_ctrl_enable_cp_event() has
 * armed event delivery, so spurious transitions during modem power-up do
 * not trigger a false recovery.
 */
static irqreturn_t cpwatchdogtriger_handler(int irq, void *dev_id)
{
	if (!mcd_dev || !mcd_dev->init || !mcd_dev->init->enable_cp_event)
		return IRQ_NONE;
	mcd_dev->init->modem_status = MDM_WATCHDOG_RESET;
	atomic_notifier_call_chain(&modem_ctrl_chain, MDM_WATCHDOG_RESET, NULL);
	send_event_msg(&mcd_dev->dev->kobj);
	return IRQ_HANDLED;
}

/* CP assert line fired; same gating as the watchdog handler. */
static irqreturn_t cpasserttriger_handler(int irq, void *dev_id)
{
	if (!mcd_dev || !mcd_dev->init || !mcd_dev->init->enable_cp_event)
		return IRQ_NONE;
	mcd_dev->init->modem_status = MDM_ASSERT;
	atomic_notifier_call_chain(&modem_ctrl_chain, MDM_ASSERT, NULL);
	send_event_msg(&mcd_dev->dev->kobj);
	return IRQ_HANDLED;
}

/* CP panic line fired; same gating as the watchdog handler. */
static irqreturn_t cppanictriger_handler(int irq, void *dev_id)
{
	if (!mcd_dev || !mcd_dev->init || !mcd_dev->init->enable_cp_event)
		return IRQ_NONE;
	mcd_dev->init->modem_status = MDM_PANIC;
	atomic_notifier_call_chain(&modem_ctrl_chain, MDM_PANIC, NULL);
	send_event_msg(&mcd_dev->dev->kobj);
	return IRQ_HANDLED;
}

/*
 * Power-off request line fired. Not gated by enable_cp_event; notifies
 * subscribers and then powers off the whole system.
 */
static irqreturn_t cppoweroff_handler(int irq, void *dev_id)
{
	if (!mcd_dev || !mcd_dev->init)
		return IRQ_NONE;
	/* Reserved here to receive the power-off event from the AP. */
	atomic_notifier_call_chain(&modem_ctrl_chain,
				   MDM_POWER_OFF, NULL);
	kernel_power_off();
	return IRQ_HANDLED;
}
/*
 * Map @cp_gpio to an IRQ and install the matching threaded handler
 * (watchdog/assert/panic on falling edge, power-off on level-low), then
 * mark the IRQ as a wakeup source.
 *
 * Returns 0 on success or a negative errno.
 */
static int request_gpio_to_irq(struct gpio_desc *cp_gpio,
			       struct modem_ctrl_device *mcd_dev)
{
	int ret = 0;

	if (!mcd_dev || !mcd_dev->init)
		return -EINVAL;
	ret = gpiod_to_irq(cp_gpio);
	if (ret < 0) {
		dev_err(mcd_dev->dev, "requset irq %d failed\n", ret);
		return ret;
	}
	dev_dbg(mcd_dev->dev, "gpio to irq %d\n", ret);
	/* Dispatch on which GPIO was passed: each gets its own handler. */
	if (cp_gpio == mcd_dev->init->gpio_cpwatchdog) {
		mcd_dev->init->irq_cpwatchdog = ret;
		ret = devm_request_threaded_irq(mcd_dev->dev,
						mcd_dev->init->irq_cpwatchdog,
						NULL, cpwatchdogtriger_handler,
						IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
						"cpwatchdog_irq", mcd_dev);
		if (ret < 0) {
			dev_err(mcd_dev->dev, "can not request irq for cp watchdog\n");
			return ret;
		}
		enable_irq_wake(mcd_dev->init->irq_cpwatchdog);
	} else if (cp_gpio == mcd_dev->init->gpio_cpassert) {
		mcd_dev->init->irq_cpassert = ret;
		ret = devm_request_threaded_irq(mcd_dev->dev,
						mcd_dev->init->irq_cpassert,
						NULL, cpasserttriger_handler,
						IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
						"cpassert_irq", mcd_dev);
		if (ret < 0) {
			dev_err(mcd_dev->dev, "can not request irq for cp assert\n");
			return ret;
		}
		enable_irq_wake(mcd_dev->init->irq_cpassert);
	} else if (cp_gpio == mcd_dev->init->gpio_cppanic) {
		mcd_dev->init->irq_cppanic = ret;
		ret = devm_request_threaded_irq(mcd_dev->dev,
						mcd_dev->init->irq_cppanic,
						NULL, cppanictriger_handler,
						IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
						"cppanic_irq", mcd_dev);
		if (ret < 0) {
			dev_err(mcd_dev->dev,
				"can not request irq for panic\n");
			return ret;
		}
		enable_irq_wake(mcd_dev->init->irq_cppanic);
	} else if (cp_gpio == mcd_dev->init->gpio_cppoweroff) {
		mcd_dev->init->irq_cppoweroff = ret;
		/* level-triggered (low): the power-off request is a level,
		 * not an edge */
		ret = devm_request_threaded_irq(mcd_dev->dev,
						mcd_dev->init->irq_cppoweroff,
						NULL, cppoweroff_handler,
						IRQF_ONESHOT | IRQF_TRIGGER_LOW,
						"cppoweroff_irq", mcd_dev);
		if (ret < 0) {
			dev_err(mcd_dev->dev,
				"can not request irq for cppoweroff\n");
			return ret;
		}
		enable_irq_wake(mcd_dev->init->irq_cppoweroff);
	}
	return 0;
}
/*
 * Configure GPIO direction and IRQs according to the SoC side:
 * - ROC1 (AP): watchdog/assert/panic lines are inputs driven by the CP;
 *   the power-off line is an output parked high (its IRQ is level-low).
 * - ORCA (modem): the power-off line is an input from the AP; the event
 *   lines are outputs parked high (the AP expects falling edges).
 *
 * Returns 0 on success or a negative errno.
 */
static int modem_gpios_init(struct modem_ctrl_device *mcd_dev, int soc_type)
{
	int ret;

	if (!mcd_dev || !mcd_dev->init)
		return -EINVAL;
	if (soc_type == ROC1_SOC) {
		gpiod_direction_input(mcd_dev->init->gpio_cpwatchdog);
		gpiod_direction_input(mcd_dev->init->gpio_cpassert);
		gpiod_direction_input(mcd_dev->init->gpio_cppanic);
		ret = request_gpio_to_irq(mcd_dev->init->gpio_cpwatchdog,
					  mcd_dev);
		if (ret)
			return ret;
		ret = request_gpio_to_irq(mcd_dev->init->gpio_cpassert,
					  mcd_dev);
		if (ret)
			return ret;
		ret = request_gpio_to_irq(mcd_dev->init->gpio_cppanic,
					  mcd_dev);
		if (ret)
			return ret;
		/* IRQF_TRIGGER_LOW, default must set to high */
		gpiod_set_value_cansleep(mcd_dev->init->gpio_cppoweroff, 1);
	} else {
		gpiod_direction_input(mcd_dev->init->gpio_cppoweroff);
		ret = request_gpio_to_irq(mcd_dev->init->gpio_cppoweroff,
					  mcd_dev);
		if (ret)
			return ret;
		/* TRIGGER_FALLING, default must set to high */
		gpiod_set_value_cansleep(mcd_dev->init->gpio_cpwatchdog, 1);
		gpiod_set_value_cansleep(mcd_dev->init->gpio_cpassert, 1);
		gpiod_set_value_cansleep(mcd_dev->init->gpio_cppanic, 1);
	}
	return 0;
}
void modem_ctrl_enable_cp_event(void)
{
if (mcd_dev && mcd_dev->init)
mcd_dev->init->enable_cp_event = true;
}
EXPORT_SYMBOL_GPL(modem_ctrl_enable_cp_event);
/**
 * modem_ctrl_send_abnormal_to_ap - signal an abnormal modem state to the AP.
 * @status: one of MDM_WATCHDOG_RESET, MDM_ASSERT, MDM_PANIC.
 *
 * Only valid on the ORCA (modem) side: drives the corresponding event GPIO
 * low, which the AP side receives as a falling-edge IRQ. Other status
 * values are rejected with a log message.
 */
void modem_ctrl_send_abnormal_to_ap(int status)
{
	struct gpio_desc *gpiodesc;

	if (!mcd_dev || !mcd_dev->init)
		return;
	if (mcd_dev->soc_type != ORCA_SOC) {
		dev_err(mcd_dev->dev, "operation not be allowed for %d\n",
			mcd_dev->soc_type);
		return;
	}
	switch (status) {
	case MDM_WATCHDOG_RESET:
		gpiodesc = mcd_dev->init->gpio_cpwatchdog;
		break;
	case MDM_ASSERT:
		gpiodesc = mcd_dev->init->gpio_cpassert;
		break;
	case MDM_PANIC:
		gpiodesc = mcd_dev->init->gpio_cppanic;
		break;
	default:
		dev_info(mcd_dev->dev,
			 "get status %d is not right for operation\n", status);
		return;
	}
	mcd_dev->init->modem_status = status;
	dev_info(mcd_dev->dev,
		 "operation unnormal status %d send to ap\n",
		 status);
	/* lines are parked high by modem_gpios_init(); low = event */
	if (!IS_ERR(gpiodesc))
		gpiod_set_value_cansleep(gpiodesc, 0);
}
/*
 * Send a command to the CP by pulsing its GPIO line (only MDM_POWER_OFF is
 * wired up). Only valid on the ROC1 (AP) side.
 */
static void modem_ctrl_send_cmd_to_cp(int status)
{
	struct gpio_desc *gpiodesc = NULL;

	if (!mcd_dev || !mcd_dev->init)
		return;
	if (mcd_dev->soc_type != ROC1_SOC) {
		dev_err(mcd_dev->dev, "operation not be allowed for %d\n",
			mcd_dev->soc_type);
		return;
	}
	if (status == MDM_POWER_OFF)
		gpiodesc = mcd_dev->init->gpio_cppoweroff;
	mcd_dev->init->modem_status = status;
	dev_info(mcd_dev->dev,
		 "operation cmd %d ms send to cp\n",
		 status);
	/* NOTE: for commands other than MDM_POWER_OFF gpiodesc stays NULL;
	 * gpiod_set_value_cansleep() tolerates a NULL descriptor. */
	if (!IS_ERR(gpiodesc)) {
		/* Pulse the line: low for 20 ms, then back high. The old
		 * code passed 20 as the "value"; gpiod treats any non-zero
		 * value as active, so use 1 explicitly. */
		gpiod_set_value_cansleep(gpiodesc, 0);
		msleep(20);
		gpiod_set_value_cansleep(gpiodesc, 1);
	}
}
/*
 * Validate @status and forward it to the AP. Only abnormal states
 * (MDM_WATCHDOG_RESET .. MDM_PANIC) are accepted, and only on the
 * ORCA (modem) side.
 */
static void modem_ctrl_notify_abnormal_status(int status)
{
	if (!mcd_dev || !mcd_dev->init)
		return;
	if (mcd_dev->soc_type != ORCA_SOC) {
		dev_err(mcd_dev->dev, "operation not be allowed for %d\n",
			mcd_dev->soc_type);
		return;
	}
	if (status < MDM_WATCHDOG_RESET || status > MDM_PANIC) {
		dev_err(mcd_dev->dev,
			"operation not be allowed for status %d\n", status);
		return;
	}
	modem_ctrl_send_abnormal_to_ap(status);
}
/**
 * modem_ctrl_poweron_modem - execute a modem power control command.
 * @on: an MDM_CTRL_* / MDM_POWER_OFF command code.
 *
 * Notifies modem_ctrl_chain subscribers before driving the GPIO sequence
 * for the command. Power-on/cold-reset/warm-reset pulse gpio_poweron or
 * gpio_reset with the spec-mandated delays; PCIe recovery power-cycles the
 * endpoint when CONFIG_PCIE_PM_NOTIFY is enabled.
 */
void modem_ctrl_poweron_modem(int on)
{
	if (!mcd_dev || !mcd_dev->init)
		return;
	switch (on) {
	case MDM_CTRL_POWER_ON:
		if (!IS_ERR(mcd_dev->init->gpio_poweron)) {
			atomic_notifier_call_chain(&modem_ctrl_chain,
						   MDM_CTRL_POWER_ON, NULL);
			dev_info(mcd_dev->dev, "set modem_poweron: %d\n", on);
			gpiod_set_value_cansleep(mcd_dev->init->gpio_poweron,
						 1);
			/* Base the spec modem boot flow that need to wait 1s */
			msleep(REBOOT_MODEM_DELAY);
			mcd_dev->init->modem_status = MDM_CTRL_POWER_ON;
			gpiod_set_value_cansleep(mcd_dev->init->gpio_poweron,
						 0);
		}
		break;
	case MDM_CTRL_POWER_OFF:
		/*
		 *To do
		 */
		break;
	case MDM_CTRL_SET_CFG:
		/*
		 *To do
		 */
		break;
	case MDM_CTRL_WARM_RESET:
		if (!IS_ERR(mcd_dev->init->gpio_reset)) {
			atomic_notifier_call_chain(&modem_ctrl_chain,
						   MDM_CTRL_WARM_RESET, NULL);
			dev_dbg(mcd_dev->dev, "set warm reset: %d\n", on);
			gpiod_set_value_cansleep(mcd_dev->init->gpio_reset, 1);
			/* Base the spec modem that need to wait 50ms */
			msleep(RESET_MODEM_DELAY);
			mcd_dev->init->modem_status = MDM_CTRL_WARM_RESET;
			gpiod_set_value_cansleep(mcd_dev->init->gpio_reset, 0);
		}
		break;
	case MDM_CTRL_COLD_RESET:
		if (!IS_ERR(mcd_dev->init->gpio_poweron)) {
			/* re-arm gating: CP events stay masked until the
			 * modem is back up and enables them again */
			mcd_dev->init->enable_cp_event = false;
			atomic_notifier_call_chain(&modem_ctrl_chain,
						   MDM_CTRL_COLD_RESET, NULL);
			dev_info(mcd_dev->dev, "modem_power reset: %d\n", on);
			gpiod_set_value_cansleep(mcd_dev->init->gpio_poweron,
						 1);
			/* Base the spec modem boot flow that need to wait 2s */
			msleep(POWERREST_MODEM_DELAY);
			mcd_dev->init->modem_status = MDM_CTRL_COLD_RESET;
			gpiod_set_value_cansleep(mcd_dev->init->gpio_poweron,
						 0);
		}
		break;
	case MDM_CTRL_PCIE_RECOVERY:
#ifdef CONFIG_PCIE_PM_NOTIFY
		pcie_ep_pm_notify(PCIE_EP_POWER_OFF);
		/* PCIE poweroff to poweron need 100ms*/
		msleep(100);
		pcie_ep_pm_notify(PCIE_EP_POWER_ON);
#endif
		break;
	case MDM_POWER_OFF:
		atomic_notifier_call_chain(&modem_ctrl_chain,
					   MDM_POWER_OFF, NULL);
		modem_ctrl_send_cmd_to_cp(MDM_POWER_OFF);
		break;
	default:
		dev_err(mcd_dev->dev, "cmd not support: %d\n", on);
	}
}
EXPORT_SYMBOL_GPL(modem_ctrl_poweron_modem);
#if defined(CONFIG_DEBUG_FS)
/* Placeholder debugfs show routine; currently only logs the call. */
static int modem_ctrl_debug_show(struct seq_file *m, void *private)
{
	dev_dbg(mcd_dev->dev, "%s\n", __func__);
	return 0;
}

static int modem_ctrl_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, modem_ctrl_debug_show, inode->i_private);
}

static const struct file_operations modem_ctrl_debug_fops = {
	.open = modem_ctrl_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
#endif /* CONFIG_DEBUG_FS */
/* Char device open: stash the owning modem_ctrl_device for later fops. */
static int modem_ctrl_open(struct inode *inode, struct file *filp)
{
	struct modem_ctrl_device *modem_ctrl;

	modem_ctrl = container_of(inode->i_cdev,
				  struct modem_ctrl_device, cdev);
	filp->private_data = modem_ctrl;
	dev_dbg(modem_ctrl->dev, "modem_ctrl: %s\n", __func__);
	return 0;
}

/* Char device release: nothing to free, just trace the call. */
static int modem_ctrl_release(struct inode *inode, struct file *filp)
{
	struct modem_ctrl_device *modem_ctrl;

	modem_ctrl = container_of(inode->i_cdev,
				  struct modem_ctrl_device, cdev);
	dev_dbg(modem_ctrl->dev, "modem_ctrl: %s\n", __func__);
	return 0;
}
/*
 * read(): return the current modem status as its textual name
 * (one of mdm_stat[]) followed by a newline.
 */
static ssize_t modem_ctrl_read(struct file *filp,
			       char __user *buf,
			       size_t count,
			       loff_t *ppos)
{
	char tmpbuf[30];
	int r;
	/* shadows the file-scope mcd_dev on purpose: use the instance the
	 * file handle was opened against */
	struct modem_ctrl_device *mcd_dev = filp->private_data;

	if (!mcd_dev || !mcd_dev->init)
		return -EINVAL;
	r = snprintf(tmpbuf, sizeof(tmpbuf), "%s\n",
		     mdm_stat[mcd_dev->init->modem_status]);
	return simple_read_from_buffer(buf, count, ppos, tmpbuf, r);
}
/*
 * Write handler: parse a decimal command from userspace and dispatch it.
 * On ROC1 the command is forwarded to modem_ctrl_poweron_modem() when it
 * lies within [MDM_CTRL_POWER_OFF, MDM_CTRL_SET_CFG]; on other SoCs it is
 * reported as an abnormal-status notification.
 * Returns @count on success or a negative errno.
 */
static ssize_t modem_ctrl_write(struct file *filp,
                                const char __user *buf,
                                size_t count, loff_t *ppos)
{
    char sbuf[100];
    int ret;
    u32 mcd_cmd;
    struct modem_ctrl_device *mcd_dev = filp->private_data;

    if (!mcd_dev)
        return -EINVAL;

    /*
     * Bound the copy: the original copied @count bytes unchecked (stack
     * overflow for count > sizeof(sbuf)) and indexed sbuf[count - 1],
     * which underflows for count == 0.
     */
    if (count == 0 || count >= sizeof(sbuf))
        return -EINVAL;

    if (unalign_copy_from_user((void *)sbuf, buf, count)) {
        /* Never hand a __user pointer to printk %s -- log the size only. */
        dev_err(mcd_dev->dev, "copy from user failed, count=%zu\n", count);
        return -EFAULT;
    }

    /* Terminate before logging; the buffer was not NUL-terminated yet. */
    sbuf[count - 1] = '\0';
    dev_dbg(mcd_dev->dev, "get info:%s", sbuf);

    ret = kstrtouint(sbuf, 10, &mcd_cmd);
    if (ret) {
        dev_err(mcd_dev->dev, "Invalid input!\n");
        return ret;
    }

    if (mcd_dev->soc_type == ROC1_SOC) {
        if (mcd_cmd >= MDM_CTRL_POWER_OFF &&
            mcd_cmd <= MDM_CTRL_SET_CFG)
            modem_ctrl_poweron_modem(mcd_cmd);
        else
            dev_info(mcd_dev->dev, "cmd not support!\n");
    } else {
        modem_ctrl_notify_abnormal_status(mcd_cmd);
    }
    return count;
}
/*
 * Ioctl handler: only valid on the ROC1 (host) side. Every supported
 * power command is forwarded verbatim to modem_ctrl_poweron_modem();
 * MDM_CTRL_SET_CFG is accepted but currently a no-op.
 */
static long modem_ctrl_ioctl(struct file *filp, unsigned int cmd,
                             unsigned long arg)
{
    if (!mcd_dev || mcd_dev->soc_type == ORCA_SOC)
        return -EINVAL;

    switch (cmd) {
    case MDM_CTRL_POWER_OFF:
    case MDM_CTRL_POWER_ON:
    case MDM_CTRL_WARM_RESET:
    case MDM_CTRL_COLD_RESET:
    case MDM_CTRL_PCIE_RECOVERY:
        /* cmd value doubles as the power-control opcode. */
        modem_ctrl_poweron_modem(cmd);
        break;
    case MDM_CTRL_SET_CFG:
        /* accepted, nothing to configure yet */
        break;
    default:
        return -EINVAL;
    }
    return 0;
}
/* Char-device operations exposed through /dev for modem control. */
static const struct file_operations modem_ctrl_fops = {
    .open = modem_ctrl_open,
    .release = modem_ctrl_release,
    .read = modem_ctrl_read,
    .write = modem_ctrl_write,
    .unlocked_ioctl = modem_ctrl_ioctl,
    .owner = THIS_MODULE,
    .llseek = default_llseek,
};
/*
 * Parse the ORCA-side (modem) devicetree node: the watchdog/assert/panic
 * GPIOs are outputs towards the peer, cppoweroff is an input.
 * Returns 0 on success or a negative errno from GPIO lookup.
 */
static int modem_ctrl_parse_modem_dt(struct modem_ctrl_init_data **init,
                                     struct device *dev)
{
    struct modem_ctrl_init_data *pdata;

    pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
    if (!pdata)
        return -ENOMEM;
    pdata->name = cdev_name;

    /* Trigger watchdog, assert, panic of orca */
    pdata->gpio_cpwatchdog = devm_gpiod_get(dev,
                                            "cpwatchdog",
                                            GPIOD_OUT_HIGH);
    if (IS_ERR(pdata->gpio_cpwatchdog))
        return PTR_ERR(pdata->gpio_cpwatchdog);

    pdata->gpio_cpassert = devm_gpiod_get(dev, "cpassert", GPIOD_OUT_HIGH);
    if (IS_ERR(pdata->gpio_cpassert))
        return PTR_ERR(pdata->gpio_cpassert);

    pdata->gpio_cppanic = devm_gpiod_get(dev, "cppanic", GPIOD_OUT_HIGH);
    if (IS_ERR(pdata->gpio_cppanic))
        return PTR_ERR(pdata->gpio_cppanic);

    pdata->gpio_cppoweroff = devm_gpiod_get(dev, "cppoweroff", GPIOD_IN);
    /* Fixed copy-paste bug: previously re-checked gpio_cpassert here. */
    if (IS_ERR(pdata->gpio_cppoweroff))
        return PTR_ERR(pdata->gpio_cppoweroff);

    *init = pdata;
    return 0;
}
/*
 * Parse the ROC1-side (host) devicetree node: poweron/reset are outputs,
 * the watchdog/assert/panic lines are inputs from the modem, and
 * cppoweroff is an output held high.
 * Returns 0 on success or a negative errno from GPIO lookup.
 */
static int modem_ctrl_parse_dt(struct modem_ctrl_init_data **init,
                               struct device *dev)
{
    struct modem_ctrl_init_data *pdata;

    pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
    if (!pdata)
        return -ENOMEM;
    pdata->name = cdev_name;

    pdata->gpio_poweron = devm_gpiod_get(dev, "poweron", GPIOD_OUT_LOW);
    if (IS_ERR(pdata->gpio_poweron))
        return PTR_ERR(pdata->gpio_poweron);

    pdata->gpio_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
    if (IS_ERR(pdata->gpio_reset))
        return PTR_ERR(pdata->gpio_reset);

    /* Trigger watchdog, assert, panic of orca */
    pdata->gpio_cpwatchdog = devm_gpiod_get(dev, "cpwatchdog", GPIOD_IN);
    if (IS_ERR(pdata->gpio_cpwatchdog))
        return PTR_ERR(pdata->gpio_cpwatchdog);

    pdata->gpio_cpassert = devm_gpiod_get(dev, "cpassert", GPIOD_IN);
    if (IS_ERR(pdata->gpio_cpassert))
        return PTR_ERR(pdata->gpio_cpassert);

    pdata->gpio_cppanic = devm_gpiod_get(dev, "cppanic", GPIOD_IN);
    if (IS_ERR(pdata->gpio_cppanic))
        return PTR_ERR(pdata->gpio_cppanic);

    pdata->gpio_cppoweroff = devm_gpiod_get(dev,
                                            "cppoweroff", GPIOD_OUT_HIGH);
    /* Fixed copy-paste bug: previously re-checked gpio_cpassert here. */
    if (IS_ERR(pdata->gpio_cppoweroff))
        return PTR_ERR(pdata->gpio_cppoweroff);

    pdata->modem_status = MDM_CTRL_POWER_OFF;
    *init = pdata;
    return 0;
}
/*
 * Clear the caller's init-data pointer. The data itself is devm-managed,
 * so no explicit free is needed.
 *
 * The previous implementation only NULLed a local copy of the pointer,
 * which had no effect at all; now the caller's pointer is cleared so a
 * stale user faults loudly instead of touching released memory.
 */
static inline void
modem_ctrl_destroy_pdata(struct modem_ctrl_init_data **init)
{
    *init = NULL;
}
/*
 * Restart notifier (ORCA side only): report MDM_PANIC to the peer, then
 * spin forever -- the machine is about to be reset and must not continue.
 */
static int modem_ctrl_restart_handle(struct notifier_block *this,
                                     unsigned long mode, void *cmd)
{
    if (!mcd_dev || mcd_dev->soc_type == ROC1_SOC)
        return NOTIFY_DONE;
    modem_ctrl_notify_abnormal_status(MDM_PANIC);
    /* Intentional busy-wait until the hardware reset takes effect. */
    while (1)
        ;
    return NOTIFY_DONE;
}
/* Registered via register_restart_handler() in probe. */
static struct notifier_block modem_ctrl_restart_handler = {
    .notifier_call = modem_ctrl_restart_handle,
    .priority = 150,
};
/*
 * Probe: detect the SoC flavor from the compatible string, parse the
 * matching DT GPIO set, create the char device node, initialize the
 * GPIOs and hook the restart handler.
 * Returns 0 on success or a negative errno, unwinding in reverse order.
 */
static int modem_ctrl_probe(struct platform_device *pdev)
{
    struct modem_ctrl_init_data *init = pdev->dev.platform_data;
    struct modem_ctrl_device *modem_ctrl_dev;
    dev_t devid;
    int rval;
    struct device *dev = &pdev->dev;

    modem_ctrl_dev = devm_kzalloc(dev, sizeof(*modem_ctrl_dev), GFP_KERNEL);
    if (!modem_ctrl_dev)
        return -ENOMEM;
    mcd_dev = modem_ctrl_dev;

    if (of_device_is_compatible(pdev->dev.of_node, "sprd,roc1-modem-ctrl"))
        modem_ctrl_dev->soc_type = ROC1_SOC;
    else
        modem_ctrl_dev->soc_type = ORCA_SOC;

    /* Each SoC side wires the control GPIOs differently. */
    if (modem_ctrl_dev->soc_type == ROC1_SOC)
        rval = modem_ctrl_parse_dt(&init, &pdev->dev);
    else
        rval = modem_ctrl_parse_modem_dt(&init, &pdev->dev);
    if (rval) {
        dev_err(dev,
                "Failed to parse modem_ctrl device tree, ret=%d\n",
                rval);
        return rval;
    }

    dev_dbg(dev, "after parse device tree, name=%s soctype=%d\n",
            init->name,
            modem_ctrl_dev->soc_type);

    rval = alloc_chrdev_region(&devid, 0, 1, init->name);
    if (rval != 0) {
        dev_err(dev, "Failed to alloc modem_ctrl chrdev\n");
        goto error3;
    }

    cdev_init(&modem_ctrl_dev->cdev, &modem_ctrl_fops);
    rval = cdev_add(&modem_ctrl_dev->cdev, devid, 1);
    if (rval != 0) {
        dev_err(dev, "Failed to add modem_ctrl cdev\n");
        goto error2;
    }

    modem_ctrl_dev->major = MAJOR(devid);
    modem_ctrl_dev->minor = MINOR(devid);
    modem_ctrl_dev->dev = device_create(modem_ctrl_class, NULL,
                                        MKDEV(modem_ctrl_dev->major,
                                              modem_ctrl_dev->minor),
                                        NULL, "%s", init->name);
    /* device_create() returns ERR_PTR on failure, never NULL. */
    if (IS_ERR(modem_ctrl_dev->dev)) {
        dev_err(dev, "create dev failed\n");
        rval = PTR_ERR(modem_ctrl_dev->dev);
        goto error1;
    }

    modem_ctrl_dev->init = init;
    platform_set_drvdata(pdev, modem_ctrl_dev);

    rval = modem_gpios_init(modem_ctrl_dev, modem_ctrl_dev->soc_type);
    if (rval) {
        dev_err(dev, "request gpios error\n");
        goto error0;
    }

    rval = register_restart_handler(&modem_ctrl_restart_handler);
    if (rval) {
        dev_err(dev, "cannot register restart handler err=%d\n", rval);
        goto error0;
    }
    return 0;

error0:
    device_destroy(modem_ctrl_class,
                   MKDEV(modem_ctrl_dev->major,
                         modem_ctrl_dev->minor));
error1:
    cdev_del(&modem_ctrl_dev->cdev);
error2:
    unregister_chrdev_region(devid, 1);
error3:
    modem_ctrl_destroy_pdata(&init);
    return rval;
}
/*
 * Remove: undo probe in reverse order. The restart handler was installed
 * with register_restart_handler(), so it must be removed with
 * unregister_restart_handler() -- the old code wrongly called
 * unregister_reboot_notifier(), leaving a dangling notifier.
 */
static int modem_ctrl_remove(struct platform_device *pdev)
{
    struct modem_ctrl_device *modem_ctrl_dev = platform_get_drvdata(pdev);

    unregister_restart_handler(&modem_ctrl_restart_handler);
    device_destroy(modem_ctrl_class,
                   MKDEV(modem_ctrl_dev->major,
                         modem_ctrl_dev->minor));
    cdev_del(&modem_ctrl_dev->cdev);
    unregister_chrdev_region(MKDEV(modem_ctrl_dev->major,
                                   modem_ctrl_dev->minor), 1);
    modem_ctrl_destroy_pdata(&modem_ctrl_dev->init);
    platform_set_drvdata(pdev, NULL);
    return 0;
}
/*
 * Shutdown: on the host (ROC1) side, broadcast MDM_POWER_OFF to local
 * listeners, then command the modem to power down and give it time to
 * finish before the platform cuts power.
 */
static void modem_ctrl_shutdown(struct platform_device *pdev)
{
    if (mcd_dev->soc_type == ROC1_SOC) {
        atomic_notifier_call_chain(&modem_ctrl_chain,
                                   MDM_POWER_OFF, NULL);
        /*
         * sleep 50 ms for other module to do something
         * before orca power down.
         */
        msleep(50);
        modem_ctrl_send_cmd_to_cp(MDM_POWER_OFF);
        /* Sleep 500ms for cp to deal power down process otherwise
         * cp will not power down clearly.
         */
        msleep(500);
    }
}
/*
 * OF match table. The kernel walks this array until it hits an empty
 * entry, so the sentinel terminator is mandatory -- it was missing,
 * which makes device matching read past the end of the array.
 */
static const struct of_device_id modem_ctrl_match_table[] = {
    {.compatible = "sprd,roc1-modem-ctrl", },
    {.compatible = "sprd,orca-modem-ctrl", },
    { /* sentinel */ },
};
/* Platform driver glue; registered from modem_ctrl_init(). */
static struct platform_driver modem_ctrl_driver = {
    .driver = {
        .name = "modem_ctrl",
        .of_match_table = modem_ctrl_match_table,
    },
    .probe = modem_ctrl_probe,
    .remove = modem_ctrl_remove,
    .shutdown = modem_ctrl_shutdown,
};
/*
 * Module init: create the device class, then register the platform
 * driver. The class is now destroyed when driver registration fails,
 * fixing a class leak on the error path.
 */
int modem_ctrl_init(void)
{
    int ret;

    modem_ctrl_class = class_create(THIS_MODULE, "modem_ctrl");
    if (IS_ERR(modem_ctrl_class))
        return PTR_ERR(modem_ctrl_class);
    ret = platform_driver_register(&modem_ctrl_driver);
    if (ret)
        class_destroy(modem_ctrl_class);
    return ret;
}
EXPORT_SYMBOL_GPL(modem_ctrl_init);
/*
 * Module exit. Unregister the driver BEFORE destroying the class: the
 * driver's remove() calls device_destroy() against modem_ctrl_class, so
 * the old order (class first) used a destroyed class during teardown.
 */
void modem_ctrl_exit(void)
{
    platform_driver_unregister(&modem_ctrl_driver);
    class_destroy(modem_ctrl_class);
}
EXPORT_SYMBOL_GPL(modem_ctrl_exit);

View File

@ -1,7 +0,0 @@
# Host-side driver for the Spreadtrum PCIe endpoint device.
config SPRD_PCIE_EP_DEVICE
	tristate "SPRD PCIE EP device"
	default n
	depends on PCI
	help
	  SPRD pcie ep device driver in host side for Spreadtrum.

View File

@ -1,6 +0,0 @@
# Feature switches for this build; objects below are always built in
# except the two guarded by their own Kconfig symbols.
ccflags-y += -DCONFIG_SPRD_PCIE_EP_DEVICE -DCONFIG_SPRD_SIPA
obj-y += sprd_pcie_ep_device.o
obj-y += pcie_host_resource.o
obj-y += sprd_pcie_quirks.o
obj-$(CONFIG_PCIE_EPF_SPRD) += pcie_client_resource.o
obj-$(CONFIG_SPRD_SIPA_RES) += pcie_sipa_res.o

View File

@ -1,528 +0,0 @@
/*
* Copyright (C) 2019 Spreadtrum Communications Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mdm_ctrl.h>
#include <linux/pcie-epf-sprd.h>
#include <linux/sched.h>
#include <linux/sipc.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/wait.h>
#include "../include/sprd_pcie_resource.h"
#ifdef CONFIG_SPRD_SIPA_RES
#include "pcie_sipa_res.h"
#endif
/* Messages that drive the EP-side PCIe resource state machine. */
enum ep_msg {
    RC_SCANNED_MSG = 0,
    RC_REMOVING_MSG,
    EPC_UNLINK_MSG,
    EPC_LINKUP_MSG
};

/* States of the EP-side link; transitions are documented below. */
enum pcie_ep_state {
    SPRD_PCIE_WAIT_FIRST_READY = 0,
    SPRD_PCIE_WAIT_SCANNED,
    SPRD_PCIE_SCANNED,
    SPRD_PCIE_WAIT_REMOVED,
    SPRD_PCIE_REMOVED,
    SPRD_PCIE_WAIT_POWER_OFF
};

/* One-shot callback fired the first time the link becomes ready. */
struct sprd_pci_res_notify {
    void (*notify)(void *p);
    void *data;
};

/* Per-destination EP resource bookkeeping. */
struct sprd_pcie_res {
    u32 dst;                /* SIPC destination id */
    u32 ep_fun;             /* EP function handle for doorbells/MSI */
    enum pcie_ep_state state;
    bool msi_later;         /* request-res MSI deferred until scanned */
    bool wakeup_later;      /* wakeup-to-RC deferred until EPC unlink */
#ifdef CONFIG_SPRD_SIPA_RES
    void *sipa_res;
#endif

    /*
     * On the client (Orca) side the PCIe module blocks chip deep-sleep,
     * so hold a wakeup source while PCIe is in use to avoid the system
     * going to deep sleep while PCIe is still working.
     */
    struct wakeup_source ws;
    wait_queue_head_t wait_pcie_ready;
    struct sprd_pci_res_notify first_ready_notify;
};

/* One resource object per SIPC destination. */
static struct sprd_pcie_res *g_pcie_res[SIPC_ID_NR];
/* the state machine of ep, init SPRD_PCIE_WAIT_FIRST_READY.
 * SPRD_PCIE_WAIT_FIRST_READY (receive RC scanned) ==> SPRD_PCIE_SCANNED
 * SPRD_PCIE_SCANNED (receive RC removing)==> SPRD_PCIE_WAIT_REMOVED
 * SPRD_PCIE_WAIT_REMOVED(receive epc unlink)==>SPRD_PCIE_REMOVED
 * SPRD_PCIE_REMOVED(receive epc linkup)==>SPRD_PCIE_WAIT_SCANNED
 * SPRD_PCIE_WAIT_SCANNED(receive RC scanned)==>SPRD_PCIE_SCANNED
 * SPRD_PCIE_WAIT_POWER_OFF can do nothing, just wait shutdown.
 */

/* Log names, indexed by enum ep_msg. */
static const char *change_msg[EPC_LINKUP_MSG + 1] = {
    "rc scanned",
    "rc removing",
    "epc unlink",
    "epc linkup"
};

/* Log names, indexed by enum pcie_ep_state (fixed "sacanned" typo). */
static const char *state_msg[SPRD_PCIE_REMOVED + 1] = {
    "wait first ready",
    "wait scanned",
    "scanned",
    "wait remove",
    "removed"
};
/*
 * Drive the EP-side state machine (see table above). Invalid transitions
 * are logged and ignored; SPRD_PCIE_WAIT_POWER_OFF is terminal.
 */
static void pcie_resource_client_change_state(struct sprd_pcie_res *res,
                                              enum ep_msg msg)
{
    u32 old_state = res->state;

    if (old_state == SPRD_PCIE_WAIT_POWER_OFF)
        return;
    pr_debug("pcie res: change state msg=%s, old_state=%s.\n",
             change_msg[msg], state_msg[old_state]);
    switch (msg) {
    case RC_SCANNED_MSG:
        if (old_state != SPRD_PCIE_WAIT_FIRST_READY
            && old_state != SPRD_PCIE_WAIT_SCANNED) {
            pr_err("pcie res: %s msg err, old state=%s",
                   change_msg[msg], state_msg[old_state]);
            return;
        }
        res->state = SPRD_PCIE_SCANNED;
        break;
    case RC_REMOVING_MSG:
        if (old_state != SPRD_PCIE_SCANNED) {
            pr_err("pcie res: %s msg err, old state=%s",
                   change_msg[msg], state_msg[old_state]);
            return;
        }
        res->state = SPRD_PCIE_WAIT_REMOVED;
        break;
    case EPC_UNLINK_MSG:
        /* unlink in WAIT_FIRST_READY is expected, stay silent. */
        if (old_state != SPRD_PCIE_WAIT_REMOVED) {
            if (old_state != SPRD_PCIE_WAIT_FIRST_READY)
                pr_err("pcie res: %s msg err, old state=%s",
                       change_msg[msg], state_msg[old_state]);
            return;
        }
        res->state = SPRD_PCIE_REMOVED;
        break;
    case EPC_LINKUP_MSG:
        /* linkup in WAIT_FIRST_READY is expected, stay silent. */
        if (old_state != SPRD_PCIE_REMOVED) {
            if (old_state != SPRD_PCIE_WAIT_FIRST_READY)
                pr_err("pcie res: %s msg err, old state=%s",
                       change_msg[msg], state_msg[old_state]);
            return;
        }
        res->state = SPRD_PCIE_WAIT_SCANNED;
        break;
    }
    pr_info("pcie res: change state from %s to %s.\n",
            state_msg[old_state], state_msg[res->state]);
}
/*
 * Called once, on the first link-up: create the SIPA resource (when
 * configured) and fire the registered first-ready callback, if any.
 */
static void sprd_pcie_resource_first_ready_notify(struct sprd_pcie_res *res)
{
    void (*notify)(void *p);

    pr_info("pcie res: first ready.\n");
#ifdef CONFIG_SPRD_SIPA_RES
    /*
     * in client side, producer res id is SIPA_RM_RES_PROD_PCIE_EP,
     * consumer res id is SIPA_RM_RES_CONS_WWAN_DL.
     */
    res->sipa_res = pcie_sipa_res_create(res->dst,
                                         SIPA_RM_RES_PROD_PCIE_EP,
                                         SIPA_RM_RES_CONS_WWAN_DL);
    if (!res->sipa_res)
        pr_err("pcie res:create ipa res failed.\n");
#endif
    notify = res->first_ready_notify.notify;
    if (notify)
        notify(res->first_ready_notify.data);
}
/*
 * EPF event callback: tracks link state, holds a wakelock across the
 * linked window, and flushes any deferred wakeup request on unlink.
 */
static void pcie_resource_client_epf_notify(int event, void *private)
{
    struct sprd_pcie_res *res = (struct sprd_pcie_res *)private;

    if (res->state == SPRD_PCIE_WAIT_POWER_OFF)
        return;
    switch (event) {
    case SPRD_EPF_BIND:
        pr_info("pcie res: epf be binded.\n");
        /* Defective chips need an explicit ready-for-rescan MSI. */
        if (sprd_pcie_is_defective_chip())
            sprd_pci_epf_raise_irq(res->ep_fun,
                                   PCIE_MSI_EP_READY_FOR_RESCAN);
        break;
    case SPRD_EPF_UNBIND:
        pr_info("pcie res: epf be unbinded.\n");
        break;
    case SPRD_EPF_REMOVE:
        pr_info("pcie res: epf be removed.\n");
        break;
    case SPRD_EPF_LINK_UP:
        /* get a wakelock */
        __pm_stay_awake(&res->ws);
        pr_info("pcie res: epf linkup.\n");
        pcie_resource_client_change_state(res, EPC_LINKUP_MSG);
        /* first ready notify */
        if (res->state == SPRD_PCIE_WAIT_FIRST_READY)
            sprd_pcie_resource_first_ready_notify(res);
        break;
    case SPRD_EPF_UNLINK:
        /* Here need this log to debug pcie scan and remove */
        pr_info("pcie res: epf unlink.\n");
        pcie_resource_client_change_state(res, EPC_UNLINK_MSG);
        /* if has wakeup pending, send wakeup to rc */
        if (res->wakeup_later) {
            res->wakeup_later = false;
            pr_info("pcie res: send wakeup to rc.\n");
            if (sprd_pci_epf_start(res->ep_fun))
                pr_err("pcie res: send wakeup to rc failed.\n");
        }
        /* relax a wakelock */
        __pm_relax(&res->ws);
        break;
    default:
        break;
    }
}
/*
 * Doorbell IRQ handler from the RC: "scanned" wakes every waiter and
 * flushes a deferred resource-request MSI; "removing" just advances the
 * state machine.
 */
static irqreturn_t pcie_resource_client_irq_handler(int irq, void *private)
{
    struct sprd_pcie_res *res = (struct sprd_pcie_res *)private;

    if (res->state == SPRD_PCIE_WAIT_POWER_OFF)
        return IRQ_HANDLED;
    if (irq == PCIE_DBEL_EP_SCANNED) {
        pcie_resource_client_change_state(res, RC_SCANNED_MSG);
        /* wakeup all blocked thread */
        pr_info("pcie res: scanned, wakup all.\n");
        wake_up_interruptible_all(&res->wait_pcie_ready);
        /* if has msi pending, send msi to rc */
        if (res->msi_later) {
            res->msi_later = false;
            pr_info("pcie res: request msi to rc.\n");
            sprd_pci_epf_raise_irq(res->ep_fun,
                                   PCIE_MSI_REQUEST_RES);
        }
    } else if (irq == PCIE_DBEL_EP_REMOVING) {
        pr_info("pcie res: removing.\n");
        pcie_resource_client_change_state(res, RC_REMOVING_MSG);
    }
    return IRQ_HANDLED;
}
/*
 * Modem-control notifier: on MDM_POWER_OFF, push every destination's
 * state machine into its terminal power-off state.
 */
static int sprd_pcie_resource_client_mcd(struct notifier_block *nb,
                                         unsigned long mode, void *cmd)
{
    int i;

    pr_info("pcie res: mcd event mode=%ld.\n", mode);
    if (mode != MDM_POWER_OFF)
        return NOTIFY_DONE;

    for (i = 0; i < SIPC_ID_NR; i++) {
        struct sprd_pcie_res *cur = g_pcie_res[i];

        if (cur)
            cur->state = SPRD_PCIE_WAIT_POWER_OFF;
    }
    return NOTIFY_DONE;
}
/* Registered with modem_ctrl_register_notifier() in client init. */
static struct notifier_block mcd_notify = {
    .notifier_call = sprd_pcie_resource_client_mcd,
    .priority = 149,
};
/*
 * Create the per-destination EP resource object and hook the doorbell
 * IRQ range, the EPF notifier, and the modem-control notifier.
 * Returns 0 on success or a negative errno.
 * NOTE(review): wakeup_source_init() was removed in newer kernels (the
 * host-side file version-guards this) -- confirm the target kernel.
 */
int sprd_pcie_resource_client_init(u32 dst, u32 ep_fun)
{
    struct sprd_pcie_res *res;

    if (dst >= SIPC_ID_NR)
        return -EINVAL;
    res = kzalloc(sizeof(*res), GFP_KERNEL);
    if (!res)
        return -ENOMEM;
    res->dst = dst;
    res->state = SPRD_PCIE_WAIT_FIRST_READY;
    res->ep_fun = ep_fun;
    wakeup_source_init(&res->ws, "pcie_res");
    init_waitqueue_head(&res->wait_pcie_ready);
    sprd_pci_epf_register_irq_handler_ex(res->ep_fun,
                                         PCIE_DBEL_EP_SCANNED,
                                         PCIE_DBEL_EP_REMOVING,
                                         pcie_resource_client_irq_handler,
                                         res);
    sprd_pci_epf_register_notify(res->ep_fun,
                                 pcie_resource_client_epf_notify,
                                 res);
    modem_ctrl_register_notifier(&mcd_notify);
    g_pcie_res[dst] = res;
    return 0;
}
/*
 * Tear down the per-destination resource object created by
 * sprd_pcie_resource_client_init(), unhooking all callbacks first.
 */
int sprd_pcie_resource_trash(u32 dst)
{
    struct sprd_pcie_res *res;

    if (dst >= SIPC_ID_NR || !g_pcie_res[dst])
        return -EINVAL;
    res = g_pcie_res[dst];
#ifdef CONFIG_SPRD_SIPA_RES
    if (res->sipa_res)
        pcie_sipa_res_destroy(res->sipa_res);
#endif
    sprd_pci_epf_unregister_irq_handler_ex(res->ep_fun,
                                           PCIE_DBEL_EP_SCANNED,
                                           PCIE_DBEL_EP_REMOVING);
    sprd_pci_epf_unregister_notify(res->ep_fun);
    modem_ctrl_unregister_notifier(&mcd_notify);
    kfree(res);
    g_pcie_res[dst] = NULL;
    return 0;
}
/*
 * Block until the link reaches SPRD_PCIE_SCANNED.
 * @timeout: <0 waits forever, 0 polls once, >0 is milliseconds (a 1s
 * grace is added to cover the rescan window).
 * Returns 0 when ready, -ETIME on timeout, -ERESTARTSYS on signal.
 */
int sprd_pcie_wait_resource(u32 dst, int timeout)
{
    struct sprd_pcie_res *res;
    int ret, wait;
    unsigned long delay;

    if (dst >= SIPC_ID_NR || !g_pcie_res[dst])
        return -EINVAL;
    res = g_pcie_res[dst];
    /* pcie ready, return succ immediately */
    if (res->state == SPRD_PCIE_SCANNED)
        return 0;
    if (timeout == 0)
        return -ETIME;
    if (timeout < 0) {
        wait = wait_event_interruptible(
            res->wait_pcie_ready,
            res->state == SPRD_PCIE_SCANNED
            );
        ret = wait;
    } else {
        /*
         * timeout must add 1s,
         * because the pcie rescan may took some time.
         */
        delay = msecs_to_jiffies(timeout + 1000);
        wait = wait_event_interruptible_timeout(res->wait_pcie_ready,
                                                res->state ==
                                                SPRD_PCIE_SCANNED,
                                                delay);
        if (wait == 0)
            ret = -ETIME;
        else if (wait > 0)
            ret = 0;
        else
            ret = wait;
    }
    if (ret < 0 && ret != -ERESTARTSYS)
        pr_err("pcie res: wait resource, val=%d.\n", ret);
    return ret;
}
/*
 * Vote the PCIe link up from the EP side. Depending on the current
 * state this either sends a request MSI immediately, defers it until
 * the link is scanned, or pokes the RC awake.
 * Returns 0 on success or a negative errno.
 */
int sprd_pcie_request_resource(u32 dst)
{
    struct sprd_pcie_res *res;
    int ret = 0;

    if (dst >= SIPC_ID_NR || !g_pcie_res[dst])
        return -EINVAL;
    res = g_pcie_res[dst];
    if (res->state == SPRD_PCIE_WAIT_POWER_OFF)
        return -EINVAL;
    pr_debug("pcie res: request res, state=%d.\n", res->state);
    switch (res->state) {
    case SPRD_PCIE_WAIT_FIRST_READY:
    case SPRD_PCIE_WAIT_SCANNED:
        pr_info("pcie res: later send request msi to rc.\n");
        res->msi_later = true;
        break;
    case SPRD_PCIE_WAIT_REMOVED:
        pr_info("pcie res: later send wakeup to rc.\n");
        res->wakeup_later = true;
        break;
    case SPRD_PCIE_SCANNED:
        /*
         * if pcie state is SCANNED, just send
         * PCIE_MSI_REQUEST_RES to the host.
         * After host receive res msi interrupt,
         * it will increase one vote in modem power manger.
         */
        pr_info("pcie res: send request msi to rc.\n");
        ret = sprd_pci_epf_raise_irq(res->ep_fun,
                                     PCIE_MSI_REQUEST_RES);
        break;
    case SPRD_PCIE_REMOVED:
        /*
         * if pcie state is removed, poll wake_up singnal
         * to host, and he host will rescan the pcie.
         */
        pr_info("pcie res: send wakeup to rc.\n");
        if (sprd_pci_epf_start(res->ep_fun) == 0)
            break;
        /* may receive ep reset, wait linkup and scanned */
        pr_info("pcie res: later send request msi to rc.\n");
        res->msi_later = true;
        break;
    default:
        pr_err("pcie res: request res err, state=%d.\n",
               res->state);
        ret = -EPERM;
        break;
    }
    return ret;
}
/*
 * Drop the EP-side vote on the PCIe link: sends a release MSI when
 * scanned, or cancels a deferred request; other states are errors.
 * Returns 0 on success or a negative errno.
 */
int sprd_pcie_release_resource(u32 dst)
{
    struct sprd_pcie_res *res;
    int ret = 0;

    if (dst >= SIPC_ID_NR || !g_pcie_res[dst])
        return -EINVAL;
    res = g_pcie_res[dst];
    if (res->state == SPRD_PCIE_WAIT_POWER_OFF)
        return -EINVAL;
    switch (res->state) {
    case SPRD_PCIE_SCANNED:
        /*
         * if pcie state is SCANNED, send PCIE_MSI_RELEASE_RES
         * to the host, else, do nothing. After host receive res msi
         * interrupt, it will decrease one vote in modem power manger,
         * and if modem power manger is idle, the host will remove
         * the pcie.
         */
        pr_info("pcie res: send release msi to rc.\n");
        ret = sprd_pci_epf_raise_irq(res->ep_fun,
                                     PCIE_MSI_RELEASE_RES);
        break;
    case SPRD_PCIE_WAIT_FIRST_READY:
        /* if has msi pending, remove it */
        if (res->msi_later)
            res->msi_later = false;
        break;
    default:
        pr_err("pcie res: release res state=%d.\n", res->state);
        ret = -EPERM;
        break;
    }
    return ret;
}
/*
 * Register a one-shot callback fired the first time the PCIe link for
 * @dst becomes ready. Returns 0, or -EINVAL for an unknown destination.
 */
int sprd_register_pcie_resource_first_ready(u32 dst,
                                            void (*notify)(void *p), void *data)
{
    struct sprd_pcie_res *res;

    if (dst >= SIPC_ID_NR || !g_pcie_res[dst])
        return -EINVAL;

    res = g_pcie_res[dst];
    res->first_ready_notify.notify = notify;
    res->first_ready_notify.data = data;
    return 0;
}
/*
 * Cached check for the defective UD710-AB silicon revision.
 * NOTE(review): the static-local lazy init is not race-free if called
 * concurrently before the first read completes -- confirm callers.
 */
bool sprd_pcie_is_defective_chip(void)
{
    static bool first_read = true, defective;

    if (first_read) {
        first_read = false;
        defective = sprd_kproperty_chipid("UD710-AB") == 0;
    }
    return defective;
}

View File

@ -1,720 +0,0 @@
/*
* Copyright (C) 2019 Spreadtrum Communications Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/version.h>
#ifdef CONFIG_SPRD_SIPA_RES
#include "pcie_sipa_res.h"
#endif
#include "../include/pcie-rc-sprd.h"
#include "../include/sipc.h"
//#include "../include/mdm_ctrl.h"
#include "../include/sprd_pcie_ep_device.h"
#include "../include/sprd_mpm.h"
#include "../include/sprd_pcie_resource.h"
#define PCIE_REMOVE_SCAN_GAP msecs_to_jiffies(200)
#define MAX_PMS_WAIT_TIME 5000
#define MAX_PMS_DEFECTIVE_CHIP_FIRST_WAIT_TIME (55 * 1000)
/* RC-side link states; SCANNED_2BAR is the pre-bar-split probe state. */
enum rc_state {
    SPRD_PCIE_WAIT_FIRST_READY = 0,
    SPRD_PCIE_WAIT_SCANNED,
    SPRD_PCIE_SCANNED,
    SPRD_PCIE_WAIT_REMOVED,
    SPRD_PCIE_REMOVED,
    SPRD_PCIE_SCANNED_2BAR,
    SPRD_PCIE_WAIT_POWER_OFF
};

/* Per-destination RC-side resource bookkeeping. */
struct sprd_pcie_res {
    u32 dst;                /* SIPC destination id */
    u32 ep_dev;             /* EP device handle for doorbells/IRQs */
    u32 state;
    u32 scan_cnt;           /* number of successful EP probes */
    u32 max_wait_time;      /* pms wait budget (longer on bad silicon) */
    bool ep_power_on;       /* current power vote held for the EP */
    bool ep_dev_probe;      /* EP currently probed (safe to access) */
    bool smem_send_to_ep;
    unsigned long action_jiff;  /* jiffies of last scan/remove action */
    struct sprd_pms *pms;
    char pms_name[20];
    wait_queue_head_t wait_pcie_ready;
    bool ep_ready_for_rescan;
    wait_queue_head_t wait_load_ready;
    wait_queue_head_t wait_first_rescan;
    struct task_struct *thread; /* first-rescan watcher thread */
#ifdef CONFIG_SPRD_SIPA_RES
    void *sipa_res;
#endif
#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,18,0 ))
    struct wakeup_source *ws;
#else
    struct wakeup_source ws;
#endif
    struct work_struct scan_work;
    struct work_struct remove_work;
    struct workqueue_struct *wq;
    struct platform_device *pcie_dev;
    struct sprd_pcie_register_event reg_event;
};
static int sprd_pcie_resource_rescan(struct sprd_pcie_res *res);
static struct sprd_pcie_res *g_pcie_res[SIPC_ID_NR];
/*
 * One-time work after the very first successful scan: register for
 * wakeup events, take the initial EP power vote, and (when configured)
 * create the SIPA resource.
 */
static void sprd_pcie_resource_host_first_rescan_do(struct sprd_pcie_res *res)
{
    int ret = sprd_pcie_register_event(&res->reg_event);

    if (ret)
        pr_err("pcie res: register pci ret=%d.\n", ret);
    /* power up for ep after the first scan. */
    res->ep_power_on = true;
    sprd_pms_power_up(res->pms);
#ifdef CONFIG_SPRD_SIPA_RES
    /*
     * in host side, producer res id is SIPA_RM_RES_PROD_PCIE3,
     * consumer res id is SIPA_RM_RES_CONS_WWAN_UL.
     */
    res->sipa_res = pcie_sipa_res_create(res->dst,
                                         SIPA_RM_RES_PROD_PCIE3,
                                         SIPA_RM_RES_CONS_WWAN_UL);
    if (!res->sipa_res)
        pr_err("pcie res:create ipa res failed.\n");
#endif
}
/*
 * EP device event callback: tracks probe/remove, passes the shared-mem
 * window to the EP on probe, and signals the EP via doorbells.
 */
static void sprd_pcie_resource_host_ep_notify(int event, void *data)
{
    struct sprd_pcie_res *res = (struct sprd_pcie_res *)data;
    u32 base, size;

    /* wait power off, do nothing */
    if (res->state == SPRD_PCIE_WAIT_POWER_OFF)
        return;
    switch (event) {
    case PCIE_EP_PROBE:
        /* set state to scanned */
        res->state = SPRD_PCIE_SCANNED;
        res->scan_cnt++;
        res->ep_dev_probe = true;
        //modem_ctrl_enable_cp_event();
        if (smem_get_area(SIPC_ID_MINIAP, &base, &size) == 0)
            sprd_ep_dev_pass_smem(res->ep_dev, base, size);
        pr_info("pcie res: ep_notify, probed cnt=%d.\n",
                res->scan_cnt);
        /* first scan does the one-time setup */
        if (res->scan_cnt == 1)
            sprd_pcie_resource_host_first_rescan_do(res);
        /* clear removed irq and notify ep scanned */
        sprd_ep_dev_clear_doolbell_irq(res->ep_dev,
                                       PCIE_DBEL_EP_REMOVING);
        sprd_ep_dev_raise_irq(res->ep_dev, PCIE_DBEL_EP_SCANNED);
        /* wakeup all blocked thread */
        wake_up_interruptible_all(&res->wait_pcie_ready);
        break;
    case PCIE_EP_REMOVE:
        pr_info("pcie res: ep_notify, removed.\n");
        res->state = SPRD_PCIE_REMOVED;
        res->ep_dev_probe = false;
        break;
    case PCIE_EP_PROBE_BEFORE_SPLIT_BAR:
        res->state = SPRD_PCIE_SCANNED_2BAR;
        res->ep_dev_probe = true;
        pr_info("pcie res: probed before split bar.\n");
        if (!res->ep_ready_for_rescan) {
            wake_up_interruptible_all(&res->wait_load_ready);
        } else {
            pr_info("pcie res: bar err, rescan.\n");
            sprd_pcie_resource_rescan(res);
        }
        break;
    default:
        break;
    }
}
/*
 * MSI handler from the EP: request/release translate into power votes;
 * ready-for-rescan wakes the first-rescan watcher thread.
 */
static irqreturn_t sprd_pcie_resource_host_irq_handler(int irq, void *private)
{
    struct sprd_pcie_res *res = (struct sprd_pcie_res *)private;

    if (irq == PCIE_MSI_REQUEST_RES) {
        pr_info("pcie res: ep request res.\n");
        /*
         * client modem power up,
         * no need wake lock and no need wait resource.
         */
        if (!res->ep_power_on) {
            res->ep_power_on = true;
            sprd_pms_power_up(res->pms);
        }
        /* only after received ep request can backup the ep configs. */
        sprd_ep_dev_set_backup(res->ep_dev);
    } else if (irq == PCIE_MSI_RELEASE_RES) {
        pr_info("pcie res: ep release res.\n");
        /*
         * client modem power down,
         * no need wake lock.
         */
        if (res->ep_power_on) {
            res->ep_power_on = false;
            sprd_pms_power_down(res->pms, false);
        }
    } else if (irq == PCIE_MSI_EP_READY_FOR_RESCAN) {
        pr_info("pcie res: ep ready for rescan.\n");
        res->ep_ready_for_rescan = true;
        wake_up_interruptible_all(&res->wait_first_rescan);
    }
    return IRQ_HANDLED;
}
/*
 * Scan work item: enforce the minimum remove-to-scan gap, then ask the
 * RC driver to (re)configure the EP device. Holds a wakelock throughout.
 */
static void sprd_pcie_resource_scan_fn(struct work_struct *work)
{
    unsigned long diff;
    unsigned int delay;
    int ret;
    struct sprd_pcie_res *res = container_of(work, struct sprd_pcie_res,
                                             scan_work);

    /* wait power off, do nothing */
    if (res->state == SPRD_PCIE_WAIT_POWER_OFF)
        return;
    /* request wakelock */
    sprd_pms_request_wakelock(res->pms);
    diff = jiffies - res->action_jiff;
    if (diff < PCIE_REMOVE_SCAN_GAP) {
        /* must ensure that the scan starts after a period of remove. */
        delay = jiffies_to_msecs(PCIE_REMOVE_SCAN_GAP - diff);
        msleep(delay);
    }
    pr_info("pcie res: scan\n");
    ret = sprd_pcie_configure_device(res->pcie_dev);
    if (ret)
        pr_err("pcie res: scan error = %d!\n", ret);
    /* record the last scan jiffies */
    res->action_jiff = jiffies;
    /* release wakelock */
    sprd_pms_release_wakelock(res->pms);
}
static void sprd_pcie_resource_remove_fn(struct work_struct *work)
{
unsigned long diff;
unsigned int delay;
int ret;
struct sprd_pcie_res *res = container_of(work, struct sprd_pcie_res,
remove_work);
/* request wakelock */
sprd_pms_request_wakelock(res->pms);
pr_info("pcie res: remove work!\n");
diff = jiffies - res->action_jiff;
if (diff < PCIE_REMOVE_SCAN_GAP) {
/* must ensure that the remove starts after a period of scan. */
delay = jiffies_to_msecs(PCIE_REMOVE_SCAN_GAP - diff);
msleep(delay);
}
/*
* in wait power off state, or ep device is not probing,
* can't access ep.
*/
if (res->state == SPRD_PCIE_WAIT_POWER_OFF ||
!res->ep_dev_probe) {
/* release wakelock */
sprd_pms_release_wakelock(res->pms);
return;
}
/* notify ep removed, must before removed */
sprd_ep_dev_clear_doolbell_irq(res->ep_dev, PCIE_DBEL_EP_SCANNED);
sprd_ep_dev_raise_irq(res->ep_dev, PCIE_DBEL_EP_REMOVING);
/* waiting for the doorbell irq to ep */
msleep(50);
pr_info("pcie res: remove\n");
/* start removed ep*/
ret = sprd_pcie_unconfigure_device(res->pcie_dev);
if (ret)
pr_err("pcie res: remove error = %d.\n!", ret);
/* record the last remov jiffies */
res->action_jiff = jiffies;
/* release wakelock */
sprd_pms_release_wakelock(res->pms);
}
static void sprd_pcie_resource_start_scan(struct sprd_pcie_res *res)
{
if (res->state == SPRD_PCIE_SCANNED ||
res->state == SPRD_PCIE_WAIT_SCANNED) {
pr_info("pcie res: scanned, do nothing!\n");
} else {
pr_info("pcie res: start scan!\n");
queue_work(res->wq, &res->scan_work);
}
}
/*
 * Queue the remove work if the link is in a removable state; otherwise
 * log the bad transition. No-op during power-off.
 */
static void sprd_pcie_resource_start_remove(struct sprd_pcie_res *res)
{
    /* wait power off, do nothing */
    if (res->state == SPRD_PCIE_WAIT_POWER_OFF)
        return;
    if (res->state == SPRD_PCIE_SCANNED ||
        res->state == SPRD_PCIE_WAIT_FIRST_READY
        || (res->state == SPRD_PCIE_SCANNED_2BAR)
        ) {
        res->state = SPRD_PCIE_WAIT_REMOVED;
        pr_info("pcie res: start remove.");
        queue_work(res->wq, &res->remove_work);
    } else {
        pr_err("pcie res: start remove, err=%d.", res->state);
    }
}
/* PCIe event callback: an EP wakeup takes the power vote if not held. */
static void sprd_pcie_resource_event_process(enum sprd_pcie_event event,
                                             void *data)
{
    struct sprd_pcie_res *res = data;

    if (event != SPRD_PCIE_EVENT_WAKEUP)
        return;

    pr_info("pcie res: wakeup by ep, event=%d.\n", event);
    if (!res->ep_power_on) {
        res->ep_power_on = true;
        sprd_pms_power_up(res->pms);
    }
}
/*
 * sprd_pcie_resource_rescan
 * Because the ep bar can only be split by ep itself,
 * After all modem images be loaded at the first time,
 * the ep will run and split 2 64bit bar to 4 32bit bar.
 * host must rescan the pcie ep device agian by this api,
 * after receive ep driver ready for rescan msg and all
 * modem images load done.
 */
static int sprd_pcie_resource_rescan(struct sprd_pcie_res *res)
{
    pr_info("pcie res: rescan.\n");
    /* remove then scan; both are queued work, serialized on res->wq. */
    sprd_pcie_resource_start_remove(res);
    sprd_pcie_resource_start_scan(res);
    return 0;
}
/*
 * Kthread: wait until the EP signals ready-for-rescan, trigger the
 * rescan once, then restore the normal pms wait budget and exit.
 */
static int sprd_pcie_resource_check_first_rescan(void *data)
{
    struct sprd_pcie_res *res = data;
    int ret;

    pr_info("pcie res: check first rescan.\n");
    while (!kthread_should_stop()) {
        ret = wait_event_interruptible(
            res->wait_first_rescan,
            res->ep_ready_for_rescan);
        if (!ret) {
            pr_info("pcie res:first resacn ready.\n");
            sprd_pcie_resource_rescan(res);
            break;
        }
    }
    /* After the first rescan, restore the normal wait time. */
    if (sprd_pcie_is_defective_chip())
        res->max_wait_time = MAX_PMS_WAIT_TIME;
    res->thread = NULL;
    return 0;
}
#if 0
static int sprd_pcie_resource_host_mcd(struct notifier_block *nb,
unsigned long mode, void *cmd)
{
struct sprd_pcie_res *res;
int i;
u32 state;
pr_info("pcie res: mcd mode=%ld.\n", mode);
switch (mode) {
case MDM_POWER_OFF:
state = SPRD_PCIE_WAIT_POWER_OFF;
break;
default:
return NOTIFY_DONE;
}
for (i = 0; i < SIPC_ID_NR; i++) {
res = g_pcie_res[i];
/* wait power off, do nothing */
if (res->state == SPRD_PCIE_WAIT_POWER_OFF)
continue;
if (res) {
res->state = state;
cancel_work_sync(&res->scan_work);
cancel_work_sync(&res->remove_work);
}
}
return NOTIFY_DONE;
}
static struct notifier_block mcd_notify = {
.notifier_call = sprd_pcie_resource_host_mcd,
.priority = 149,
};
#endif
/* Because the ep bar can only be split by ep itself,
 * After all modem images be loaded, notify the pcie resource.
 */
void sprd_pcie_resource_notify_load_done(u32 dst)
{
    struct sprd_pcie_res *res;

    pr_info("pcie res: load done.\n");
    if (dst >= SIPC_ID_NR || !g_pcie_res[dst])
        return;
    res = g_pcie_res[dst];
    /* spawn the watcher that performs the first rescan (see above). */
    res->thread = kthread_create(sprd_pcie_resource_check_first_rescan, res,
                                 "first rescan");
    if (IS_ERR(res->thread))
        pr_err("pcie res: Failed to create rescan thread.\n");
    else
        wake_up_process(res->thread);
}
/*
 * Block until the EP is scanned (either fully or pre-bar-split), i.e.
 * until modem images can be loaded. Returns 0 or -ERESTARTSYS/-EINVAL.
 */
int sprd_pcie_wait_load_resource(u32 dst)
{
    struct sprd_pcie_res *res;

    if (dst >= SIPC_ID_NR || !g_pcie_res[dst])
        return -EINVAL;
    res = g_pcie_res[dst];
    /* can load image, return immediately */
    if (res->state == SPRD_PCIE_SCANNED ||
        res->state == SPRD_PCIE_SCANNED_2BAR)
        return 0;
    return wait_event_interruptible(
        res->wait_load_ready,
        (res->state == SPRD_PCIE_SCANNED ||
         res->state == SPRD_PCIE_SCANNED_2BAR));
}
/*
 * Reboot the EP: reset the state machine to its initial values, drop
 * any backed-up EP configuration, then remove and rescan the device.
 */
void sprd_pcie_resource_reboot_ep(u32 dst)
{
    struct sprd_pcie_res *res;

    pr_info("pcie res: reboot ep.\n");
    if (dst >= SIPC_ID_NR || !g_pcie_res[dst])
        return;
    res = g_pcie_res[dst];
    /* wait power off, do nothing */
    if (res->state == SPRD_PCIE_WAIT_POWER_OFF)
        return;
    res->state = SPRD_PCIE_WAIT_FIRST_READY;
    res->smem_send_to_ep = false;
    res->ep_ready_for_rescan = false;
    /* The defective chip , the first wait time must be enough long. */
    if (sprd_pcie_is_defective_chip())
        res->max_wait_time = MAX_PMS_DEFECTIVE_CHIP_FIRST_WAIT_TIME;
    else
        res->max_wait_time = MAX_PMS_WAIT_TIME;
    /* after ep reboot, can't backup ep configs*/
    sprd_ep_dev_clear_backup(res->ep_dev);
    sprd_pcie_resource_start_remove(res);
    //modem_ctrl_poweron_modem(MDM_CTRL_COLD_RESET);
    sprd_pcie_resource_start_scan(res);
}
/*
 * Create the per-destination RC-side resource object: workqueue, wait
 * queues, wakeup source, pms handle, EP IRQ/notify hooks and the wakeup
 * event registration. Returns 0 on success or a negative errno.
 *
 * Fixes: the @ep_dev parameter was never stored into res->ep_dev, so
 * every sprd_ep_dev_* call used the zero-initialized field; also uses
 * snprintf for pms_name (20 bytes) which sprintf could overflow for
 * large @dst values.
 */
int sprd_pcie_resource_host_init(u32 dst, u32 ep_dev,
                                 struct platform_device *pcie_dev)
{
    struct sprd_pcie_res *res;

    if (dst >= SIPC_ID_NR)
        return -EINVAL;
    res = kzalloc(sizeof(*res), GFP_KERNEL);
    if (!res)
        return -ENOMEM;

    res->wq = create_singlethread_workqueue("pcie_res");
    if (!res->wq) {
        pr_err("pcie res:create wq failed.\n");
        kfree(res);
        return -ENOMEM;
    }

    init_waitqueue_head(&res->wait_load_ready);
    init_waitqueue_head(&res->wait_first_rescan);

#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,18,0 ))
    res->ws = wakeup_source_register(NULL, "pcie_res");
#else
    wakeup_source_init(&res->ws, "pcie_res");
#endif

    res->dst = dst;
    res->ep_dev = ep_dev;   /* was missing: EP handle used by all hooks */
    res->state = SPRD_PCIE_WAIT_FIRST_READY;
    res->pcie_dev = pcie_dev;

    /* The defective chip , the first wait time must be enough long. */
    if (sprd_pcie_is_defective_chip())
        res->max_wait_time = MAX_PMS_DEFECTIVE_CHIP_FIRST_WAIT_TIME;
    else
        res->max_wait_time = MAX_PMS_WAIT_TIME;

    init_waitqueue_head(&res->wait_pcie_ready);
    INIT_WORK(&res->scan_work, sprd_pcie_resource_scan_fn);
    INIT_WORK(&res->remove_work, sprd_pcie_resource_remove_fn);

    snprintf(res->pms_name, sizeof(res->pms_name), "ep-request-%d", dst);
    res->pms = sprd_pms_create(dst, res->pms_name, false);
    if (!res->pms)
        pr_err("pcie res:create pms failed.\n");

    sprd_ep_dev_register_irq_handler_ex(res->ep_dev,
                                        PCIE_MSI_REQUEST_RES,
                                        PCIE_MSI_RELEASE_RES,
                                        sprd_pcie_resource_host_irq_handler, res);
    sprd_ep_dev_register_notify(res->ep_dev,
                                sprd_pcie_resource_host_ep_notify, res);
    //modem_ctrl_register_notifier(&mcd_notify);

    /* init wake up event callback */
    res->reg_event.events = SPRD_PCIE_EVENT_WAKEUP;
    res->reg_event.pdev = pcie_dev;
    res->reg_event.callback = sprd_pcie_resource_event_process;
    res->reg_event.data = res;

    g_pcie_res[dst] = res;
    return 0;
}
/**
 * sprd_pcie_resource_trash - tear down the resource object created by
 * sprd_pcie_resource_host_init().
 * @dst: remote system id.
 *
 * Teardown order matters: stop the rescan thread, destroy the sipa
 * resource, flush/cancel the scan and remove works, then unhook the
 * event/irq/notify callbacks before freeing.
 *
 * Returns 0 on success, -EINVAL for a bad/unknown @dst.
 */
int sprd_pcie_resource_trash(u32 dst)
{
	struct sprd_pcie_res *res;

	if (dst >= SIPC_ID_NR || !g_pcie_res[dst])
		return -EINVAL;

	res = g_pcie_res[dst];

	if (!IS_ERR_OR_NULL(res->thread))
		kthread_stop(res->thread);

#ifdef CONFIG_SPRD_SIPA_RES
	if (res->sipa_res)
		pcie_sipa_res_destroy(res->sipa_res);
#endif

	cancel_work_sync(&res->scan_work);
	cancel_work_sync(&res->remove_work);
	destroy_workqueue(res->wq);
	sprd_pcie_deregister_event(&res->reg_event);

	sprd_ep_dev_unregister_irq_handler_ex(res->ep_dev,
					      PCIE_MSI_REQUEST_RES,
					      PCIE_MSI_RELEASE_RES);
	sprd_ep_dev_unregister_notify(res->ep_dev);
	//modem_ctrl_unregister_notifier(&mcd_notify);
	sprd_pms_destroy(res->pms);

	kfree(res);
	g_pcie_res[dst] = NULL;

	return 0;
}
/**
 * sprd_pcie_wait_resource - wait for the pcie link to reach SCANNED state.
 * @dst: remote system id.
 * @timeout: ms to wait; 0 means poll once, negative means "maximum".
 *
 * Returns 0 when scanned, -ETIME on timeout (or immediately when
 * @timeout == 0 and the link is not ready), -EINVAL for a bad @dst,
 * or a negative error from the interruptible wait.
 */
int sprd_pcie_wait_resource(u32 dst, int timeout)
{
	struct sprd_pcie_res *res;
	unsigned long to_jiffies;
	int rc;

	if (dst >= SIPC_ID_NR || !g_pcie_res[dst])
		return -EINVAL;

	res = g_pcie_res[dst];

	/* Fast path: already scanned. */
	if (res->state == SPRD_PCIE_SCANNED)
		return 0;

	/* Pure poll requested and not ready. */
	if (timeout == 0)
		return -ETIME;

	/*
	 * If orca takes an exception the resource may never become ready
	 * again, so never wait longer than max_wait_time; that lets the
	 * caller see an error instead of blocking forever.
	 */
	if (timeout < 0 || timeout > res->max_wait_time)
		timeout = res->max_wait_time;

	/* Pad by 1s because the pcie scan itself takes some time. */
	to_jiffies = msecs_to_jiffies(timeout + 1000);
	rc = wait_event_interruptible_timeout(res->wait_pcie_ready,
					      res->state ==
					      SPRD_PCIE_SCANNED,
					      to_jiffies);
	if (rc > 0)
		rc = 0;		/* condition met */
	else if (rc == 0)
		rc = -ETIME;	/* timed out */

	if (rc < 0 && rc != -ERESTARTSYS)
		pr_err("pcie res: wait resource, val=%d.\n", rc);

	return rc;
}
/**
 * sprd_pcie_request_resource - request the pcie link and hold a wakelock.
 * @dst: remote system id.
 *
 * Takes the per-resource wakeup source, then (when built with
 * CONFIG_SPRD_PCIE) kicks a scan unless the very first scan — which the
 * pcie driver starts automatically — is still pending.
 *
 * Returns 0 on success, -EINVAL for a bad/unknown @dst.
 */
int sprd_pcie_request_resource(u32 dst)
{
	struct sprd_pcie_res *res;

	if (dst >= SIPC_ID_NR || !g_pcie_res[dst])
		return -EINVAL;

	res = g_pcie_res[dst];

	/* get a wakelock */
#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,18,0 ))
	__pm_stay_awake(res->ws);
#else
	__pm_stay_awake(&res->ws);
#endif

	pr_info("pcie res: request resource, state=%d.\n", res->state);

#ifdef CONFIG_SPRD_PCIE
	/* The first scan is start by pcie driver automatically. */
	if (res->state != SPRD_PCIE_WAIT_FIRST_READY)
		sprd_pcie_resource_start_scan(res);
#endif

	return 0;
}
/**
 * sprd_pcie_release_resource - release the pcie link and drop the wakelock.
 * @dst: remote system id.
 *
 * Counterpart of sprd_pcie_request_resource(): relaxes the wakeup source
 * and (when built with CONFIG_SPRD_PCIE) queues removal of the device.
 *
 * Returns 0 on success, -EINVAL for a bad/unknown @dst.
 */
int sprd_pcie_release_resource(u32 dst)
{
	struct sprd_pcie_res *res;

	if (dst >= SIPC_ID_NR || !g_pcie_res[dst])
		return -EINVAL;

	res = g_pcie_res[dst];

	/* relax a wakelock */
#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,18,0 ))
	__pm_relax(res->ws);
#else
	__pm_relax(&res->ws);
#endif

#ifdef CONFIG_SPRD_PCIE
	pr_info("pcie res: release resource.\n");
	sprd_pcie_resource_start_remove(res);
#endif

	return 0;
}
/**
 * sprd_pcie_is_defective_chip - check for the defective UD710-AB chip.
 *
 * Result is read from the chip-id property once and cached in function
 * statics; NOTE(review): the first call is assumed to happen before any
 * concurrent callers, as the lazy init is not locked.
 *
 * Always false when CONFIG_SPRD_PCIE is not set.
 */
bool sprd_pcie_is_defective_chip(void)
{
#ifndef CONFIG_SPRD_PCIE
	return false;
#else
	static bool first_read = true, defective;

	if (first_read) {
		first_read = false;
		defective = sprd_kproperty_chipid("UD710-AB") == 0;
	}

	return defective;
#endif
}

View File

@ -1,195 +0,0 @@
/*
* Copyright (C) 2018-2019 Unisoc Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/soc/sprd/sprd_mpm.h>
#include <linux/workqueue.h>
#include "pcie_sipa_res.h"
#include "../include/sprd_pcie_resource.h"
/* Glue object tying one sipa producer/consumer pair to the pcie resource. */
struct pcie_sipa_res_prod {
	u8 dst;				/* remote system (sipc) id */
	enum sipa_rm_res_id prod_id;	/* producer res id */
	enum sipa_rm_res_id cons_id;	/* consumer res id */
	struct sprd_pms *pms;		/* power source used to hold pcie */
	char pms_name[20];		/* storage for the pms name */
	struct work_struct wait_work;	/* waits for pcie, then notifies sipa */
	struct delayed_work rm_work;	/* retries deferred rm creation */
};
static void pcie_sipa_res_wait_res_work_fn(struct work_struct *work)
{
int ret;
struct pcie_sipa_res_prod *res = container_of(work,
struct pcie_sipa_res_prod,
wait_work);
ret = sprd_pcie_wait_resource(res->dst, -1);
/* pcie not ready, just return. */
if (ret) {
pr_err("pcie_sipa_res: wait res error = %d!\n", ret);
return;
}
/* notify ipa module that pcie is ready. */
sipa_rm_notify_completion(SIPA_RM_EVT_GRANTED,
res->prod_id);
}
/* sipa_rm request_resource hook.
 *
 * Powers up the pms, then polls the pcie resource once. IPA must not be
 * blocked here until pcie is ready; instead the grant is delivered later
 * through sipa_rm_notify_completion() by wait_work, and the designated
 * in-progress value -EINPROGRESS is returned instead of -ETIME.
 */
static int pcie_sipa_res_request_resource(void *data)
{
	struct pcie_sipa_res_prod *res = data;
	int rc;

	pr_info("pcie_sipa_res: request resource.\n");

	sprd_pms_power_up(res->pms);

	rc = sprd_pcie_wait_resource(res->dst, 0);
	if (rc != -ETIME)
		return rc;

	/* not ready yet: queue the waiter and report in-progress */
	schedule_work(&res->wait_work);
	return -EINPROGRESS;
}
/* sipa_rm release_resource hook: drop the pms reference taken in
 * pcie_sipa_res_request_resource(). Always succeeds.
 */
static int pcie_sipa_res_release_resource(void *data)
{
	struct pcie_sipa_res_prod *res = data;

	pr_info("pcie_sipa_res: release resource.\n");
	sprd_pms_release_resource(res->pms);

	return 0;
}
static void pcie_sipa_res_create_rm_work_fn(struct work_struct *work)
{
int ret;
struct sipa_rm_create_params rm_params;
struct pcie_sipa_res_prod *res = container_of(to_delayed_work(work),
struct pcie_sipa_res_prod,
rm_work);
rm_params.name = res->prod_id;
rm_params.floor_voltage = 0;
rm_params.reg_params.notify_cb = NULL;
rm_params.reg_params.user_data = res;
rm_params.request_resource = pcie_sipa_res_request_resource;
rm_params.release_resource = pcie_sipa_res_release_resource;
ret = sipa_rm_create_resource(&rm_params);
/* defer to create rm */
if (ret == -EPROBE_DEFER) {
schedule_delayed_work(&res->rm_work, msecs_to_jiffies(1000));
return;
}
/* add dependencys */
ret = sipa_rm_add_dependency(res->cons_id, res->prod_id);
if (ret < 0 && ret != -EINPROGRESS) {
pr_err("pcie_sipa_res: add_dependency error = %d!\n", ret);
sipa_rm_delete_resource(res->prod_id);
sprd_pms_destroy(res->pms);
kfree(res);
}
}
/**
 * pcie_sipa_res_create - create the sipa producer bound to the pcie link.
 * @dst: remote system id.
 * @prod_id: producer resource id.
 * @cons_id: consumer resource id that will depend on @prod_id.
 *
 * If the rm subsystem is not ready yet (-EPROBE_DEFER), creation is
 * retried from rm_work and the object is still returned to the caller.
 * NOTE(review): if the deferred retry ultimately fails, rm_work frees
 * the object while the caller still holds this pointer — confirm callers
 * only pass it back to pcie_sipa_res_destroy() on teardown.
 *
 * Returns the opaque handle, or NULL on failure.
 */
void *pcie_sipa_res_create(u8 dst, enum sipa_rm_res_id prod_id,
			   enum sipa_rm_res_id cons_id)
{
	int ret;
	struct sipa_rm_create_params rm_params;
	struct pcie_sipa_res_prod *res;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return NULL;

	/* init wait pcie res work */
	INIT_WORK(&res->wait_work, pcie_sipa_res_wait_res_work_fn);
	INIT_DELAYED_WORK(&res->rm_work, pcie_sipa_res_create_rm_work_fn);

	/* create pms */
	strncpy(res->pms_name, "sipa", sizeof(res->pms_name));
	res->pms = sprd_pms_create(dst, res->pms_name, false);
	if (!res->pms) {
		pr_err("pcie_sipa_res: create pms failed!\n");
		kfree(res);
		return NULL;
	}

	res->dst = dst;
	res->prod_id = prod_id;
	res->cons_id = cons_id;

	/* create prod */
	rm_params.name = prod_id;
	rm_params.floor_voltage = 0;
	rm_params.reg_params.notify_cb = NULL;
	rm_params.reg_params.user_data = res;
	rm_params.request_resource = pcie_sipa_res_request_resource;
	rm_params.release_resource = pcie_sipa_res_release_resource;
	ret = sipa_rm_create_resource(&rm_params);

	/* defer to create rm */
	if (ret == -EPROBE_DEFER) {
		schedule_delayed_work(&res->rm_work, msecs_to_jiffies(1000));
		return res;
	} else if (ret) {
		pr_err("pcie_sipa_res: create rm error = %d!\n", ret);
		sprd_pms_destroy(res->pms);
		kfree(res);
		return NULL;
	}

	/* add dependencys */
	ret = sipa_rm_add_dependency(cons_id, prod_id);
	if (ret < 0 && ret != -EINPROGRESS) {
		pr_err("pcie_sipa_res: add_dependency error = %d!\n", ret);
		sipa_rm_delete_resource(prod_id);
		sprd_pms_destroy(res->pms);
		kfree(res);
		return NULL;
	}

	return res;
}
/**
 * pcie_sipa_res_destroy - undo pcie_sipa_res_create().
 * @data: handle returned by pcie_sipa_res_create().
 *
 * Cancels both works first so they cannot run on freed memory, then
 * removes the rm dependency/resource and the pms before freeing.
 */
void pcie_sipa_res_destroy(void *data)
{
	struct pcie_sipa_res_prod *res = data;

	cancel_work_sync(&res->wait_work);
	cancel_delayed_work_sync(&res->rm_work);

	sprd_pms_destroy(res->pms);
	sipa_rm_delete_dependency(res->cons_id, res->prod_id);
	sipa_rm_delete_resource(res->prod_id);
	kfree(res);
}

View File

@ -1,37 +0,0 @@
/*
* Copyright (C) 2018-2019 Unisoc Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#ifndef PCIE_SIPA_RES_H
#define PCIE_SIPA_RES_H

#include "../include/sipa.h"

/*
 * pcie_sipa_res_create - create pcie res for sipa module.
 * @dst: remote system id.
 * @prod_id: which res is the producer.
 * @cons_id: which res is the consumer.
 *
 * Returns:
 * failed, return NULL,
 * succ, return a void * pointer.
 */
void *pcie_sipa_res_create(u8 dst, enum sipa_rm_res_id prod_id,
			   enum sipa_rm_res_id cons_id);

/*
 * pcie_sipa_res_destroy - destroy pcie res for sipa module.
 * @res: the pointer returned by pcie_sipa_res_create.
 */
void pcie_sipa_res_destroy(void *res);
#endif

View File

@ -1,126 +0,0 @@
/*
* This file contains work-arounds for many known PCI hardware
* bugs. Devices present only on certain architectures (host
* bridges et cetera) should be handled in arch-specific code.
*
* Note: any quirks for hotpluggable devices must _NOT_ be declared __init.
*
* Copyright (c) 1999 Martin Mares <mj@ucw.cz>
*
* Init/reset quirks for USB host controllers should be in the
* USB quirks file, where their drivers can access reuse it.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/acpi.h>
#include <linux/kallsyms.h>
#include <linux/dmi.h>
#include <linux/version.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION( 5,4,0 ))
#include <linux/pci-aspm.h>
#endif
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/ktime.h>
#include <linux/mm.h>
#include <asm/dma.h> /* isa_dma_bridge_buggy */
#ifndef PCI_VENDOR_ID_SYNOPSYS
#define PCI_VENDOR_ID_SYNOPSYS 0x16c3
#endif
/*
* It's possible that ep bar size is larger than rc allocated
* memory, so need to resize ep bar to small size.
* Original ep bar size:bar0:256MB, bar1:64kb, bar2:256MB,
* bar3: 64kb, bar4:256MB, bar5:64kb.
* resize to bar0:8MB, bar1:64kb, bar2:2MB, bar3: 64kb,
* bar4:2MB, bar5:64kb.
*/
#define SPRD_PCI_BAR0 0x10
#define SPRD_BAR_NUM 0x6
#define SPRD_PCI_MISC_CTRL1_OFF 0x8bc
#define SPRD_PCI_DBI_RO_WR_EN (0x1 << 0)
#define SPRD_PCI_RESIZABLE_BAR_EXTENDED_CAP_HEADER 0x260
#define SPRD_PCI_RESIZABLE_BAR_EXTENDED_CAPID 0x15
/* Resizable BAR Capability Register */
#define SPRD_PCI_RESIZABLE_BAR0 0x264
#define SPRD_PCI_RESIZABLE_BAR2 0x26c
#define SPRD_PCI_RESIZABLE_BAR4 0x274
#define SPRD_BAR_SUPPORT_2MB (0x1 << 5)
#define SPRD_BAR_SUPPORT_4MB (0x1 << 6)
#define SPRD_BAR_SUPPORT_8MB (0x1 << 7)
/* Resizable BAR Control Register */
#define SPRD_PCI_RESIZABLE_BAR0_CTL 0x268
#define SPRD_PCI_RESIZABLE_BAR2_CTL 0x270
#define SPRD_PCI_RESIZABLE_BAR4_CTL 0x278
/* bit[13:8] is bar size */
#define SPRD_PCI_RESIZABLE_BAR_SIZE_MASK 0x3F00
#define SPRD_PCI_RESIZABLE_2MB (0x1 << 8)
#define SPRD_PCI_RESIZABLE_4MB (0x2 << 8)
#define SPRD_PCI_RESIZABLE_8MB (0x3 << 8)
#define SIZE(val) ((~(val & 0xFFFFFFF0)) + 1)
/* Early fixup for the Synopsys/SPRD ep: shrink BAR0/2/4 to 4MB so they fit
 * in the RC's allocated window (see the table in the comment above).
 *
 * Sequence: verify the resizable-BAR extended capability, unlock the DBI
 * read-only registers, advertise the 4MB/8MB sizes, relock, program 4MB
 * in each BAR control register, then size-probe each BAR for the log.
 */
static void quirk_sprd_pci_resizebar(struct pci_dev *dev)
{
	u32 val, i, backup;

	pci_read_config_dword(dev,
			      SPRD_PCI_RESIZABLE_BAR_EXTENDED_CAP_HEADER, &val);
	/* Fix: the capability id occupies bits [15:0] of the extended
	 * capability header. The old test masked with the id value itself
	 * (0x15), so any id with bits 0/2/4 set passed by accident.
	 */
	if ((val & 0xFFFF) != SPRD_PCI_RESIZABLE_BAR_EXTENDED_CAPID) {
		dev_info(&dev->dev, "%s: not support resize bar\n", __func__);
		return;
	}

	/* unlock DBI read-only registers */
	pci_read_config_dword(dev, SPRD_PCI_MISC_CTRL1_OFF, &val);
	val |= SPRD_PCI_DBI_RO_WR_EN;
	pci_write_config_dword(dev, SPRD_PCI_MISC_CTRL1_OFF, val);

	/* advertise 4MB and 8MB support on BAR0/2/4 */
	pci_read_config_dword(dev, SPRD_PCI_RESIZABLE_BAR0, &val);
	pci_write_config_dword(dev, SPRD_PCI_RESIZABLE_BAR0,
			       val | SPRD_BAR_SUPPORT_4MB |
			       SPRD_BAR_SUPPORT_8MB);
	pci_read_config_dword(dev, SPRD_PCI_RESIZABLE_BAR2, &val);
	pci_write_config_dword(dev, SPRD_PCI_RESIZABLE_BAR2,
			       val | SPRD_BAR_SUPPORT_4MB |
			       SPRD_BAR_SUPPORT_8MB);
	pci_read_config_dword(dev, SPRD_PCI_RESIZABLE_BAR4, &val);
	pci_write_config_dword(dev, SPRD_PCI_RESIZABLE_BAR4,
			       val | SPRD_BAR_SUPPORT_4MB |
			       SPRD_BAR_SUPPORT_8MB);

	/* relock DBI read-only registers */
	pci_read_config_dword(dev, SPRD_PCI_MISC_CTRL1_OFF, &val);
	val &= ~SPRD_PCI_DBI_RO_WR_EN;
	pci_write_config_dword(dev, SPRD_PCI_MISC_CTRL1_OFF, val);

	/* select the 4MB size in each BAR control register */
	pci_read_config_dword(dev, SPRD_PCI_RESIZABLE_BAR0_CTL, &val);
	pci_write_config_dword(dev, SPRD_PCI_RESIZABLE_BAR0_CTL,
			       (val & (~SPRD_PCI_RESIZABLE_BAR_SIZE_MASK)) |
			       SPRD_PCI_RESIZABLE_4MB);
	pci_read_config_dword(dev, SPRD_PCI_RESIZABLE_BAR2_CTL, &val);
	pci_write_config_dword(dev, SPRD_PCI_RESIZABLE_BAR2_CTL,
			       (val & (~SPRD_PCI_RESIZABLE_BAR_SIZE_MASK)) |
			       SPRD_PCI_RESIZABLE_4MB);
	pci_read_config_dword(dev, SPRD_PCI_RESIZABLE_BAR4_CTL, &val);
	pci_write_config_dword(dev, SPRD_PCI_RESIZABLE_BAR4_CTL,
			       (val & (~SPRD_PCI_RESIZABLE_BAR_SIZE_MASK)) |
			       SPRD_PCI_RESIZABLE_4MB);

	/* size-probe every BAR (write all-ones, read, restore) and log it */
	for (i = 0; i < SPRD_BAR_NUM; i++) {
		pci_read_config_dword(dev, SPRD_PCI_BAR0 + i * 4, &backup);
		pci_write_config_dword(dev, SPRD_PCI_BAR0 + i * 4, 0xFFFFFFFF);
		pci_read_config_dword(dev, SPRD_PCI_BAR0 + i * 4, &val);
		pci_write_config_dword(dev, SPRD_PCI_BAR0 + i * 4, backup);
		dev_info(&dev->dev, "%s: bar%d size 0x%x\n",
			 __func__, i, SIZE(val));
	}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SYNOPSYS, 0xabcd, quirk_sprd_pci_resizebar);

View File

@ -1 +0,0 @@
obj-y += power_manager.o

View File

@ -1,964 +0,0 @@
/*
* Copyright (C) 2019 Spreadtrum Communications Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/version.h>
#include "../include/sprd_mpm.h"
#include "../include/sipc.h"
/*
 * The data struct of modem power manager.
 */
struct sprd_mpm_data {
#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,18,0 ))
	struct wakeup_source *ws;	/* system wakeup source */
#else
	struct wakeup_source ws;
#endif
	struct list_head pms_list;	/* all pms attached to this mpm */
	struct timer_list timer;	/* delayed-deactive timer */
	spinlock_t mpm_lock;		/* protects counters, list, state */
	char name[20];
	const char *last_name;		/* name of the last pms that upped us */
	unsigned int dst;		/* remote system id */
	unsigned int up_cnt;		/* active pms references */
	unsigned int awake_cnt;		/* times the wakelock was taken */
	unsigned int wakelock_cnt;	/* current wakelock holders */
	unsigned int mpm_state;		/* SPRD_MPM_BUSY / SPRD_MPM_IDLE */
	unsigned long expires;		/* deactive deadline, 0 = no timer */
	unsigned int later_idle;	/* ms to linger before going idle */

	/* resource ops functions (run/sleep in workqueue context) */
	int (*wait_resource)(unsigned int dst, int timeout);
	int (*request_resource)(unsigned int dst);
	int (*release_resource)(unsigned int dst);
	struct work_struct release_res_work;
	struct work_struct request_res_work;
};

/*
 * Save all the instance of mpm in here.
 */
static struct sprd_mpm_data *g_sprd_mpm[SIPC_ID_NR];
/**
 * sprd_mpm_print_awake
 * print the wake up list to known who prevent system sleep.
 *
 * Emits one line per pms that is currently awake or whose awake_cnt has
 * changed since the last print. NOTE(review): the pms_list walk is done
 * without taking mpm_lock — presumably safe only because this runs from
 * the suspend-prepare path; confirm against concurrent pms destroy.
 */
static void sprd_mpm_print_awake(struct sprd_mpm_data *mpm)
{
	struct sprd_pms *pms;
	char *awake_info;
	int len = 0, max_len = 512;

	awake_info = kmalloc(max_len, GFP_KERNEL);
	if (!awake_info)
		return;

	/* print pms list */
	list_for_each_entry(pms, &mpm->pms_list, entry) {
		if (!pms->awake && pms->pre_awake_cnt == pms->awake_cnt)
			continue;

		pms->pre_awake_cnt = pms->awake_cnt;
		snprintf(awake_info + len,
			 max_len - len,
			 "%s is awake, awake_cnt = %d\n",
			 pms->name,
			 pms->awake_cnt);
		len = strlen(awake_info);
	}

	if (len)
		pr_info("mpm: %s\n", awake_info);

	kfree(awake_info);
}
/**
 * sprd_mpm_pm_event
 * monitor the PM_SUSPEND_PREPARE event.
 *
 * On suspend prepare/resume, dump which pms still hold wakelocks so the
 * blocker of system sleep can be identified from the log.
 */
static int sprd_mpm_pm_event(struct notifier_block *notifier,
			     unsigned long pm_event, void *unused)
{
	unsigned int i;
	struct sprd_mpm_data *cur;

	switch (pm_event) {
	case PM_SUSPEND_PREPARE:
	case PM_POST_SUSPEND:
		/* check if has wake lock. */
		for (i = 0; i < SIPC_ID_NR; i++) {
			if (!g_sprd_mpm[i])
				continue;

			cur = g_sprd_mpm[i];
			sprd_mpm_print_awake(cur);
		}
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}

/*
 * The pm event notify data, for the register pm notifier.
 */
static struct notifier_block sprd_mpm_notifier_block = {
	.notifier_call = sprd_mpm_pm_event,
};
/**
 * sprd_mpm_request_resource
 * request resource.
 * Defers the (possibly sleeping) request_resource op to the work queue;
 * no-op when no op is registered.
 */
static void sprd_mpm_request_resource(struct sprd_mpm_data *mpm)
{
	if (mpm->request_resource)
		schedule_work(&mpm->request_res_work);
}

/**
 * sprd_mpm_release_resource
 * release resource.
 * Defers the release_resource op to the work queue; no-op when no op is
 * registered.
 */
static void sprd_mpm_release_resource(struct sprd_mpm_data *mpm)
{
	if (mpm->release_resource)
		schedule_work(&mpm->release_res_work);
}

/**
 * sprd_mpm_wait_resource -wait resource.
 * Synchronously waits via the registered op; returns 0 when no op is set.
 * Signal interruptions (-ERESTARTSYS) and pure polls (timeout == 0) are
 * not logged as errors.
 */
static int sprd_mpm_wait_resource(struct sprd_mpm_data *mpm, int timeout)
{
	int ret = 0;

	if (mpm->wait_resource) {
		ret = mpm->wait_resource(mpm->dst, timeout);
		if (ret < 0 && ret != -ERESTARTSYS && timeout)
			pr_err("mpm: %s wait resource, ret=%d, timeout=%d.\n",
			       mpm->name, ret, timeout);
	}

	return ret;
}
/**
 * sprd_mpm_active
 * set the state to busy.
 * Caller holds mpm_lock; the actual resource request runs later from the
 * work queue.
 */
static void sprd_mpm_active(struct sprd_mpm_data *mpm)
{
	pr_debug("mpm: %s active, set state to busy.\n", mpm->name);

	mpm->mpm_state = SPRD_MPM_BUSY;
	sprd_mpm_request_resource(mpm);
}

/**
 * sprd_mpm_deactive
 * del the idle timer,
 * set the state to idle.
 * Caller holds mpm_lock; clearing expires marks the timer as cancelled
 * for the timer callback.
 */
static void sprd_mpm_deactive(struct sprd_mpm_data *mpm)
{
	pr_debug("mpm: %s deactive, set state to idle.\n", mpm->name);

	mpm->mpm_state = SPRD_MPM_IDLE;
	mpm->expires = 0;
	sprd_mpm_release_resource(mpm);
}

/**
 * sprd_mpm_start_deactive
 * start the deactive timer.
 * Caller holds mpm_lock. expires is forced non-zero because 0 means
 * "timer cancelled" elsewhere.
 */
static void sprd_mpm_start_deactive(struct sprd_mpm_data *mpm)
{
	pr_debug("mpm: %s start deactive.\n", mpm->name);

	mpm->expires = jiffies + msecs_to_jiffies(mpm->later_idle);
	if (!mpm->expires)
		mpm->expires = 1;

	mod_timer(&mpm->timer, mpm->expires);
}
/**
 * sprd_mpm_request_res_work_fn
 * Workqueue body that invokes the registered request_resource op (the work
 * is only scheduled when the op exists); failures are just logged.
 */
static void sprd_mpm_request_res_work_fn(struct work_struct *work)
{
	struct sprd_mpm_data *mpm = container_of(work, struct sprd_mpm_data,
						 request_res_work);
	int err;

	pr_debug("mpm: %s request res work.\n", mpm->name);

	err = mpm->request_resource(mpm->dst);
	if (err)
		pr_err("mpm: %s request res, ret = %d.\n", mpm->name, err);
}
/**
 * sprd_mpm_release_res_work_fn
 * Workqueue body that invokes the registered release_resource op (the work
 * is only scheduled when the op exists); failures are just logged.
 */
static void sprd_mpm_release_res_work_fn(struct work_struct *work)
{
	struct sprd_mpm_data *mpm = container_of(work, struct sprd_mpm_data,
						 release_res_work);
	int ret;

	/* Fix: debug message had a typo ("releae"). */
	pr_debug("mpm: %s release res work.\n", mpm->name);

	ret = mpm->release_resource(mpm->dst);
	if (ret)
		/* Fix: copy-paste from the request path logged "request res"
		 * here, mislabelling release failures in the log.
		 */
		pr_err("mpm: %s release res, ret = %d.\n", mpm->name, ret);
}
/**
 * sprd_mpm_deactive_timer_fn
 * in a period of time (mpm->later_idle),
 * have no modem resource request,
 * we consider that it doesn't need modem resource,
 * than set the state to idle.
 *
 * Signature differs across kernel versions: pre-4.10 timers pass an
 * unsigned long cookie, newer ones pass the timer_list pointer.
 */
static void sprd_mpm_deactive_timer_fn(
#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 4,10,0 ))
	unsigned long data)
{
	struct sprd_mpm_data *mpm = (struct sprd_mpm_data *)data;
#else
	struct timer_list *t)
{
	struct sprd_mpm_data *mpm = from_timer(mpm, t, timer);
#endif
	unsigned long flags;

	pr_debug("mpm: %s deactive timer.\n", mpm->name);

	spin_lock_irqsave(&mpm->mpm_lock, flags);
	/* expires is 0, means the timer has been cancelled. */
	if (mpm->expires)
		sprd_mpm_deactive(mpm);
	spin_unlock_irqrestore(&mpm->mpm_lock, flags);
}
/**
 * sprd_pms_cancel_timer
 * cancel the pms wakelock timer.
 * expires == 0 doubles as the "no timer pending" flag, guarded by
 * expires_lock.
 */
static void sprd_pms_cancel_timer(struct sprd_pms *pms)
{
	unsigned long flags;
	bool print = false;

	spin_lock_irqsave(&pms->expires_lock, flags);
	if (pms->expires) {
		print = true;
		pms->expires = 0;
		del_timer(&pms->wake_timer);
	}
	spin_unlock_irqrestore(&pms->expires_lock, flags);

	if (print)
		pr_debug("pms: %s del timer.\n", pms->name);
}

/**
 * sprd_mpm_cancel_timer
 * cancel the deactive timer.
 * Caller holds mpm_lock; expires == 0 means "no timer pending".
 */
static void sprd_mpm_cancel_timer(struct sprd_mpm_data *mpm)
{
	if (mpm->expires) {
		pr_debug("mpm: %s del timer.\n", mpm->name);

		mpm->expires = 0;
		del_timer(&mpm->timer);
	}
}
/**
 * sprd_mpm_up
 * modem power manger power up.
 * @name: the pms name that triggered this, recorded for debugfs.
 */
static void sprd_mpm_up(struct sprd_mpm_data *mpm, const char *name)
{
	unsigned long flags;

	spin_lock_irqsave(&mpm->mpm_lock, flags);

	/* first cancel deactive timer */
	sprd_mpm_cancel_timer(mpm);
	mpm->last_name = name;
	mpm->up_cnt++;
	/* when up_cnt is change form 0 to 1, ready active pms.
	 * Although the cnt is 0, but later down, the state may is still busy,
	 * so here must see whether the mpm state is idle.
	 */
	if (mpm->up_cnt == 1 &&
	    mpm->mpm_state == SPRD_MPM_IDLE)
		sprd_mpm_active(mpm);

	spin_unlock_irqrestore(&mpm->mpm_lock, flags);

	pr_debug("mpm: %s up, up_cnt=%d.\n", mpm->name, mpm->up_cnt);
}

/**
 * sprd_mpm_down
 * modem power manger power down.
 * @immediately: skip the later_idle grace period and deactive right away.
 */
static void sprd_mpm_down(struct sprd_mpm_data *mpm, bool immediately)
{
	unsigned long flags;

	/*
	 * when up_cnt count is change form 1 to 0,
	 * start deactive pms.
	 */
	spin_lock_irqsave(&mpm->mpm_lock, flags);
	mpm->up_cnt--;
	if (!mpm->up_cnt) {
		if (mpm->later_idle && !immediately)
			sprd_mpm_start_deactive(mpm);
		else
			sprd_mpm_deactive(mpm);
	}
	spin_unlock_irqrestore(&mpm->mpm_lock, flags);

	pr_debug("mpm: %s down, up_cnt=%d.\n", mpm->name, mpm->up_cnt);
}
/**
 * sprd_mpm_stay_awake
 * modem power manager stay awake.
 */
static void sprd_mpm_stay_awake(struct sprd_mpm_data *mpm)
{
	unsigned long flags;

	/*
	 * when wakelock_cnt is change form 0 to 1,
	 * get the system wake lock.
	 */
	spin_lock_irqsave(&mpm->mpm_lock, flags);

	mpm->wakelock_cnt++;
	if (mpm->wakelock_cnt == 1) {
		mpm->awake_cnt++;
#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,18,0 ))
		__pm_stay_awake(mpm->ws);
#else
		__pm_stay_awake(&mpm->ws);
#endif
	}
	spin_unlock_irqrestore(&mpm->mpm_lock, flags);

	pr_debug("mpm: %s wake, wake_cnt=%d\n",
		 mpm->name, mpm->wakelock_cnt);
}

/**
 * sprd_mpm_relax
 * modem power manager relax wakelock.
 */
static void sprd_mpm_relax(struct sprd_mpm_data *mpm)
{
	unsigned long flags;

	/*
	 * when wakelock_cnt is change from 1 to 0,
	 * release the system wake lock.
	 */
	spin_lock_irqsave(&mpm->mpm_lock, flags);

	mpm->wakelock_cnt--;
	if (!mpm->wakelock_cnt)
#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,18,0 ))
		__pm_relax(mpm->ws);
#else
		__pm_relax(&mpm->ws);
#endif
	spin_unlock_irqrestore(&mpm->mpm_lock, flags);

	pr_debug("mpm: %s relax wake, wake_cnt=%d\n",
		 mpm->name, mpm->wakelock_cnt);
}
/**
 * sprd_pms_do_up_single
 * do pms power up.
 * Single-task variant: active_cnt is not lock-protected; only one caller
 * at a time is expected (pms->multitask == false).
 */
static void sprd_pms_do_up_single(struct sprd_pms *pms)
{
	struct sprd_mpm_data *mpm = (struct sprd_mpm_data *)pms->data;

	/*
	 * when active_cnt is change form 0 to 1, mpm up.
	 */
	pms->active_cnt++;
	if (pms->active_cnt == 1)
		sprd_mpm_up(mpm, pms->name);

	pr_debug("pms: %s up, active_cnt=%d.\n",
		 pms->name, pms->active_cnt);
}

/**
 * sprd_pms_do_up_multi
 * do pms power up.
 * Multi-task variant: active_cnt is guarded by active_lock; the mpm call
 * is made outside the lock.
 */
static void sprd_pms_do_up_multi(struct sprd_pms *pms)
{
	struct sprd_mpm_data *mpm = (struct sprd_mpm_data *)pms->data;
	unsigned long flags;
	bool active = false;

	/*
	 * when active_cnt is change form 0 to 1, mpm up.
	 */
	spin_lock_irqsave(&pms->active_lock, flags);
	pms->active_cnt++;
	if (pms->active_cnt == 1)
		active = true;
	spin_unlock_irqrestore(&pms->active_lock, flags);

	pr_debug("pms: %s up, active_cnt=%d.\n",
		 pms->name, pms->active_cnt);

	if (active)
		sprd_mpm_up(mpm, pms->name);
}

/* Dispatch to the locked or unlocked variant per pms->multitask. */
static void sprd_pms_do_up(struct sprd_pms *pms)
{
	if (pms->multitask)
		sprd_pms_do_up_multi(pms);
	else
		sprd_pms_do_up_single(pms);
}
/**
 * sprd_pms_do_down_single
 * do pms power down.
 * Single-task variant; active_cnt never goes below zero.
 */
static void sprd_pms_do_down_single(struct sprd_pms *pms, bool immediately)
{
	struct sprd_mpm_data *mpm = (struct sprd_mpm_data *)pms->data;

	/*
	 * when active_cnt is change form 1 to 0, mpm down.
	 */
	if (pms->active_cnt > 0) {
		pms->active_cnt--;
		if (pms->active_cnt == 0)
			sprd_mpm_down(mpm, immediately);
	}

	pr_debug("pms: %s down, active_cnt=%d.\n",
		 pms->name, pms->active_cnt);
}

/**
 * sprd_pms_do_down
 * do pms power down.
 * Multi-task variant: active_cnt guarded by active_lock; the mpm call is
 * made outside the lock.
 */
static void sprd_pms_do_down_multi(struct sprd_pms *pms, bool immediately)
{
	struct sprd_mpm_data *mpm = (struct sprd_mpm_data *)pms->data;
	unsigned long flags;
	bool deactive = false;

	/*
	 * when active_cnt is change form 1 to 0, mpm down.
	 */
	spin_lock_irqsave(&pms->active_lock, flags);
	if (pms->active_cnt > 0) {
		pms->active_cnt--;
		if (pms->active_cnt == 0)
			deactive = true;
	}
	spin_unlock_irqrestore(&pms->active_lock, flags);

	pr_debug("pms: %s down, active_cnt=%d.\n",
		 pms->name, pms->active_cnt);

	if (deactive)
		sprd_mpm_down(mpm, immediately);
}

/* Dispatch to the locked or unlocked variant per pms->multitask. */
static void sprd_pms_do_down(struct sprd_pms *pms, bool immediately)
{
	if (pms->multitask)
		sprd_pms_do_down_multi(pms, immediately);
	else
		sprd_pms_do_down_single(pms, immediately);
}
/**
 * sprd_pms_stay_awake
 * Take the mpm wakelock on behalf of this pms; idempotent while awake
 * (only the 0->awake transition reaches the mpm).
 */
static void sprd_pms_stay_awake(struct sprd_pms *pms)
{
	struct sprd_mpm_data *mpm = pms->data;

	pr_debug("pms: %s stay awake.\n", pms->name);

	pms->awake_cnt++;
	if (pms->awake)
		return;

	pms->awake = true;
	sprd_mpm_stay_awake(mpm);
}
/**
 * sprd_pms_relax
 * Drop this pms' hold on the mpm wakelock; no-op when not awake.
 */
static void sprd_pms_relax(struct sprd_pms *pms)
{
	struct sprd_mpm_data *mpm = pms->data;

	pr_debug("pms: %s relax awake.\n", pms->name);

	if (!pms->awake)
		return;

	pms->awake = false;
	sprd_mpm_relax(mpm);
}
/**
 * sprd_pms_relax_wakelock_timer
 * the timer process function of pms delay release wakelock.
 * Signature differs across kernel versions (cookie vs timer_list *).
 */
static void sprd_pms_relax_wakelock_timer(
#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 4,10,0 ))
	unsigned long data)
{
	struct sprd_pms *pms = (struct sprd_pms *)data;
#else
	struct timer_list *t)
{
	struct sprd_pms *pms = from_timer(pms, t, wake_timer);
#endif
	unsigned long flags;
	bool relax = false;

	pr_debug("pms: %s timer down.\n", pms->name);

	spin_lock_irqsave(&pms->expires_lock, flags);
	/*
	 * if jiffies < pms->expires, mpm called has been canceled,
	 * don't call sprd_pms_down.
	 */
	if (pms->expires && time_after_eq(jiffies, pms->expires)) {
		pms->expires = 0;
		relax = true;
	}
	spin_unlock_irqrestore(&pms->expires_lock, flags);

	if (relax)
		sprd_pms_relax(pms);
}
/**
 * sprd_mpm_create - create the modem power manager for one remote system.
 * @dst: remote system id (< SIPC_ID_NR).
 * @name: base name; stored as "<name>-mpm-<dst>".
 * @later_idle: grace period in ms before going idle after the last down
 *              (0 = deactive immediately).
 *
 * Publishes the instance in g_sprd_mpm[@dst]. Returns 0 on success,
 * -EINVAL on a bad @dst, -ENOMEM on allocation failure.
 */
int sprd_mpm_create(unsigned int dst, const char *name,
		    unsigned int later_idle)
{
	struct sprd_mpm_data *mpm;

	if (dst >= SIPC_ID_NR)
		return -EINVAL;

	mpm = kzalloc(sizeof(*mpm), GFP_KERNEL);
	if (!mpm)
		return -ENOMEM;

	snprintf(mpm->name, sizeof(mpm->name), "%s-mpm-%d", name, dst);

	mpm->dst = dst;
	mpm->later_idle = later_idle;
	spin_lock_init(&mpm->mpm_lock);
	INIT_LIST_HEAD(&mpm->pms_list);

#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,18,0 ))
	mpm->ws = wakeup_source_register(NULL, mpm->name);
#else
	wakeup_source_init(&mpm->ws, mpm->name);
#endif

#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 4,10,0 ))
	setup_timer(&mpm->timer,
		    sprd_mpm_deactive_timer_fn,
		    (unsigned long)mpm);
#else
	timer_setup(&mpm->timer,
		    sprd_mpm_deactive_timer_fn,
		    0);
#endif

	INIT_WORK(&mpm->request_res_work, sprd_mpm_request_res_work_fn);
	INIT_WORK(&mpm->release_res_work, sprd_mpm_release_res_work_fn);

	g_sprd_mpm[dst] = mpm;

	return 0;
}
/**
 * sprd_mpm_init_resource_ops - install the resource callbacks of one mpm.
 * @dst: remote system id.
 * @wait_resource/@request_resource/@release_resource: ops invoked by the
 * mpm when its up/down state changes.
 *
 * Returns 0 on success, -EINVAL for a bad @dst, -ENODEV when the mpm has
 * not been created yet.
 */
int sprd_mpm_init_resource_ops(unsigned int dst,
			       int (*wait_resource)(unsigned int dst,
						    int timeout),
			       int (*request_resource)(unsigned int dst),
			       int (*release_resource)(unsigned int dst))
{
	struct sprd_mpm_data *cur;

	if (dst >= SIPC_ID_NR)
		return -EINVAL;

	cur = g_sprd_mpm[dst];
	if (!cur)
		return -ENODEV;

	cur->wait_resource = wait_resource;
	cur->request_resource = request_resource;
	cur->release_resource = release_resource;

	return 0;
}
/**
 * sprd_mpm_destroy - tear down the mpm created by sprd_mpm_create().
 * @dst: remote system id.
 *
 * Cancels the deactive timer and both works, detaches every pms still on
 * the list (the pms objects themselves stay owned by their creators and
 * are freed by sprd_pms_destroy), then frees the mpm.
 *
 * Returns 0 on success, -EINVAL/-ENODEV on a bad or unknown @dst.
 */
int sprd_mpm_destroy(unsigned int dst)
{
	struct sprd_pms *pms, *temp;
	struct sprd_mpm_data *mpm;
	unsigned long flags;

	if (dst >= SIPC_ID_NR)
		return -EINVAL;

	mpm = g_sprd_mpm[dst];
	if (!mpm)
		return -ENODEV;

	sprd_mpm_cancel_timer(mpm);
	cancel_work_sync(&mpm->request_res_work);
	cancel_work_sync(&mpm->release_res_work);

	spin_lock_irqsave(&mpm->mpm_lock, flags);
	list_for_each_entry_safe(pms,
				 temp,
				 &mpm->pms_list,
				 entry) {
		sprd_pms_cancel_timer(pms);
		list_del(&pms->entry);
	}
	spin_unlock_irqrestore(&mpm->mpm_lock, flags);

	kfree(mpm);
	g_sprd_mpm[dst] = NULL;

	return 0;
}
/**
 * sprd_pms_create - create a power manager source on an existing mpm.
 * @dst: remote system id whose mpm this pms attaches to.
 * @name: source name; only the pointer is stored, so the string must
 *        outlive the pms.
 * @multitask: true to make the up/down counting spinlock-protected.
 *
 * Returns the new pms linked into the mpm's list, or NULL on failure
 * (bad @dst, mpm missing, or allocation failure).
 */
struct sprd_pms *sprd_pms_create(unsigned int dst,
				 const char *name, bool multitask)
{
	unsigned long flags;
	struct sprd_pms *pms;
	struct sprd_mpm_data *mpm;

	if (dst >= SIPC_ID_NR)
		return NULL;

	mpm = g_sprd_mpm[dst];
	if (!mpm) {
		pr_err("mpm: %s pms init failed, dst=%d.\n", name, dst);
		return NULL;
	}

	pms = kzalloc(sizeof(*pms), GFP_KERNEL);
	if (!pms)
		return NULL;

	pms->multitask = multitask;
	pms->name = name;
	pms->data = (void *)mpm;
	spin_lock_init(&pms->expires_lock);
	spin_lock_init(&pms->active_lock);

#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 4,10,0 ))
	setup_timer(&pms->wake_timer,
		    sprd_pms_relax_wakelock_timer, (unsigned long)pms);
#else
	timer_setup(&pms->wake_timer,
		    sprd_pms_relax_wakelock_timer, 0);
#endif

	spin_lock_irqsave(&mpm->mpm_lock, flags);
	list_add(&pms->entry, &mpm->pms_list);
	spin_unlock_irqrestore(&mpm->mpm_lock, flags);

	return pms;
}
/**
 * sprd_pms_destroy - unlink a pms from its mpm and free it.
 * @pms: source to destroy; NULL is tolerated.
 */
void sprd_pms_destroy(struct sprd_pms *pms)
{
	struct sprd_mpm_data *mpm;
	unsigned long flags;

	if (!pms)
		return;

	/* stop the delayed-relax timer before unlinking */
	sprd_pms_cancel_timer(pms);

	mpm = pms->data;
	spin_lock_irqsave(&mpm->mpm_lock, flags);
	list_del(&pms->entry);
	spin_unlock_irqrestore(&mpm->mpm_lock, flags);

	kfree(pms);
}
/**
 * sprd_pms_request_resource - request mpm resource
 *
 * @pms, the point of this pms.
 * @timeout, in ms.
 *
 * Ups the pms, then waits for the resource; on failure the up is undone
 * so the reference count stays balanced.
 *
 * Returns:
 * 0 resource ready,
 * < 0 resoure not ready,
 * -%ERESTARTSYS if it was interrupted by a signal.
 */
int sprd_pms_request_resource(struct sprd_pms *pms, int timeout)
{
	int ret;
	struct sprd_mpm_data *mpm;

	if (!pms)
		return -EINVAL;

	sprd_pms_do_up(pms);

	/* wait resource */
	mpm = (struct sprd_mpm_data *)pms->data;
	ret = sprd_mpm_wait_resource(mpm, timeout);
	if (ret)
		sprd_pms_do_down(pms, false);

	return ret;
}

/**
 * sprd_pms_release_resource - release mpm resource.
 *
 * @pms, the point of this pms.
 */
void sprd_pms_release_resource(struct sprd_pms *pms)
{
	if (pms)
		sprd_pms_do_down(pms, false);
}
/**
 * sprd_pms_request_wakelock - request wakelock
 *
 * @pms, the point of this pms.
 *
 * Cancels any pending delayed release first so the lock is held
 * indefinitely until explicitly released.
 */
void sprd_pms_request_wakelock(struct sprd_pms *pms)
{
	if (pms) {
		sprd_pms_cancel_timer(pms);
		sprd_pms_stay_awake(pms);
	}
}

/**
 * sprd_pms_release_wakelock - release wakelock
 *
 * @pms, the point of this pms.
 */
void sprd_pms_release_wakelock(struct sprd_pms *pms)
{
	if (pms) {
		sprd_pms_cancel_timer(pms);
		sprd_pms_relax(pms);
	}
}

/**
 * sprd_pms_request_wakelock_period -
 * request wake lock, and will auto reaslse in msec ms.
 *
 * @pms, the point of this pms.
 * @msec, will auto reaslse in msec ms
 */
void sprd_pms_request_wakelock_period(struct sprd_pms *pms, unsigned int msec)
{
	sprd_pms_request_wakelock(pms);
	sprd_pms_release_wakelock_later(pms, msec);
}
/**
 * sprd_pms_release_wakelock_later - release wakelock later.
 *
 * @pms: pms handle.
 * @msec: delay before release (in ms).
 *
 * (Re)arms the per-pms wake timer; calling again before expiry simply
 * pushes the deadline out.
 */
void sprd_pms_release_wakelock_later(struct sprd_pms *pms,
				     unsigned int msec)
{
	unsigned long expires;
	unsigned long flags;

	if (pms) {
		pr_debug("pms: %s release wakelock after %d ms.\n",
			 pms->name, msec);

		spin_lock_irqsave(&pms->expires_lock, flags);
		expires = jiffies + msecs_to_jiffies(msec);
		/* 0 presumably means "no deadline armed" elsewhere, so avoid
		 * storing it when jiffies arithmetic wraps to 0 — TODO confirm */
		if (!expires)
			expires = 1;

		/* always update the timer with new time */
		pms->expires = expires;
		mod_timer(&pms->wake_timer, expires);
		spin_unlock_irqrestore(&pms->expires_lock, flags);
	}
}
/* Public wrapper: vote the underlying resource up (NULL-safe). */
void sprd_pms_power_up(struct sprd_pms *pms)
{
	if (!pms)
		return;

	sprd_pms_do_up(pms);
}
/* Public wrapper: vote the underlying resource down (NULL-safe).
 * @immediately selects immediate vs. deferred release semantics.
 */
void sprd_pms_power_down(struct sprd_pms *pms, bool immediately)
{
	if (!pms)
		return;

	sprd_pms_do_down(pms, immediately);
}
#if defined(CONFIG_DEBUG_FS)
/* debugfs seq_file show: dump every registered mpm and its active pms list. */
static int sprd_mpm_stats_show(struct seq_file *m, void *unused)
{
	unsigned long flags;
	struct sprd_pms *pms;
	struct sprd_mpm_data *cur;
	unsigned int i, ms;

	seq_puts(m, "---------------------------------------------\n");
	seq_puts(m, "All mpm list:\n");

	for (i = 0; i < SIPC_ID_NR; i++) {
		if (!g_sprd_mpm[i])
			continue;

		cur = g_sprd_mpm[i];
		seq_puts(m, "------------------------------------\n");
		seq_printf(m, "mpm = %s info:\n", cur->name);
		seq_printf(m, "last up module = %s info:\n",
			   cur->last_name ? cur->last_name : "null");

		/* NOTE(review): if expires is an unsigned jiffies value this
		 * test is always true once armed, and a stale (past) deadline
		 * makes jiffies_to_msecs() wrap to a huge number — consider
		 * time_after(cur->expires, jiffies); verify type of expires. */
		if (cur->expires > 0) {
			ms = jiffies_to_msecs(cur->expires - jiffies);
			seq_printf(m, "left %d ms to idle\n", ms);
		}

		seq_printf(m, "up_cnt=%d, state=%d.\n",
			   cur->up_cnt, cur->mpm_state);
		seq_printf(m, "wakelock_cnt=%d, awake_cnt=%d\n",
			   cur->wakelock_cnt, cur->awake_cnt);

		seq_puts(m, "------------------------------------\n");
		seq_puts(m, "active pms list:\n");

		/* walk the pms list under the mpm lock; print only entries
		 * that are currently active or holding a wakeup source */
		spin_lock_irqsave(&cur->mpm_lock, flags);
		list_for_each_entry(pms, &cur->pms_list, entry) {
			if (!pms->active_cnt && !pms->awake)
				continue;

			seq_printf(m, "  %s: active_cnt=%d, awake=%d\n",
				   pms->name, pms->active_cnt, pms->awake);
		}
		spin_unlock_irqrestore(&cur->mpm_lock, flags);
	}

	seq_puts(m, "---------------------------------------------\n");

	return 0;
}
/* debugfs open: standard single_open boilerplate for the stats dump. */
static int sprd_mpm_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, sprd_mpm_stats_show, NULL);
}
/* Read-only seq_file fops for /sys/kernel/debug/mpm/power_manage. */
static const struct file_operations sprd_mpm_stats_fops = {
	.owner = THIS_MODULE,
	.open = sprd_mpm_stats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/* Create debugfs dir "mpm" with the read-only "power_manage" stats file.
 * Returns 0 on success, -ENXIO if the directory could not be created.
 */
static int sprd_mpm_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("mpm", NULL);

	/* BUGFIX: modern kernels report failure with an ERR_PTR, older
	 * ones with NULL — the previous "!root" test missed the former. */
	if (IS_ERR_OR_NULL(root))
		return -ENXIO;

	debugfs_create_file("power_manage", 0444, root,
			    NULL, &sprd_mpm_stats_fops);
	return 0;
}
#endif
/* Module entry: hook system suspend/resume notifications and expose the
 * debugfs stats file (when CONFIG_DEBUG_FS is enabled). Always returns 0;
 * a debugfs setup failure is deliberately non-fatal.
 */
int modem_power_manager_init(void)
{
	register_pm_notifier(&sprd_mpm_notifier_block);

#if defined(CONFIG_DEBUG_FS)
	sprd_mpm_init_debugfs();
#endif

	return 0;
}
EXPORT_SYMBOL(modem_power_manager_init);
/* Module exit: undo modem_power_manager_init (PM notifier unhook). */
void modem_power_manager_exit(void)
{
	unregister_pm_notifier(&sprd_mpm_notifier_block);
}
EXPORT_SYMBOL(modem_power_manager_exit);

View File

@ -1,9 +0,0 @@
menu "SIPA modules"
config SPRD_SIPA
bool "sipa ipa"
default n
help
sipa is a module for the Spreadtrum IP packet accelerator driver.
endmenu

View File

@ -1,6 +0,0 @@
EXTRA_CFLAGS += -Wno-error -Wno-packed-bitfield-compat
ccflags-y += -DCONFIG_SPRD_SIPA
obj-y += sipa_core.o sipa_skb_send.o sipa_skb_recv.o \
sipa_nic.o sipa_debugfs.o sipa_dele_cmn.o \
sipa_eth.o sipa_dummy.o
obj-y += sipa_phy_v0/

View File

@ -1,333 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note.
*
* UNISOC 'virt sipa' driver
*
* Qingsheng.Li <qingsheng.li@unisoc.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License v2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/of_device.h>
#include <linux/mfd/syscon.h>
#include <linux/io.h>
#include <linux/cdev.h>
#include <linux/pm_wakeup.h>
#include <linux/pm_runtime.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include "../include/sipa.h"
#include "../include/sprd_pcie_ep_device.h"
#include "../include/sipc.h"
#include "sipa_core.h"
#define DRV_NAME "virt_sipa"
/* Single global SIPA controller instance, set once in probe. */
struct sipa_core *sipa_ctrl;

/* Accessor for the global controller; NULL until probe has run. */
struct sipa_core *sipa_get_ctrl_pointer(void)
{
	return sipa_ctrl;
}
EXPORT_SYMBOL(sipa_get_ctrl_pointer);
/* Workqueue handler: when flow control asked the net layer for free space,
 * wake the sender's free-waiters so transmission can resume.
 */
static void sipa_notify_sender_flow_ctrl(struct work_struct *work)
{
	struct sipa_core *sipa_ctrl = container_of(work, struct sipa_core,
						   flow_ctrl_work);

	if (sipa_ctrl->sender && sipa_ctrl->sender->free_notify_net)
		wake_up(&sipa_ctrl->sender->free_waitq);
}
/* Allocate the coherent DMA rings (tx + rx node-descriptor arrays) for one
 * common FIFO and record their bus addresses for the remote side.
 *
 * Both buffers are allocated against ipa->pci_dev (the real DMA-capable
 * device); sipa_free_cmn_fifo() must free against the same device.
 * Returns 0 on success or -ENOMEM.
 */
static int sipa_init_cmn_fifo(struct sipa_core *ipa,
			      enum sipa_cmn_fifo_index id)
{
	size_t size;
	dma_addr_t dma_addr;
	struct sipa_cmn_fifo_cfg_tag *cmn_fifo;

	cmn_fifo = &ipa->cmn_fifo_cfg[id];
	cmn_fifo->fifo_id = id;
	cmn_fifo->dst = SIPA_TERM_VCP;
	cmn_fifo->cur = SIPA_TERM_PCIE0;

	size = cmn_fifo->tx_fifo.depth *
		sizeof(struct sipa_node_description_tag);
	cmn_fifo->tx_fifo.virtual_addr = dma_alloc_coherent(ipa->pci_dev, size,
							    &dma_addr,
							    GFP_KERNEL);
	if (!cmn_fifo->tx_fifo.virtual_addr)
		return -ENOMEM;

	cmn_fifo->tx_fifo.dma_ptr = dma_addr;
	memset(cmn_fifo->tx_fifo.virtual_addr, 0, size);
	pr_info("comfifo%d tx_fifo addr-0x%lx\n", id, (long unsigned int)cmn_fifo->tx_fifo.virtual_addr);
	cmn_fifo->tx_fifo.fifo_base_addr_l = lower_32_bits(dma_addr);
	/* high dword of SIPA_PCIE_MEM_OFFSET (0x200000000ULL) */
	cmn_fifo->tx_fifo.fifo_base_addr_h = 0x2;

	size = cmn_fifo->rx_fifo.depth *
		sizeof(struct sipa_node_description_tag);
	cmn_fifo->rx_fifo.virtual_addr = dma_alloc_coherent(ipa->pci_dev, size,
							    &dma_addr,
							    GFP_KERNEL);
	if (!cmn_fifo->rx_fifo.virtual_addr) {
		/* BUGFIX: previously the tx ring leaked on rx alloc failure */
		size = cmn_fifo->tx_fifo.depth *
			sizeof(struct sipa_node_description_tag);
		dma_free_coherent(ipa->pci_dev, size,
				  cmn_fifo->tx_fifo.virtual_addr,
				  cmn_fifo->tx_fifo.dma_ptr);
		cmn_fifo->tx_fifo.virtual_addr = NULL;
		return -ENOMEM;
	}

	cmn_fifo->rx_fifo.dma_ptr = dma_addr;
	memset(cmn_fifo->rx_fifo.virtual_addr, 0, size);
	pr_info("comfifo%d rx_fifo addr-0x%lx\n", id, (long unsigned int)cmn_fifo->rx_fifo.virtual_addr);
	cmn_fifo->rx_fifo.fifo_base_addr_l = lower_32_bits(dma_addr);
	cmn_fifo->rx_fifo.fifo_base_addr_h = 0x2;

	return 0;
}
/* Free both DMA rings of one common FIFO.
 *
 * BUGFIX: the rings are allocated in sipa_init_cmn_fifo() against
 * ipa->pci_dev, but were freed here against ipa->dev.  The DMA API
 * requires alloc/free on the same device; free with ipa->pci_dev.
 */
static void sipa_free_cmn_fifo(struct sipa_core *ipa, enum sipa_cmn_fifo_index id)
{
	size_t size;
	struct sipa_cmn_fifo_cfg_tag *cmn_fifo;

	cmn_fifo = &ipa->cmn_fifo_cfg[id];
	size = cmn_fifo->tx_fifo.depth * sizeof(struct sipa_node_description_tag);
	dma_free_coherent(ipa->pci_dev, size, cmn_fifo->tx_fifo.virtual_addr,
			  cmn_fifo->tx_fifo.dma_ptr);
	size = cmn_fifo->rx_fifo.depth * sizeof(struct sipa_node_description_tag);
	dma_free_coherent(ipa->pci_dev, size, cmn_fifo->rx_fifo.virtual_addr,
			  cmn_fifo->rx_fifo.dma_ptr);
}
/* Wire the endpoint to its FIFO configs: UL carries sends, DL receives. */
static void sipa_init_ep(struct sipa_core *ipa)
{
	ipa->ep.send_fifo = &ipa->cmn_fifo_cfg[SIPA_FIFO_PCIE_UL];
	ipa->ep.recv_fifo = &ipa->cmn_fifo_cfg[SIPA_FIFO_PCIE_DL];
}
#ifdef SPRD_PCIE_USE_DTS
/* DT variant: read the register window and the four FIFO depths from the
 * device tree. Returns 0 on success or a negative errno on any missing
 * property.
 */
static int sipa_parse_dts_configuration(struct platform_device *pdev,
					struct sipa_core *ipa)
{
	int ret;
	struct sipa_cmn_fifo_cfg_tag *cmn_fifo;

	/* IPA register window */
	ipa->reg_res = platform_get_resource_byname(pdev,
						    IORESOURCE_MEM, "ipa-base");
	if (!ipa->reg_res) {
		dev_err(&pdev->dev, "get ipa-base res fail\n");
		return -EINVAL;
	}

	/* downlink FIFO depths */
	cmn_fifo = &ipa->cmn_fifo_cfg[SIPA_FIFO_PCIE_DL];
	ret = of_property_read_u32(pdev->dev.of_node, "pcie-dl-tx-fifo-depth",
				   &cmn_fifo->tx_fifo.depth);
	if (ret) {
		dev_err(&pdev->dev,
			"get pcie-dl-tx-fifo-depth ret = %d\n", ret);
		return ret;
	}

	ret = of_property_read_u32(pdev->dev.of_node, "pcie-dl-rx-fifo-depth",
				   &cmn_fifo->rx_fifo.depth);
	if (ret) {
		dev_err(&pdev->dev,
			"get pcie-dl-rx-fifo-depth ret = %d\n", ret);
		return ret;
	}

	/* uplink FIFO depths */
	cmn_fifo = &ipa->cmn_fifo_cfg[SIPA_FIFO_PCIE_UL];
	ret = of_property_read_u32(pdev->dev.of_node, "pcie-ul-tx-fifo-depth",
				   &cmn_fifo->tx_fifo.depth);
	if (ret) {
		dev_err(&pdev->dev,
			"get pcie-ul-tx-fifo-depth ret = %d\n", ret);
		return ret;
	}

	ret = of_property_read_u32(pdev->dev.of_node, "pcie-ul-rx-fifo-depth",
				   &cmn_fifo->rx_fifo.depth);
	if (ret) {
		dev_err(&pdev->dev,
			"get pcie-ul-rx-fifo-depth ret = %d\n", ret);
		return ret;
	}

	return 0;
}
#else
/* Non-DT variant: hard-coded register window and FIFO depths. */
static struct resource ipa_res = {
	/* fixed 8 KiB IPA register window — presumably matches the EP BAR
	 * mapping; confirm against the PCIe EP configuration */
	.start = 0x2e000000,
	.end = 0x2e000000 + 0x2000 -1,
	.flags = IORESOURCE_MEM,
};

static int sipa_parse_dts_configuration(struct platform_device *pdev,
					struct sipa_core *ipa)
{
	struct sipa_cmn_fifo_cfg_tag *cmn_fifo;

	ipa->reg_res = &ipa_res;

	/* 4096 descriptors per ring in both directions */
	cmn_fifo = &ipa->cmn_fifo_cfg[SIPA_FIFO_PCIE_DL];
	cmn_fifo->tx_fifo.depth = 4096;
	cmn_fifo->rx_fifo.depth = 4096;

	cmn_fifo = &ipa->cmn_fifo_cfg[SIPA_FIFO_PCIE_UL];
	cmn_fifo->tx_fifo.depth = 4096;
	cmn_fifo->rx_fifo.depth = 4096;

	return 0;
}
#endif
static int sipa_plat_drv_probe(struct platform_device *pdev)
{
int ret;
struct sipa_core *ipa;
struct device *dev = &pdev->dev;
struct device *pci_dev;
pci_dev = (struct device *)dev_get_drvdata(dev);
if(!pci_dev)
return -1;
ipa = devm_kzalloc(dev, sizeof(*ipa), GFP_KERNEL);
if (!ipa)
return -ENOMEM;
sipa_ctrl = ipa;
ipa->dev = dev;
ipa->pci_dev = pci_dev;
ipa->pcie_mem_offset = SIPA_PCIE_MEM_OFFSET;
dev_set_drvdata(dev, ipa);
ret = sipa_parse_dts_configuration(pdev, ipa);
if (ret)
return ret;
ret = sipa_init_cmn_fifo(ipa, SIPA_FIFO_PCIE_DL);
if (ret)
return ret;
ret = sipa_init_cmn_fifo(ipa, SIPA_FIFO_PCIE_UL);
if (ret)
return ret;
sipa_init_ep(ipa);
sipa_fifo_ops_init(&ipa->hal_ops);
INIT_WORK(&ipa->flow_ctrl_work, sipa_notify_sender_flow_ctrl);
create_sipa_skb_receiver(&ipa->ep, &ipa->receiver);
create_sipa_skb_sender(&ipa->ep, &ipa->sender);
device_init_wakeup(dev, true);
sipa_create_smsg_channel(ipa);
sprd_ep_dev_register_irq_handler(PCIE_EP_MODEM, PCIE_MSI_IPA,
(irq_handler_t)sipa_int_callback_func,
(void *)ipa);
sipa_init_debugfs(ipa);
return 0;
}
extern void destroy_sipa_skb_receiver(struct sipa_skb_receiver *receiver);
extern void destroy_sipa_skb_sender(struct sipa_skb_sender *sender);

/* Platform remove: stop the smsg thread, tear down sender/receiver,
 * release the DMA rings and debugfs entries.
 */
static int sipa_plat_drv_remove(struct platform_device *pdev)
{
	struct sipa_core *ipa;

	ipa = dev_get_drvdata(&pdev->dev);
	smsg_ch_close(SIPC_ID_MINIAP, SMSG_CH_COMM_SIPA, 1000);
	if (ipa->smsg_thread) {
		kthread_stop(ipa->smsg_thread);
		ipa->smsg_thread = NULL;
	}
	destroy_sipa_skb_sender(ipa->sender);
	destroy_sipa_skb_receiver(ipa->receiver);
	cancel_work_sync(&ipa->flow_ctrl_work);
	/* BUGFIX: mdelay(1000) busy-spun a CPU for a full second; remove()
	 * runs in process context, so sleep instead.  The one-second grace
	 * period itself (presumably letting in-flight DMA drain) is kept —
	 * TODO confirm it is still required. */
	msleep(1000);
	sipa_free_cmn_fifo(ipa, SIPA_FIFO_PCIE_UL);
	sipa_free_cmn_fifo(ipa, SIPA_FIFO_PCIE_DL);
	if (!IS_ERR_OR_NULL(ipa->dentry))
		debugfs_remove_recursive(ipa->dentry);
	devm_kfree(&pdev->dev, ipa);
	platform_set_drvdata(pdev, NULL);
	return 0;
}
#ifdef SPRD_PCIE_USE_DTS
static const struct of_device_id sipa_plat_drv_match[] = {
	{ .compatible = "sprd,virt-sipa"},
	/* BUGFIX: of_device_id tables must be terminated by an empty entry,
	 * otherwise the OF match walk runs off the end of the array. */
	{ /* sentinel */ },
};
#endif
/* Platform driver glue; matched either by DT compatible (when
 * SPRD_PCIE_USE_DTS) or by the "virt_sipa" device name registered in
 * sipa_core_platform_device_reigster().
 */
static struct platform_driver sipa_plat_drv = {
	.probe = sipa_plat_drv_probe,
	.remove = sipa_plat_drv_remove,
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
#ifdef SPRD_PCIE_USE_DTS
		.of_match_table = sipa_plat_drv_match,
#endif
	},
};
#ifndef SPRD_PCIE_USE_DTS
/* Manually created platform device used when no DT node exists. */
static struct platform_device *sipa_plat_dev;

/* Register a "virt_sipa" platform device that inherits DMA settings from
 * the real PCI device @dev and stashes @dev in drvdata for the probe.
 * Returns 0 on success or a negative errno.
 * (NOTE: "reigster" is a long-standing typo; kept because the symbol is
 * referenced elsewhere in this file.)
 */
static int sipa_core_platform_device_reigster(struct device *dev)
{
	int retval = -ENOMEM;

	sipa_plat_dev = platform_device_alloc("virt_sipa", -1);
	if (!sipa_plat_dev)
		return retval;

	/* inherit DMA capabilities from the PCI parent */
	sipa_plat_dev->dev.dma_mask = dev->dma_mask;
	sipa_plat_dev->dev.coherent_dma_mask = dev->coherent_dma_mask;
	sipa_plat_dev->dev.archdata = dev->archdata;
	dev_set_drvdata(&sipa_plat_dev->dev, dev);
	retval = platform_device_add(sipa_plat_dev);
	if (retval < 0)
		platform_device_put(sipa_plat_dev);

	return retval;
}
#endif
/* Public entry: create the backing platform device (non-DT builds) and
 * register the platform driver.
 * NOTE(review): the device-registration result is ignored; if it fails,
 * probe will simply never run — consider propagating the error.
 */
int sipa_module_init(struct device *dev)
{
#ifndef SPRD_PCIE_USE_DTS
	sipa_core_platform_device_reigster(dev);
#endif
	return platform_driver_register(&sipa_plat_drv);
}
EXPORT_SYMBOL(sipa_module_init);
/* Public exit: unregister driver first so remove() runs, then drop the
 * manually created platform device (non-DT builds).
 */
void sipa_module_exit(void)
{
	platform_driver_unregister(&sipa_plat_drv);
#ifndef SPRD_PCIE_USE_DTS
	platform_device_unregister(sipa_plat_dev);
#endif
}
EXPORT_SYMBOL(sipa_module_exit);

View File

@ -1,519 +0,0 @@
/*
* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _SIPA_CORE_H_
#define _SIPA_CORE_H_
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <asm/byteorder.h>
/* Common FIFO identifiers: one downlink and one uplink ring pair. */
enum sipa_cmn_fifo_index {
	SIPA_FIFO_PCIE_DL,
	SIPA_FIFO_PCIE_UL,
	SIPA_FIFO_MAX,
};

/* Hardware interrupt event bits reported to sipa_irq_notify_cb. */
enum sipa_irq_evt_type {
	SIPA_IRQ_TX_FIFO_THRESHOLD_SW = BIT(22),
	SIPA_IRQ_EXIT_FLOW_CTRL = BIT(20),
	SIPA_IRQ_ENTER_FLOW_CTRL = BIT(19),
	SIPA_IRQ_TXFIFO_FULL_INT = BIT(18),
	SIPA_IRQ_TXFIFO_OVERFLOW = BIT(17),
	SIPA_IRQ_ERRORCODE_IN_TX_FIFO = BIT(16),
	SIPA_IRQ_INTR_BIT = BIT(15),
	SIPA_IRQ_THRESHOLD = BIT(14),
	SIPA_IRQ_DELAY_TIMER = BIT(13),
	SIPA_IRQ_DROP_PACKT_OCCUR = BIT(12),

	SIPA_IRQ_ERROR = 0x0,
};

#define SIPA_FIFO_THRESHOLD_IRQ_EN	BIT(1)
#define SIPA_FIFO_DELAY_TIMER_IRQ_EN	BIT(0)

/* Bus-address offset added to host DMA addresses for the remote side. */
#define SIPA_PCIE_MEM_OFFSET		0x200000000ULL

enum sipa_nic_status_e {
	NIC_OPEN,
	NIC_CLOSE
};

/* Event masks: normal receive-path interrupts vs. warning conditions. */
#define SIPA_RECV_EVT (SIPA_IRQ_INTR_BIT | SIPA_IRQ_THRESHOLD | \
	SIPA_IRQ_DELAY_TIMER | SIPA_IRQ_TX_FIFO_THRESHOLD_SW)

#define SIPA_RECV_WARN_EVT (SIPA_IRQ_TXFIFO_FULL_INT | SIPA_IRQ_TXFIFO_OVERFLOW)

/* smsg flags used by the connection-delegation handshake. */
#define SMSG_FLG_DELE_REQUEST	0x1
#define SMSG_FLG_DELE_RELEASE	0x2

/* Callback invoked from IRQ context with the triggering event bits. */
typedef void (*sipa_irq_notify_cb)(void *priv,
				   enum sipa_irq_evt_type evt,
				   u32 data);
/* Packed 128-bit hardware node descriptor shared with the IPA block.
 * Bit widths are fixed by hardware — do not reorder or resize fields.
 * Fields marked "set by software" are filled by the driver; the rest are
 * written back by hardware.
 * NOTE(review): bitfields of mixed base types rely on
 * -Wno-packed-bitfield-compat (see the module Makefile); layout is
 * compiler-dependent in theory — verify when changing toolchains.
 */
struct sipa_node_description_tag {
	/* set by software: 40-bit bus address of the payload */
	u64 address : 40;
	/* set by software: payload length in bytes */
	u32 length : 20;
	/* set by software: payload offset within the buffer */
	u16 offset : 12;
	/* set by software: network id */
	u8 net_id;
	/* set by software: source terminal */
	u8 src : 5;
	/* set by software: destination terminal */
	u8 dst : 5;
	u8 prio : 3;
	u8 bear_id : 7;
	/* set by software: raise an interrupt on completion */
	u8 intr : 1;
	/* set by software */
	u8 indx : 1;
	u8 err_code : 4;
	u32 reserved : 22;
} __attribute__((__packed__));
/* Tuning parameters for one common FIFO (thresholds, flow control). */
struct sipa_cmn_fifo_params {
	u32 tx_intr_delay_us;
	u32 tx_intr_threshold;
	bool flowctrl_in_tx_full;
	u32 flow_ctrl_cfg;
	u32 flow_ctrl_irq_mode;
	u32 tx_enter_flowctrl_watermark;
	u32 tx_leave_flowctrl_watermark;
	u32 rx_enter_flowctrl_watermark;
	u32 rx_leave_flowctrl_watermark;
	u32 data_ptr_cnt;
	u32 buf_size;
	dma_addr_t data_ptr;
};

/* skb <-> DMA address pairing tracked while a buffer is in flight. */
struct sipa_skb_dma_addr_node {
	struct sk_buff *skb;
	u64 dma_addr;
	struct list_head list;
};

/* One descriptor ring: depth, read/write cursors, and its addresses as
 * seen by the remote (addr_l/addr_h) and by the host (virtual/dma_ptr).
 */
struct sipa_cmn_fifo_tag {
	u32 depth;
	u32 wr;
	u32 rd;

	u32 fifo_base_addr_l;
	u32 fifo_base_addr_h;

	void *virtual_addr;
	dma_addr_t dma_ptr;
};
/* Full configuration/state of one common FIFO: both rings, terminals,
 * register base and the registered IRQ callback.
 */
struct sipa_cmn_fifo_cfg_tag {
	const char *fifo_name;

	void *priv;

	enum sipa_cmn_fifo_index fifo_id;

	bool state;
	u32 dst;
	u32 cur;

	void __iomem *fifo_reg_base;

	struct sipa_cmn_fifo_tag rx_fifo;
	struct sipa_cmn_fifo_tag tx_fifo;

	u32 enter_flow_ctrl_cnt;
	u32 exit_flow_ctrl_cnt;

	sipa_irq_notify_cb irq_cb;
};

/* The single send/receive endpoint pairing the UL and DL FIFOs. */
struct sipa_endpoint {
	/* Centered on CPU/PAM */
	struct sipa_cmn_fifo_cfg_tag *send_fifo;
	struct sipa_cmn_fifo_cfg_tag *recv_fifo;

	struct sipa_cmn_fifo_params send_fifo_param;
	struct sipa_cmn_fifo_params recv_fifo_param;

	bool inited;
	bool connected;
	bool suspended;
};
/* Per-netdevice client of the IPA endpoint. */
struct sipa_nic {
	enum sipa_nic_id nic_id;
	struct sipa_endpoint *send_ep;
	struct sk_buff_head rx_skb_q;
	int need_notify;
	u32 src_mask;
	int netid;
	struct list_head list;
	sipa_notify_cb cb;
	void *cb_priv;
	atomic_t status;	/* NIC_OPEN / NIC_CLOSE */
	bool flow_ctrl_status;
	bool continue_notify;
	bool rm_flow_ctrl;
};

/* Fixed-size ring of in-flight skb/DMA-address pairs (rp/wp cursors). */
struct sipa_skb_array {
	struct sipa_skb_dma_addr_node *array;
	u32 rp;
	u32 wp;
	u32 depth;
};

/* TX side: feeds skbs from the nic list into the send FIFO via
 * send_thread, reclaims completed buffers via free_thread.
 */
struct sipa_skb_sender {
	struct device *dev;
	struct sipa_endpoint *ep;
	atomic_t left_cnt;
	/* To be used for add/remove nic device */
	spinlock_t nic_lock;
	/* To be used for send skb process */
	spinlock_t send_lock;
	spinlock_t exit_lock;
	struct list_head nic_list;
	struct list_head sending_list;
	struct list_head pair_free_list;
	struct sipa_skb_dma_addr_node *pair_cache;
	bool free_notify_net;
	bool ep_cover_net;
	bool send_notify_net;
	wait_queue_head_t free_waitq;
	struct task_struct *free_thread;
	struct task_struct *send_thread;
	bool init_flag;
	u32 no_mem_cnt;
	u32 no_free_cnt;
	u32 enter_flow_ctrl_cnt;
	u32 exit_flow_ctrl_cnt;
	u32 run;	/* thread run/stop flag */
};

/* RX side: refills the recv FIFO (fill_thread) and dispatches received
 * skbs to the registered nics.
 */
struct sipa_skb_receiver {
	struct sipa_endpoint *ep;
	u32 rsvd;
	struct sipa_skb_array recv_array;
	wait_queue_head_t recv_waitq;
	wait_queue_head_t fill_recv_waitq;
	spinlock_t lock;
	spinlock_t exit_lock;
	u32 nic_cnt;
	atomic_t need_fill_cnt;
	struct sipa_nic *nic_array[SIPA_NIC_MAX];
	struct task_struct *fill_thread;
	u32 tx_danger_cnt;
	u32 rx_danger_cnt;
	u32 run;	/* thread run/stop flag */
};
/* Hardware-abstraction vtable for common-FIFO access; one implementation
 * per IPA register layout (populated by sipa_fifo_ops_init()). All ops
 * take the FIFO id plus the cfg array base.
 */
struct sipa_fifo_hal_ops {
	/* lifecycle */
	int (*open)(enum sipa_cmn_fifo_index id,
		    struct sipa_cmn_fifo_cfg_tag *cfg_base, void *cookie);
	int (*close)(enum sipa_cmn_fifo_index id,
		     struct sipa_cmn_fifo_cfg_tag *cfg_base);
	/* ring depth configuration */
	int (*set_rx_depth)(enum sipa_cmn_fifo_index id,
			    struct sipa_cmn_fifo_cfg_tag *cfg_base, u32 depth);
	int (*set_tx_depth)(enum sipa_cmn_fifo_index id,
			    struct sipa_cmn_fifo_cfg_tag *cfg_base, u32 depth);
	u32 (*get_rx_depth)(enum sipa_cmn_fifo_index id,
			    struct sipa_cmn_fifo_cfg_tag *cfg_base);
	int (*hal_set_tx_depth)(enum sipa_cmn_fifo_index id,
				struct sipa_cmn_fifo_cfg_tag *cfg_base,
				u32 depth);
	u32 (*get_tx_depth)(enum sipa_cmn_fifo_index id,
			    struct sipa_cmn_fifo_cfg_tag *cfg_base);
	/* interrupt configuration */
	int (*set_intr_drop_packet)(enum sipa_cmn_fifo_index id,
				    struct sipa_cmn_fifo_cfg_tag *cfg_base,
				    u32 enable, sipa_irq_notify_cb cb);
	int (*set_intr_error_code)(enum sipa_cmn_fifo_index id,
				   struct sipa_cmn_fifo_cfg_tag *cfg_base,
				   u32 enable, sipa_irq_notify_cb cb);
	int (*set_intr_timeout)(enum sipa_cmn_fifo_index id,
				struct sipa_cmn_fifo_cfg_tag *cfg_base,
				u32 enable, u32 time, sipa_irq_notify_cb cb);
	int (*set_hw_intr_timeout)(enum sipa_cmn_fifo_index id,
				   struct sipa_cmn_fifo_cfg_tag *cfg_base,
				   u32 enable, u32 time, sipa_irq_notify_cb cb);
	int (*set_intr_threshold)(enum sipa_cmn_fifo_index id,
				  struct sipa_cmn_fifo_cfg_tag *cfg_base,
				  u32 enable, u32 cnt, sipa_irq_notify_cb cb);
	int (*set_hw_intr_thres)(enum sipa_cmn_fifo_index id,
				 struct sipa_cmn_fifo_cfg_tag *cfg_base,
				 u32 enable, u32 cnt, sipa_irq_notify_cb cb);
	int (*set_src_dst_term)(enum sipa_cmn_fifo_index id,
				struct sipa_cmn_fifo_cfg_tag *cfg_base,
				u32 src, u32 dst);
	/* flow control */
	int (*enable_local_flowctrl_intr)(enum sipa_cmn_fifo_index id,
					  struct sipa_cmn_fifo_cfg_tag *
					  cfg_base, u32 enable, u32 irq_mode,
					  sipa_irq_notify_cb cb);
	int (*enable_remote_flowctrl_intr)(enum sipa_cmn_fifo_index id,
					   struct sipa_cmn_fifo_cfg_tag *
					   cfg_base, u32 work_mode,
					   u32 tx_entry_watermark,
					   u32 tx_exit_watermark,
					   u32 rx_entry_watermark,
					   u32 rx_exit_watermark);
	int (*set_interrupt_intr)(enum sipa_cmn_fifo_index id,
				  struct sipa_cmn_fifo_cfg_tag *cfg_base,
				  u32 enable, sipa_irq_notify_cb cb);
	int (*set_intr_txfifo_overflow)(enum sipa_cmn_fifo_index id,
					struct sipa_cmn_fifo_cfg_tag *cfg_base,
					u32 enable, sipa_irq_notify_cb cb);
	int (*set_intr_txfifo_full)(enum sipa_cmn_fifo_index id,
				    struct sipa_cmn_fifo_cfg_tag *cfg_base,
				    u32 enable, sipa_irq_notify_cb cb);
	/* descriptor transfer */
	int (*put_node_to_rx_fifo)(struct device *dev,
				   enum sipa_cmn_fifo_index id,
				   struct sipa_cmn_fifo_cfg_tag *cfg_base,
				   struct sipa_node_description_tag *node,
				   u32 force_intr, u32 num);
	u32 (*get_left_cnt)(enum sipa_cmn_fifo_index id,
			    struct sipa_cmn_fifo_cfg_tag *cfg_base);
	u32 (*recv_node_from_tx_fifo)(struct device *dev,
				      enum sipa_cmn_fifo_index id,
				      struct sipa_cmn_fifo_cfg_tag *cfg_base,
				      u32 num);
	/* cursor / status queries */
	void (*get_rx_ptr)(enum sipa_cmn_fifo_index id,
			   struct sipa_cmn_fifo_cfg_tag *cfg_base,
			   u32 *wr, u32 *rd);
	void (*get_tx_ptr)(enum sipa_cmn_fifo_index id,
			   struct sipa_cmn_fifo_cfg_tag *cfg_base,
			   u32 *wr, u32 *rd);
	void (*get_filled_depth)(enum sipa_cmn_fifo_index id,
				 struct sipa_cmn_fifo_cfg_tag *cfg_base,
				 u32 *rx_filled, u32 *tx_filled);
	u32 (*get_tx_full_status)(enum sipa_cmn_fifo_index id,
				  struct sipa_cmn_fifo_cfg_tag *cfg_base);
	u32 (*get_tx_empty_status)(enum sipa_cmn_fifo_index id,
				   struct sipa_cmn_fifo_cfg_tag *cfg_base);
	u32 (*get_rx_full_status)(enum sipa_cmn_fifo_index id,
				  struct sipa_cmn_fifo_cfg_tag *cfg_base);
	u32 (*get_rx_empty_status)(enum sipa_cmn_fifo_index id,
				   struct sipa_cmn_fifo_cfg_tag *cfg_base);
	/* cursor updates */
	bool (*set_rx_fifo_wptr)(enum sipa_cmn_fifo_index id,
				 struct sipa_cmn_fifo_cfg_tag *cfg_base,
				 u32 wptr);
	bool (*set_tx_fifo_wptr)(enum sipa_cmn_fifo_index id,
				 struct sipa_cmn_fifo_cfg_tag *cfg_base,
				 u32 wptr);
	int (*set_rx_tx_fifo_ptr)(enum sipa_cmn_fifo_index id,
				  struct sipa_cmn_fifo_cfg_tag *cfg_base,
				  u32 rx_rd, u32 rx_wr, u32 tx_rd, u32 tx_wr);
	int (*ctrl_receive)(enum sipa_cmn_fifo_index id,
			    struct sipa_cmn_fifo_cfg_tag *cfg_base,
			    bool stop);
	/* direct descriptor access */
	struct sipa_node_description_tag *
		(*get_tx_fifo_rp)(enum sipa_cmn_fifo_index id,
				  struct sipa_cmn_fifo_cfg_tag *cfg_base,
				  u32 index);
	struct sipa_node_description_tag *
		(*get_rx_fifo_wr)(enum sipa_cmn_fifo_index id,
				  struct sipa_cmn_fifo_cfg_tag *cfg_base,
				  u32 index);
	int (*set_tx_fifo_rp)(enum sipa_cmn_fifo_index id,
			      struct sipa_cmn_fifo_cfg_tag *cfg_base,
			      u32 tx_rd);
	int (*set_rx_fifo_wr)(struct device *dev, enum sipa_cmn_fifo_index id,
			      struct sipa_cmn_fifo_cfg_tag *cfg_base,
			      u32 num);
	int (*set_intr_eb)(enum sipa_cmn_fifo_index id,
			   struct sipa_cmn_fifo_cfg_tag *cfg_base,
			   bool eb, u32 type);
	void (*clr_tout_th_intr)(enum sipa_cmn_fifo_index id,
				 struct sipa_cmn_fifo_cfg_tag *cfg_base);
};
/* Top-level SIPA controller state; one global instance (see sipa_core.c). */
struct sipa_core {
	const char *name;

	struct device *dev;		/* virt_sipa platform device */
	struct device *pci_dev;		/* DMA-capable PCI parent */
	struct dentry *dentry;		/* debugfs root for this instance */
	struct sipa_endpoint ep;

	struct sipa_cmn_fifo_cfg_tag cmn_fifo_cfg[SIPA_FIFO_MAX];

	struct work_struct flow_ctrl_work;

	/* ipa low power */
	bool remote_ready;

	struct resource *reg_res;
	phys_addr_t reg_mapped;
	void __iomem *virt_reg_addr;

	/* IPA NIC interface */
	struct sipa_nic *nic[SIPA_NIC_MAX];

	/* sender & receiver */
	struct sipa_skb_sender *sender;
	struct sipa_skb_receiver *receiver;

	atomic_t recv_cnt;
	u64 pcie_mem_offset;		/* SIPA_PCIE_MEM_OFFSET */

	struct sipa_fifo_hal_ops hal_ops;

	struct task_struct *smsg_thread;

	struct dentry *debugfs_root;
	const void *debugfs_data;
};

/* --- public API between sipa_core.c and its sibling translation units --- */
void sipa_fifo_ops_init(struct sipa_fifo_hal_ops *ops);
struct sipa_core *sipa_get_ctrl_pointer(void);

void sipa_receiver_add_nic(struct sipa_skb_receiver *receiver,
			   struct sipa_nic *nic);
void sipa_receiver_open_cmn_fifo(struct sipa_skb_receiver *receiver);

void sipa_sender_open_cmn_fifo(struct sipa_skb_sender *sender);
int create_sipa_skb_sender(struct sipa_endpoint *ep,
			   struct sipa_skb_sender **sender_pp);
void destroy_sipa_skb_sender(struct sipa_skb_sender *sender);
void sipa_skb_sender_add_nic(struct sipa_skb_sender *sender,
			     struct sipa_nic *nic);
void sipa_skb_sender_remove_nic(struct sipa_skb_sender *sender,
				struct sipa_nic *nic);
int sipa_skb_sender_send_data(struct sipa_skb_sender *sender,
			      struct sk_buff *skb,
			      enum sipa_term_type dst,
			      u8 netid);
int create_sipa_skb_receiver(struct sipa_endpoint *ep,
			     struct sipa_skb_receiver **receiver_pp);

void sipa_nic_notify_evt(struct sipa_nic *nic, enum sipa_evt_type evt);
void sipa_nic_try_notify_recv(struct sipa_nic *nic);
void sipa_nic_push_skb(struct sipa_nic *nic, struct sk_buff *skb);
void sipa_nic_check_flow_ctrl(void);

int sipa_create_smsg_channel(struct sipa_core *ipa);
int sipa_init_debugfs(struct sipa_core *ipa);
int sipa_int_callback_func(int evt, void *cookie);
#if defined (__BIG_ENDIAN_BITFIELD)
/* Big-endian helper: decode a raw little-endian hardware descriptor from
 * @node_addr into @node.  Only the fields the receive path consumes
 * (address, net_id, src, err_code) are decoded; the #if 0 sections keep
 * the byte positions of the remaining fields documented.
 */
static inline int sipa_get_node_desc(u8 *node_addr,
				     struct sipa_node_description_tag *node)
{
	if (!node_addr || !node)
		return -EINVAL;

	node->address = node_addr[0] + ((u32)node_addr[1] << 8) +
		((u32)node_addr[2] << 16) + ((u32)node_addr[3] << 24) +
		((u64)node_addr[4] << 32);
#if 0
	node->length = node_addr[5] + ((u32)node_addr[6] << 8) +
		((u32)(node_addr[7] & 0xf) << 16);

	node->offset = ((node_addr[7] & 0xf0) >> 4) +
		((u16)node_addr[8] << 4);
#endif
	node->net_id = node_addr[9];
	node->src = node_addr[10] & 0x1f;
#if 0
	node->dst = ((node_addr[11] & 0x3) << 3) +
		((node_addr[10] & 0xe0) >> 5);
#endif
	node->err_code = ((node_addr[12] & 0xc0) >> 6) +
		((node_addr[13] & 0x03) << 2);
#if 0
	node->prio = (node_addr[11] & 0x1c) >> 2;
	node->bear_id = ((node_addr[11] & 0xe0) >> 5) +
		((node_addr[12] & 0xf) << 3);
	node->intr = !!(node_addr[12] & BIT(4));
	node->indx = !!(node_addr[12] & BIT(5));
	node->reserved = ((node_addr[13] & 0xfc) >> 2) +
		((u32)node_addr[14] << 6) + ((u32)node_addr[15] << 14);
#endif
	/* order the field stores before any subsequent reader */
	smp_rmb();

	return 0;
}
/* Big-endian helper: byte/bit-swizzle a descriptor from host layout
 * (@src_addr) into the little-endian wire layout hardware expects
 * (@dst_addr).  The groups below mirror the bitfield order of
 * struct sipa_node_description_tag; do not reorder.
 */
static inline int sipa_set_node_desc(u8 *dst_addr, u8 *src_addr)
{
	if (!dst_addr || !src_addr)
		return -EINVAL;

	/* address */
	dst_addr[0] = src_addr[4];
	dst_addr[1] = src_addr[3];
	dst_addr[2] = src_addr[2];
	dst_addr[3] = src_addr[1];
	dst_addr[4] = src_addr[0];

	/* length */
	dst_addr[5] = (src_addr[7] >> 4) + ((src_addr[6] & 0x0f) << 4);
	dst_addr[6] = (src_addr[6] >> 4) + ((src_addr[5] & 0x0f) << 4);
	dst_addr[7] = src_addr[5] >> 4;

	/* offset */
	dst_addr[7] += ((src_addr[8] & 0x0f) << 4);
	dst_addr[8] = (src_addr[7] << 4) + (src_addr[8] >> 4);

	/* netid */
	dst_addr[9] = src_addr[9];

	/* src */
	dst_addr[10] = ((src_addr[10] & 0xf8) >> 3);

	/* dst */
	dst_addr[10] +=
		((src_addr[11] >> 6) + ((src_addr[10] & 0x01) << 2)) << 5;
	dst_addr[11] = (src_addr[10] & 0x6) >> 1;

	/* prio */
	dst_addr[11] += ((src_addr[11] & 0x38) >> 1);

	/* bear_id */
	dst_addr[11] += ((src_addr[12] & 0x70) << 1);
	dst_addr[12] = ((src_addr[11] & 0x7) << 1) + (src_addr[12] >> 7);

	/* intx */
	dst_addr[12] += ((src_addr[12] & 0x8) << 1);

	/* indx */
	dst_addr[12] += ((src_addr[12] & 0x4) << 3);

	/* err code */
	dst_addr[12] += (src_addr[13] & 0xc0);
	dst_addr[13] = src_addr[12] & 0x3;

	/* reserved */
	dst_addr[13] += src_addr[15] << 2;
	dst_addr[14] = (src_addr[15] & 0x3) + (src_addr[14] << 2);
	dst_addr[15] = ((src_addr[13] & 0x3f) << 2) +
		((src_addr[14] & 0xc0) >> 6);

	/* make the wire descriptor visible before the cursor update */
	smp_wmb();

	return 0;
}
#endif
#endif

View File

@ -1,590 +0,0 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/regmap.h>
#include <linux/dma-mapping.h>
#include <uapi/linux/swab.h>
#include "../include/sipa.h"
#include "sipa_core.h"
/* Scratch parse buffers; NOTE(review): shadowed by same-named locals in the
 * write handlers below, so these file-scope arrays appear unused — verify. */
static u32 debug_cmd[5], data_buf[5];
/* Descriptor being inspected/edited through the debugfs endian files. */
static struct sipa_node_description_tag ipa_node;
/* debugfs show: dump controller state — DMA masks, nic table, sender and
 * receiver counters, both FIFO configurations, and a few raw EP registers.
 */
static int sipa_params_debug_show(struct seq_file *s, void *unused)
{
	int i;
	u32 tmp;
	struct sipa_core *ipa = (struct sipa_core *)s->private;
	struct sipa_cmn_fifo_cfg_tag *fifo_cfg;

	seq_printf(s, "dma_mask = 0x%llx coherent_dma_mask = 0x%llx\n",
		   (u64)*ipa->pci_dev->dma_mask, (u64)ipa->pci_dev->coherent_dma_mask);
	seq_printf(s, "remote ready = %d reg_mapped = 0x%llx virt_reg_addr = 0x%p\n",
		   ipa->remote_ready, (long long unsigned int)ipa->reg_mapped, ipa->virt_reg_addr);
	seq_printf(s, "ipa reg start = 0x%llx size = 0x%llx pcie_mem_offset = %llx\n",
		   (long long unsigned int)ipa->reg_res->start, (long long unsigned int)resource_size(ipa->reg_res),
		   (long long unsigned int)ipa->pcie_mem_offset);

	/* registered nic clients */
	for (i = 0; i < SIPA_NIC_MAX; i++) {
		if (!ipa->nic[i])
			continue;
		seq_printf(s, "open = %d src_mask = 0x%x netid = %d flow_ctrl_status = %d",
			   atomic_read(&ipa->nic[i]->status), ipa->nic[i]->src_mask,
			   ipa->nic[i]->netid, ipa->nic[i]->flow_ctrl_status);
		seq_printf(s, " qlen = %d need_notify = %d continue_notify = %d\n",
			   ipa->nic[i]->rx_skb_q.qlen, ipa->nic[i]->need_notify,
			   ipa->nic[i]->continue_notify);
	}

	/* sender / receiver counters */
	seq_printf(s, "sender no_mem_cnt = %d no_free_cnt = %d left_cnt = %d\n",
		   ipa->sender->no_mem_cnt, ipa->sender->no_free_cnt,
		   atomic_read(&ipa->sender->left_cnt));
	seq_printf(s, "sender enter_flow_ctrl_cnt=%d, exit_flow_ctrl_cnt=%d, free_notify_net=%d, ep_cover_net=%d\n",
		   ipa->sender->enter_flow_ctrl_cnt, ipa->sender->exit_flow_ctrl_cnt,
		   ipa->sender->free_notify_net, ipa->sender->ep_cover_net);
	seq_printf(s, "receiver need_fill_cnt = %d",
		   atomic_read(&ipa->receiver->need_fill_cnt));
	seq_printf(s, " tx_danger_cnt = %d rx_danger_cnt = %d\n",
		   ipa->receiver->tx_danger_cnt, ipa->receiver->rx_danger_cnt);

	/* downlink FIFO */
	fifo_cfg = &ipa->cmn_fifo_cfg[SIPA_FIFO_PCIE_DL];
	seq_printf(s, "[PCIE_DL]state = %d fifo_reg_base = %p\n",
		   fifo_cfg->state, fifo_cfg->fifo_reg_base);
	seq_printf(s, "[PCIE_DL]rx fifo depth = 0x%x wr = 0x%x rd = 0x%x\n",
		   fifo_cfg->rx_fifo.depth,
		   fifo_cfg->rx_fifo.wr,
		   fifo_cfg->rx_fifo.rd);
	seq_printf(s, "[PCIE_DL]rx_fifo fifo_addrl = 0x%x fifo_addrh = 0x%x\n",
		   fifo_cfg->rx_fifo.fifo_base_addr_l,
		   fifo_cfg->rx_fifo.fifo_base_addr_h);
	seq_printf(s, "[PCIE_DL]rx fifo virt addr = %p\n",
		   fifo_cfg->rx_fifo.virtual_addr);
	seq_printf(s, "[PCIE_DL]tx fifo depth = 0x%x wr = 0x%x rd = 0x%x\n",
		   fifo_cfg->tx_fifo.depth, fifo_cfg->tx_fifo.wr,
		   fifo_cfg->tx_fifo.rd);
	seq_printf(s, "[PCIE_DL]tx_fifo fifo_addrl = 0x%x fifo_addrh = 0x%x\n",
		   fifo_cfg->tx_fifo.fifo_base_addr_l,
		   fifo_cfg->tx_fifo.fifo_base_addr_h);
	seq_printf(s, "[PCIE_DL]tx fifo virt addr = %p\n",
		   fifo_cfg->tx_fifo.virtual_addr);

	/* uplink FIFO */
	fifo_cfg = &ipa->cmn_fifo_cfg[SIPA_FIFO_PCIE_UL];
	seq_printf(s, "[PCIE_UL]state = %d fifo_reg_base = %p\n",
		   fifo_cfg->state, fifo_cfg->fifo_reg_base);
	seq_printf(s, "[PCIE_UL]rx fifo depth = 0x%x wr = 0x%x rd = 0x%x\n",
		   fifo_cfg->rx_fifo.depth,
		   fifo_cfg->rx_fifo.wr,
		   fifo_cfg->rx_fifo.rd);
	seq_printf(s, "[PCIE_UL]rx_fifo fifo_addrl = 0x%x fifo_addrh = 0x%x\n",
		   fifo_cfg->rx_fifo.fifo_base_addr_l,
		   fifo_cfg->rx_fifo.fifo_base_addr_h);
	seq_printf(s, "[PCIE_UL]rx fifo virt addr = %p\n",
		   fifo_cfg->rx_fifo.virtual_addr);
	seq_printf(s, "[PCIE_UL]tx fifo depth = 0x%x wr = 0x%x rd = 0x%x\n",
		   fifo_cfg->tx_fifo.depth, fifo_cfg->tx_fifo.wr,
		   fifo_cfg->tx_fifo.rd);
	seq_printf(s, "[PCIE_UL]tx_fifo fifo_addrl = 0x%x fifo_addrh = 0x%x\n",
		   fifo_cfg->tx_fifo.fifo_base_addr_l,
		   fifo_cfg->tx_fifo.fifo_base_addr_h);
	seq_printf(s, "[PCIE_UL]tx fifo virt addr = %p\n",
		   fifo_cfg->tx_fifo.virtual_addr);

	/* raw EP register peeks at offset 0xc00 (tx fifo depth/wr/rd) */
	//ep: IPA_COMMON_TX_FIFO_DEPTH 0x0Cl
	tmp = readl_relaxed(ipa->virt_reg_addr + 0xc00 + 0x0C);
	seq_printf(s, "neil: read IPA_COMMON_TX_FIFO_DEPTH, value = %x\n", (tmp >> 16));
	//ep: IPA_COMMON_TX_FIFO_WR 0x10l
	tmp = readl_relaxed(ipa->virt_reg_addr + 0xc00 + 0x10);
	seq_printf(s, "neil: read IPA_COMMON_TX_FIFO_WR, value = %x\n", (tmp >> 16));
	//ep: IPA_COMMON_TX_FIFO_RD 0x14l
	tmp = readl_relaxed(ipa->virt_reg_addr + 0xc00 + 0x14);
	seq_printf(s, "neil: read IPA_COMMON_TX_FIFO_RD, value = %x\n", (tmp >> 16));

	return 0;
}
/* debugfs open: seq_file boilerplate; i_private carries the sipa_core. */
static int sipa_params_debug_open(struct inode *inode,
				  struct file *file)
{
	return single_open(file, sipa_params_debug_show,
			   inode->i_private);
}
/* debugfs write: parse 12 whitespace-separated hex values into the fields
 * of the shared test descriptor ipa_node (read back via the show handler).
 * Returns @size on success, -EFAULT/-EINVAL on bad input.
 */
static ssize_t sipa_endian_debug_write(struct file *f, const char __user *buf,
				       size_t size, loff_t *l)
{
	ssize_t len;
	u32 val[12];
	char kbuf[96];

	len = min(size, sizeof(kbuf) - 1);
	if (copy_from_user(kbuf, buf, len))
		return -EFAULT;
	/* BUGFIX: the user buffer is not NUL-terminated; terminate before
	 * handing it to sscanf to avoid reading past the copied bytes */
	kbuf[len] = '\0';

	/* BUGFIX: a partial parse previously left the remaining values
	 * uninitialized; reject short input instead */
	if (sscanf(kbuf, "%x %x %x %x %x %x %x %x %x %x %x %x\n",
		   &val[0], &val[1], &val[2], &val[3],
		   &val[4], &val[5], &val[6], &val[7],
		   &val[8], &val[9], &val[10], &val[11]) != 12)
		return -EINVAL;

	ipa_node.address = val[0];
	ipa_node.length = val[1];
	ipa_node.offset = val[2];
	ipa_node.net_id = val[3];
	ipa_node.src = val[4];
	ipa_node.dst = val[5];
	ipa_node.prio = val[6];
	ipa_node.bear_id = val[7];
	ipa_node.intr = val[8];
	ipa_node.indx = val[9];
	ipa_node.err_code = val[10];
	ipa_node.reserved = val[11];

	return size;
}
/* debugfs show: print the test descriptor field-by-field, then its raw
 * in-memory bytes — used to inspect the compiler's bitfield layout.
 */
static int sipa_endian_debug_show(struct seq_file *s, void *unused)
{
	int i;
	u8 *byte;

	seq_printf(s, "address = 0x%llx length = 0x%x offset = 0x%x net_id = 0x%x\n",
		   (u64)ipa_node.address, ipa_node.length, ipa_node.offset,
		   ipa_node.net_id);
	seq_printf(s, "src = 0x%x dst = 0x%x prio = 0x%x bear_id = 0x%x\n",
		   ipa_node.src, ipa_node.dst, ipa_node.prio, ipa_node.bear_id);
	seq_printf(s, "intr = 0x%x indx = 0x%x err_code = 0x%x reserved = 0x%x\n",
		   ipa_node.intr, ipa_node.indx,
		   ipa_node.err_code, ipa_node.reserved);

	byte = (u8 *)&ipa_node;
	for (i = 0; i < sizeof(ipa_node); i++)
		seq_printf(s, "0x%x ", *(byte + i));

	seq_puts(s, "\n");

	return 0;
}
/* Read-only "params" debugfs node; backed by sipa_params_debug_show(). */
static const struct file_operations sipa_params_fops = {
	.open = sipa_params_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/* debugfs open hook: route reads through sipa_endian_debug_show(). */
static int sipa_endian_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, sipa_endian_debug_show, inode->i_private);
}
/* Read/write "endian" debugfs node: write fills ipa_node, read dumps it. */
static const struct file_operations sipa_endian_fops = {
	.open = sipa_endian_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = sipa_endian_debug_write,
};
/*
 * Accept a 16-byte hex descriptor image and unpack it into ipa_node.
 *
 * Fixes: the user buffer was never NUL-terminated before sscanf() (possible
 * over-read), and debug_cmd[] was printed/consumed uninitialized when the
 * input contained fewer than 16 fields; it is now zero-initialized so short
 * input degrades to zeros instead of stack garbage.
 */
static ssize_t sipa_get_node_debug_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *l)
{
	int i;
	ssize_t len;
	u8 debug_cmd[16] = {0}, data_buf[128];

	len = min(size, sizeof(data_buf) - 1);
	if (copy_from_user((char *)data_buf, buf, len))
		return -EFAULT;
	data_buf[len] = '\0';	/* copy_from_user() does not terminate */
	len = sscanf((char *)data_buf, "%4hhx %4hhx %4hhx %4hhx %4hhx %4hhx %4hhx %4hhx %4hhx %4hhx %4hhx %4hhx %4hhx %4hhx %4hhx %4hhx\n",
		     &debug_cmd[0], &debug_cmd[1], &debug_cmd[2],
		     &debug_cmd[3], &debug_cmd[4], &debug_cmd[5],
		     &debug_cmd[6], &debug_cmd[7], &debug_cmd[8],
		     &debug_cmd[9], &debug_cmd[10], &debug_cmd[11],
		     &debug_cmd[12], &debug_cmd[13], &debug_cmd[14],
		     &debug_cmd[15]);
	for (i = 0; i < 16; i++)
		pr_err("0x%x ", debug_cmd[i]);
	pr_err("\n");
#if defined (__BIG_ENDIAN_BITFIELD)
	sipa_get_node_desc(debug_cmd, &ipa_node);
#else
	/* little-endian: hand-assemble the 40-bit address and bitfields */
	ipa_node.address = debug_cmd[4] + ((u32)debug_cmd[3] << 8) +
		((u32)debug_cmd[2] << 16) + ((u32)debug_cmd[1] << 24) +
		((u64)debug_cmd[0] << 32);
	ipa_node.net_id = debug_cmd[9];
	ipa_node.src = debug_cmd[10] & 0x1f;
	ipa_node.err_code = ((debug_cmd[13] & 0xc0) >> 6) +
		((debug_cmd[12] & 0x03) << 2);
#endif
	return size;
}
/* Show the descriptor last unpacked by sipa_get_node_debug_write(). */
static int sipa_get_node_debug_show(struct seq_file *s, void *unused)
{
	const u8 *raw = (const u8 *)&ipa_node;
	int idx;

	seq_printf(s, "address = 0x%llx length = 0x%x offset = 0x%x net_id = 0x%x\n",
		   (u64)ipa_node.address, ipa_node.length, ipa_node.offset,
		   ipa_node.net_id);
	seq_printf(s, "src = 0x%x dst = 0x%x prio = 0x%x bear_id = 0x%x\n",
		   ipa_node.src, ipa_node.dst, ipa_node.prio, ipa_node.bear_id);
	seq_printf(s, "intr = 0x%x indx = 0x%x err_code = 0x%x reserved = 0x%x\n",
		   ipa_node.intr, ipa_node.indx,
		   ipa_node.err_code, ipa_node.reserved);
	for (idx = 0; idx < sizeof(ipa_node); idx++)
		seq_printf(s, "0x%x ", raw[idx]);
	seq_puts(s, "\n");
	return 0;
}
/* debugfs open hook: route reads through sipa_get_node_debug_show(). */
static int sipa_get_node_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, sipa_get_node_debug_show, inode->i_private);
}
/* "get_node" debugfs node: write a raw descriptor image, read it decoded. */
static const struct file_operations sipa_get_node_fops = {
	.open = sipa_get_node_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = sipa_get_node_debug_write,
};
/*
 * Fill ipa_node from 12 hex fields, to be serialized by the show path.
 *
 * Fixes: missing NUL termination of the copy_from_user() buffer before
 * sscanf(), and unchecked sscanf() count that previously let uninitialized
 * debug_cmd[] values leak into ipa_node.
 */
static ssize_t sipa_set_node_debug_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *l)
{
	ssize_t len;
	u32 debug_cmd[24], data_buf[24];

	len = min(size, sizeof(data_buf) - 1);
	if (copy_from_user((char *)data_buf, buf, len))
		return -EFAULT;
	/* copy_from_user() does not terminate the string; sscanf needs it */
	((char *)data_buf)[len] = '\0';
	if (sscanf((char *)data_buf, "%x %x %x %x %x %x %x %x %x %x %x %x\n",
		   &debug_cmd[0], &debug_cmd[1], &debug_cmd[2],
		   &debug_cmd[3], &debug_cmd[4], &debug_cmd[5],
		   &debug_cmd[6], &debug_cmd[7], &debug_cmd[8],
		   &debug_cmd[9], &debug_cmd[10], &debug_cmd[11]) != 12)
		return -EINVAL;
	ipa_node.address = debug_cmd[0];
	ipa_node.length = debug_cmd[1];
	ipa_node.offset = debug_cmd[2];
	ipa_node.net_id = debug_cmd[3];
	ipa_node.src = debug_cmd[4];
	ipa_node.dst = debug_cmd[5];
	ipa_node.prio = debug_cmd[6];
	ipa_node.bear_id = debug_cmd[7];
	ipa_node.intr = debug_cmd[8];
	ipa_node.indx = debug_cmd[9];
	ipa_node.err_code = debug_cmd[10];
	ipa_node.reserved = debug_cmd[11];
	return size;
}
/*
 * Dump the cached ipa_node; on big-endian builds additionally serialize it
 * via sipa_set_node_desc() and print the resulting 16-byte wire image.
 */
static int sipa_set_node_debug_show(struct seq_file *s, void *unused)
{
#if defined (__BIG_ENDIAN_BITFIELD)
	int i;
	u8 node_buf[16];
#endif
	seq_printf(s, "address = 0x%llx length = 0x%x offset = 0x%x net_id = 0x%x\n",
		   (u64)ipa_node.address, ipa_node.length, ipa_node.offset,
		   ipa_node.net_id);
	seq_printf(s, "src = 0x%x dst = 0x%x prio = 0x%x bear_id = 0x%x\n",
		   ipa_node.src, ipa_node.dst, ipa_node.prio, ipa_node.bear_id);
	seq_printf(s, "intr = 0x%x indx = 0x%x err_code = 0x%x reserved = 0x%x\n",
		   ipa_node.intr, ipa_node.indx,
		   ipa_node.err_code, ipa_node.reserved);
#if defined (__BIG_ENDIAN_BITFIELD)
	/* pack the struct into its descriptor byte layout and dump it */
	sipa_set_node_desc(node_buf, (u8 *)&ipa_node);
	for (i = 0; i < sizeof(node_buf); i++)
		seq_printf(s, "0x%x ", node_buf[i]);
#endif
	seq_puts(s, "\n");
	return 0;
}
/* debugfs open hook: route reads through sipa_set_node_debug_show(). */
static int sipa_set_node_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, sipa_set_node_debug_show, inode->i_private);
}
/* "set_node" debugfs node: write descriptor fields, read the packed image. */
static const struct file_operations sipa_set_node_fops = {
	.open = sipa_set_node_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = sipa_set_node_debug_write,
};
/*
 * Register poke interface: "offset value write_flag ...". If the third
 * field is non-zero, write `value` at `offset` from the IPA register base.
 *
 * Fixes: the shared file-scope parse buffer was handed to sscanf() without
 * NUL termination after copy_from_user() (possible over-read).
 * NOTE(review): debug_cmd/data_buf are file-scope scratch buffers shared
 * with the show path and unsynchronized — concurrent writers race; confirm
 * whether debugfs usage here is single-threaded by convention.
 */
static ssize_t sipa_reg_debug_write(struct file *f, const char __user *buf,
				    size_t size, loff_t *l)
{
	ssize_t len;
	struct sipa_core *ipa = f->f_inode->i_private;

	len = min(size, sizeof(data_buf) - 1);
	if (copy_from_user((char *)data_buf, buf, len))
		return -EFAULT;
	/* copy_from_user() does not terminate the string; sscanf needs it */
	((char *)data_buf)[len] = '\0';
	len = sscanf((char *)data_buf, "%x %x %x %x %x\n",
		     &debug_cmd[0], &debug_cmd[1], &debug_cmd[2],
		     &debug_cmd[3], &debug_cmd[4]);
	if (debug_cmd[2])
		writel_relaxed(debug_cmd[1], ipa->virt_reg_addr + debug_cmd[0]);
	return size;
}
/*
 * Dump PCIe UL/DL common-FIFO status: the register last selected via the
 * write path (debug_cmd[0]), empty/full flags, filled depths and read/write
 * pointers; finally re-run the IRQ callback to drain any pending work.
 */
static int sipa_reg_debug_show(struct seq_file *s, void *unused)
{
	u32 tx_filled, rx_filled;
	u32 tx_wr, tx_rd, rx_wr, rx_rd;
	struct sipa_core *ipa = (struct sipa_core *)s->private;

	/* debug_cmd[0] is the register offset stored by sipa_reg_debug_write() */
	seq_printf(s, "0x%x\n",
		   readl_relaxed(ipa->virt_reg_addr + debug_cmd[0]));
	seq_printf(s, "pcie dl tx fifo empty = %d full = %d rx fifo empty = %d full = %d\n",
		   ipa->hal_ops.get_tx_empty_status(SIPA_FIFO_PCIE_DL,
						    ipa->cmn_fifo_cfg),
		   ipa->hal_ops.get_tx_full_status(SIPA_FIFO_PCIE_DL,
						   ipa->cmn_fifo_cfg),
		   ipa->hal_ops.get_rx_empty_status(SIPA_FIFO_PCIE_DL,
						    ipa->cmn_fifo_cfg),
		   ipa->hal_ops.get_rx_full_status(SIPA_FIFO_PCIE_DL,
						   ipa->cmn_fifo_cfg));
	seq_printf(s, "pcie ul tx fifo empty = %d full = %d rx fifo empty = %d full = %d\n",
		   ipa->hal_ops.get_tx_empty_status(SIPA_FIFO_PCIE_UL,
						    ipa->cmn_fifo_cfg),
		   ipa->hal_ops.get_tx_full_status(SIPA_FIFO_PCIE_UL,
						   ipa->cmn_fifo_cfg),
		   ipa->hal_ops.get_rx_empty_status(SIPA_FIFO_PCIE_UL,
						    ipa->cmn_fifo_cfg),
		   ipa->hal_ops.get_rx_full_status(SIPA_FIFO_PCIE_UL,
						   ipa->cmn_fifo_cfg));
	ipa->hal_ops.get_filled_depth(SIPA_FIFO_PCIE_DL, ipa->cmn_fifo_cfg,
				      &rx_filled, &tx_filled);
	seq_printf(s, "pcie dl tx filled = 0x%x rx filled = 0x%x\n",
		   tx_filled, rx_filled);
	ipa->hal_ops.get_filled_depth(SIPA_FIFO_PCIE_UL, ipa->cmn_fifo_cfg,
				      &rx_filled, &tx_filled);
	seq_printf(s, "pcie ul tx filled = 0x%x rx filled = 0x%x\n",
		   tx_filled, rx_filled);
	ipa->hal_ops.get_rx_ptr(SIPA_FIFO_PCIE_UL, ipa->cmn_fifo_cfg, &rx_wr, &rx_rd);
	ipa->hal_ops.get_tx_ptr(SIPA_FIFO_PCIE_UL, ipa->cmn_fifo_cfg, &tx_wr, &tx_rd);
	seq_printf(s, "pcie ul rx_wr = 0x%x, rx_rd = 0x%x, tx_wr = 0x%x, tx_rd = 0x%x\n",
		   rx_wr, rx_rd, tx_wr, tx_rd);
	ipa->hal_ops.get_rx_ptr(SIPA_FIFO_PCIE_DL, ipa->cmn_fifo_cfg, &rx_wr, &rx_rd);
	ipa->hal_ops.get_tx_ptr(SIPA_FIFO_PCIE_DL, ipa->cmn_fifo_cfg, &tx_wr, &tx_rd);
	seq_printf(s, "pcie dl rx_wr = 0x%x, rx_rd = 0x%x, tx_wr = 0x%x, tx_rd = 0x%x\n",
		   rx_wr, rx_rd, tx_wr, tx_rd);
	/* manually kick the interrupt handler as if an IRQ had fired */
	sipa_int_callback_func(0, NULL);
	return 0;
}
/* debugfs open hook: route reads through sipa_reg_debug_show(). */
static int sipa_reg_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, sipa_reg_debug_show, inode->i_private);
}
/* "reg" debugfs node: poke registers on write, dump FIFO state on read. */
static const struct file_operations sipa_reg_debug_fops = {
	.open = sipa_reg_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = sipa_reg_debug_write,
};
/*
 * Debug helper: inject one 128-byte 0xE7-filled test skb into the sender
 * towards terminal 0x19.
 *
 * Fixes: the original wrapped the body in `if (!skb)` on a pointer it had
 * just initialized to NULL — an always-true dead check, removed.
 * NOTE(review): GFP_KERNEL | GFP_NOWAIT mixes sleeping and non-sleeping
 * allocation flags; kept as-is, but confirm which one is intended.
 */
static int sipa_send_test_show(struct seq_file *s, void *unused)
{
	struct sipa_core *ipa = (struct sipa_core *)s->private;
	struct sk_buff *skb;

	skb = __dev_alloc_skb(256, GFP_KERNEL | GFP_NOWAIT);
	if (!skb) {
		dev_err(ipa->dev, "failed to alloc skb!\n");
		return 0;
	}
	skb_put(skb, 128);
	memset(skb->data, 0xE7, skb->len);
	sipa_skb_sender_send_data(ipa->sender, skb, 0x19, 0);
	return 0;
}
/* debugfs open hook: reading the file triggers one test send. */
static int sipa_send_test_open(struct inode *inode, struct file *file)
{
	return single_open(file, sipa_send_test_show, inode->i_private);
}
/* Read-only "send_test" debugfs node; each read injects a test packet. */
static const struct file_operations sipa_send_test_fops = {
	.open = sipa_send_test_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/*
 * "nic" debugfs write: "<nic_id> <open_flag>" — non-zero flag opens the
 * nic, zero closes it.
 *
 * Fixes: the user buffer was not NUL-terminated before sscanf(), and the
 * sscanf() result was unchecked, so malformed input fed uninitialized
 * stack bytes into sipa_nic_open()/sipa_nic_close().
 */
static ssize_t sipa_nic_debug_write(struct file *f, const char __user *buf,
				    size_t size, loff_t *l)
{
	ssize_t len;
	u8 debug_cmd[24], data_buf[24];

	len = min(size, sizeof(data_buf) - 1);
	if (copy_from_user((char *)data_buf, buf, len))
		return -EFAULT;
	data_buf[len] = '\0';	/* copy_from_user() does not terminate */
	if (sscanf((char *)data_buf, "%4hhx %4hhx\n",
		   &debug_cmd[0], &debug_cmd[1]) != 2)
		return -EINVAL;
	if (debug_cmd[1])
		sipa_nic_open(debug_cmd[0], 0, NULL, NULL);
	else
		sipa_nic_close(debug_cmd[0]);
	return size;
}
/* Walk and print every node descriptor in the PCIe DL rx fifo. */
static int sipa_nic_debug_show(struct seq_file *s, void *unused)
{
	struct sipa_core *ipa = (struct sipa_core *)s->private;
	struct sipa_cmn_fifo_cfg_tag *pcie_dl = &ipa->cmn_fifo_cfg[SIPA_FIFO_PCIE_DL];
	struct sipa_cmn_fifo_tag *dl_rx_fifo = &pcie_dl->rx_fifo;
	struct sipa_node_description_tag *node;
	int i;

	pr_info("dl rx_fifo addr: 0x%lx wp-%d rp-%d\n", (long unsigned int)dl_rx_fifo->virtual_addr,
		dl_rx_fifo->wr, dl_rx_fifo->rd);
	node = (struct sipa_node_description_tag *)dl_rx_fifo->virtual_addr;
	for (i = 0; i < dl_rx_fifo->depth; i++, node++) {
		pr_info("node addr 0x%lx\n", (long unsigned int)node);
		pr_info("node info i-%d, addr-0x%llx len-%u off-%u netid-%u src-%u dst-%u pro-%u bearid-%u intr-%u indx-%u err-%u resd-%u\n",
			i, (long long unsigned int)node->address, node->length, node->offset, node->net_id,
			node->src, node->dst, node->prio, node->bear_id, node->intr,
			node->indx, node->err_code, node->reserved);
	}
	return 0;
}
/* debugfs open hook: route reads through sipa_nic_debug_show(). */
static int sipa_nic_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, sipa_nic_debug_show, inode->i_private);
}
/* "nic" debugfs node: open/close nics on write, dump DL rx fifo on read. */
static const struct file_operations sipa_nic_debug_fops = {
	.open = sipa_nic_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = sipa_nic_debug_write,
};
int sipa_init_debugfs(struct sipa_core *ipa)
{
struct dentry *root;
struct dentry *file;
root = debugfs_create_dir(dev_name(ipa->dev), NULL);
if (!root) {
dev_err(ipa->dev, "sipa create debugfs fail\n");
return -ENOMEM;
}
file = debugfs_create_file("params", 0444, root, ipa,
&sipa_params_fops);
if (!file) {
dev_err(ipa->dev, "sipa create params file debugfs fail\n");
debugfs_remove_recursive(root);
return -ENOMEM;
}
file = debugfs_create_file("endian", 0444, root, ipa,
&sipa_endian_fops);
if (!file) {
dev_err(ipa->dev, "sipa create endian file debugfs fail\n");
debugfs_remove_recursive(root);
return -ENOMEM;
}
file = debugfs_create_file("get_node", 0444, root, ipa,
&sipa_get_node_fops);
if (!file) {
dev_err(ipa->dev, "sipa create endian file debugfs fail\n");
debugfs_remove_recursive(root);
return -ENOMEM;
}
file = debugfs_create_file("set_node", 0444, root, ipa,
&sipa_set_node_fops);
if (!file) {
dev_err(ipa->dev, "sipa create set node file debugfs fail\n");
debugfs_remove_recursive(root);
return -ENOMEM;
}
file = debugfs_create_file("reg", 0444, root, ipa,
&sipa_reg_debug_fops);
if (!file) {
dev_err(ipa->dev, "sipa create reg debug file debugfs fail\n");
debugfs_remove_recursive(root);
return -ENOMEM;
}
file = debugfs_create_file("send_test", 0444, root, ipa,
&sipa_send_test_fops);
if (!file) {
dev_err(ipa->dev, "sipa create send_test debug file debugfs fail\n");
debugfs_remove_recursive(root);
return -ENOMEM;
}
file = debugfs_create_file("nic", 0444, root, ipa,
&sipa_nic_debug_fops);
if (!file) {
dev_err(ipa->dev, "sipa create nic debug file debugfs fail\n");
debugfs_remove_recursive(root);
return -ENOMEM;
}
ipa->dentry = root;
return 0;
}
EXPORT_SYMBOL(sipa_init_debugfs);

/* ---- diff-view artifact: boundary of next deleted source file (sipa_dele_cmn.c) ---- */
/*
* Copyright (C) 2018-2019 Unisoc Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <linux/device.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include "../include/sipa.h"
#include "../include/sipc.h"
#include "../include/sprd_pcie_ep_device.h"
#include "sipa_core.h"
#define SIPA_PCIE_DL_CMN_FIFO_REG_OFFSET 0x980
#define SIPA_PCIE_UL_CMN_FIFO_REG_OFFSET 0x200
static int sipa_dele_start_req_work(void)
{
struct smsg msg;
msg.channel = SMSG_CH_COMM_SIPA;
msg.type = SMSG_TYPE_CMD;
msg.flag = SMSG_FLG_DELE_REQUEST;
msg.value = 0;
return smsg_send(SIPC_ID_MINIAP, &msg, -1);
}
/*
 * Map the remote IPA register window through the PCIe EP and derive the
 * UL/DL common-FIFO register bases from it.
 *
 * Returns 0 on success, -ENOMEM if the ioremap fails.
 */
static int sipa_init_cmn_fifo_reg_addr(struct sipa_core *ipa)
{
	/* translate the device-side register resource into a host address */
	ipa->reg_mapped = sprd_ep_ipa_map(PCIE_IPA_TYPE_REG,
					  ipa->reg_res->start,
					  resource_size(ipa->reg_res));
/* compat shim: devm_ioremap_nocache was removed in kernel v5.6 */
#ifndef devm_ioremap_nocache
#define devm_ioremap_nocache devm_ioremap
#endif
	ipa->virt_reg_addr = devm_ioremap_nocache(ipa->dev,
						  (resource_size_t)ipa->reg_mapped,
						  (resource_size_t)(resource_size(ipa->reg_res)));
	if (!ipa->virt_reg_addr) {
		dev_err(ipa->dev, "ipa reg base remap fail\n");
		return -ENOMEM;
	}
	/* fixed offsets of the two PCIe common FIFOs inside the window */
	ipa->cmn_fifo_cfg[SIPA_FIFO_PCIE_DL].fifo_reg_base =
		ipa->virt_reg_addr + SIPA_PCIE_DL_CMN_FIFO_REG_OFFSET;
	ipa->cmn_fifo_cfg[SIPA_FIFO_PCIE_UL].fifo_reg_base =
		ipa->virt_reg_addr + SIPA_PCIE_UL_CMN_FIFO_REG_OFFSET;
	return 0;
}
/*
 * Delegator kthread: opens the SMSG channel to the mini-AP, retries the
 * start request until acknowledged, then loops receiving and dispatching
 * channel events until the thread is stopped.
 *
 * On SMSG_TYPE_DONE the remote IPA is up: map its registers, open the
 * sender/receiver common FIFOs and mark remote_ready.
 */
static int conn_thread(void *data)
{
	struct smsg mrecv;
	int ret, timeout = 500;
	struct sipa_core *ipa = data;
	/* since the channel open may hang, we call it in the thread context */
	ret = smsg_ch_open(SIPC_ID_MINIAP, SMSG_CH_COMM_SIPA, -1);
	if (ret != 0) {
		dev_err(ipa->dev, "sipa_delegator failed to open dst %d channel %d\n",
			SIPC_ID_MINIAP, SMSG_CH_COMM_SIPA);
		/* assign NULL to thread poniter as failed to open channel */
		return ret;
	}
	/* retry the start request (up to ~500 times, 5-10 ms apart) */
	while (sipa_dele_start_req_work() && timeout--)
		usleep_range(5000, 10000);
	/* start listen the smsg events */
	while (!kthread_should_stop()) {
		/* monitor seblock recv smsg */
		smsg_set(&mrecv, SMSG_CH_COMM_SIPA, 0, 0, 0);
		ret = smsg_recv(SIPC_ID_MINIAP, &mrecv, -1);
		if (ret == -EIO || ret == -ENODEV) {
			/* channel state is FREE */
			usleep_range(5000, 10000);
			continue;
		}
		dev_dbg(ipa->dev, "sipa type=%d, flag=0x%x, value=0x%08x\n",
			mrecv.type, mrecv.flag, mrecv.value);
		switch (mrecv.type) {
		case SMSG_TYPE_OPEN:
			/* just ack open */
			smsg_open_ack(SIPC_ID_AP, SMSG_CH_COMM_SIPA);
			break;
		case SMSG_TYPE_CLOSE:
			/* handle channel close */
			smsg_close_ack(SIPC_ID_AP, SMSG_CH_COMM_SIPA);
			break;
		case SMSG_TYPE_CMD:
			/* handle commads */
			break;
		case SMSG_TYPE_DONE:
			/* remote side finished init: bring the data path up */
			sipa_init_cmn_fifo_reg_addr(ipa);
			dev_info(ipa->dev, "remote ipa ready reg_mapped = 0x%llx\n", (long long unsigned int)ipa->reg_mapped);
			sipa_receiver_open_cmn_fifo(ipa->receiver);
			sipa_sender_open_cmn_fifo(ipa->sender);
			sipa_nic_check_flow_ctrl();
			ipa->remote_ready = true;
			/* handle cmd done */
			break;
		case SMSG_TYPE_EVENT:
			/* handle events */
			break;
		default:
			ret = 1;
			break;
		};
		if (ret) {
			dev_info(ipa->dev, "unknown msg in conn_thrd: %d, %d, %d\n",
				 mrecv.type, mrecv.flag, mrecv.value);
			ret = 0;
		}
	}
	return ret;
}
int sipa_create_smsg_channel(struct sipa_core *ipa)
{
/* create channel thread for this seblock channel */
ipa->smsg_thread = kthread_create(conn_thread, ipa, "sipa-dele");
if (IS_ERR(ipa->smsg_thread)) {
dev_err(ipa->dev, "Failed to create monitor smsg kthread\n");
return PTR_ERR(ipa->smsg_thread);
}
wake_up_process(ipa->smsg_thread);
return 0;
}
EXPORT_SYMBOL(sipa_create_smsg_channel);

/* ---- diff-view artifact: boundary of next deleted source file (sipa_dummy.c) ---- */
/*
* Copyright (C) 2020 Spreadtrum Communications Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) "sipa_dummy: " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/of_device.h>
#include <linux/interrupt.h>
#include <linux/netdev_features.h>
#include <linux/mutex.h>
#include <net/arp.h>
#include "sipa_eth.h"
#include "sipa_core.h"
#include "../include/sipa.h"
/* Device status */
#define DEV_ON 1
#define DEV_OFF 0
#define SIPA_DUMMY_NAPI_WEIGHT 64
extern struct sipa_eth_netid_device * dev_list[];
static struct net_device *dummy_dev;
static struct dentry *dummy_root;
static int sipa_dummy_debugfs_mknod(void *data);
#ifndef CONFIG_SPRD_ETHERNET
/* Answer an incoming ARP request on behalf of `net`; returns 1 if the
 * frame was an Ethernet/IPv4 ARP request we replied to, 0 otherwise.
 */
static int sipa_arp_reply(struct net_device *net, struct sk_buff *skb) {
	struct arphdr *parp = arp_hdr(skb);
	struct sk_buff *reply;
	u8 *cursor, *sha;
	u8 sip[4], tip[4];

	if (parp->ar_hrd != htons(ARPHRD_ETHER) ||
	    parp->ar_pro != htons(ETH_P_IP) ||
	    parp->ar_op != htons(ARPOP_REQUEST) ||
	    parp->ar_hln != 6 || parp->ar_pln != 4)
		return 0;

	/* walk the variable part: sha | sip | tha | tip */
	cursor = (u8 *)parp + sizeof(struct arphdr);
	sha = cursor;
	cursor += net->addr_len;
	memcpy(sip, cursor, sizeof(sip));
	cursor += sizeof(sip);
	cursor += net->addr_len;
	memcpy(tip, cursor, sizeof(tip));
	pr_info("%s sip = %d.%d.%d.%d, tip=%d.%d.%d.%d\n", netdev_name(net), sip[0], sip[1], sip[2], sip[3], tip[0], tip[1], tip[2], tip[3]);
	/* reply claiming the target IP with our own hardware address */
	reply = arp_create(ARPOP_REPLY, ETH_P_ARP, *((__be32 *)sip), skb->dev,
			   *((__be32 *)tip), sha, net->dev_addr, sha);
	if (reply)
		dev_queue_xmit(reply);
	return 1;
}
/* Learn the modem's MAC address by snooping broadcast DHCP offer/ACK
 * frames; also services ARP requests arriving on the data path.
 */
static void sipa_get_modem_mac(struct sk_buff *skb, struct SIPA_ETH *sipa_eth)
{
	struct sipa_eth_init_data *pdata = sipa_eth->pdata;
	struct ethhdr *ehdr = (struct ethhdr *)(skb->data - ETH_HLEN);
	struct iphdr *iph = ip_hdr(skb);
	struct udphdr *udph = (struct udphdr *)(skb->data + iph->ihl * 4);

	if (ehdr->h_proto == htons(ETH_P_ARP)) {
		sipa_arp_reply(skb->dev, skb);
		return;
	}
	/* only broadcast IPv4/UDP with a real source can be DHCP from modem */
	if (ehdr->h_proto != htons(ETH_P_IP) || iph->protocol != IPPROTO_UDP ||
	    iph->saddr == 0x00000000 || iph->daddr != 0xFFFFFFFF)
		return;
	if (udph->dest == htons(68)) {	/* DHCP client port: offer/ACK */
		memcpy(pdata->modem_mac, ehdr->h_source, ETH_ALEN);
		pr_info("Modem Mac Address: %02x:%02x:%02x:%02x:%02x:%02x\n",
			pdata->modem_mac[0], pdata->modem_mac[1], pdata->modem_mac[2], pdata->modem_mac[3], pdata->modem_mac[4], pdata->modem_mac[5]);
	}
}
#endif
/* Term type 0x6 means we are in direct mode, currently.
* we will recv pkt with a dummy mac header, which will
* cause us fail to get skb->pkt_type and skb->protocol.
*/
static void sipa_dummy_prepare_skb(struct sk_buff *skb)
{
struct iphdr *iph;
struct ipv6hdr *ipv6h;
struct net_device *dev;
unsigned int real_len = 0, payload_len = 0;
bool ip_arp = true;
dev = skb->dev;
skb->protocol = eth_type_trans(skb, dev);
skb_reset_network_header(skb);
switch (ntohs(skb->protocol)) {
case ETH_P_IP:
iph = ip_hdr(skb);
real_len = ntohs(iph->tot_len);
break;
case ETH_P_IPV6:
ipv6h = ipv6_hdr(skb);
payload_len = ntohs(ipv6h->payload_len);
real_len = payload_len + sizeof(struct ipv6hdr);
break;
case ETH_P_ARP:
real_len = arp_hdr_len(dev);
break;
default:
ip_arp = false;
break;
}
if (ip_arp)
skb_trim(skb, real_len);
/* TODO chechsum ... */
skb->ip_summed = CHECKSUM_NONE;
skb->pkt_type = PACKET_HOST;
}
/* Term type 0x6 means we are in direct mode, currently.
* we will recv pkt with a dummy mac header, which will
* cause us fail to get skb->pkt_type and skb->protocol.
*/
static void sipa_dummy_direct_mode_prepare_skb(struct sk_buff *skb)
{
struct iphdr *iph;
struct ipv6hdr *ipv6h;
unsigned int real_len = 0, payload_len = 0;
skb_pull_inline(skb, ETH_HLEN);
skb_reset_network_header(skb);
iph = ip_hdr(skb);
if (iph->version == 4) {
skb->protocol = htons(ETH_P_IP);
iph = ip_hdr(skb);
real_len = ntohs(iph->tot_len);
skb_trim(skb, real_len);
} else if(iph->version == 6){
skb->protocol = htons(ETH_P_IPV6);
ipv6h = ipv6_hdr(skb);
payload_len = ntohs(ipv6h->payload_len);
real_len = payload_len + sizeof(struct ipv6hdr);
skb_trim(skb, real_len);
} else {
pr_err("unrecognized ip version %d\n", iph->version);
}
skb->ip_summed = CHECKSUM_NONE;
skb->pkt_type = PACKET_HOST;
}
/*
 * Drain up to `budget` packets from the IPA rx path, route each to its
 * real sipa_eth netdev by netid and feed it to GRO.
 *
 * Returns the number of packets consumed, or -EINVAL without a device.
 * Fixes: the original had two netid validity checks — the first
 * (`real_netid < 0`) broke out of the loop WITHOUT freeing the skb,
 * leaking it; the checks are merged into one that always frees.
 */
static int sipa_dummy_rx(struct SIPA_DUMMY *sipa_dummy, int budget)
{
	struct sk_buff *skb;
	struct sipa_eth_netid_device *netid_dev_info;
	struct SIPA_ETH *sipa_eth;
	int real_netid = 0;
	int skb_cnt = 0;
	int ret;

	if (!sipa_dummy) {
		pr_err("no sipa_dummy device\n");
		return -EINVAL;
	}
	atomic_set(&sipa_dummy->rx_evt, 0);
	while (skb_cnt < budget) {
		ret = sipa_nic_rx(&real_netid, &skb, skb_cnt);
		if (ret) {
			switch (ret) {
			case -ENODEV:
				pr_err("sipa fail to find dev\n");
				sipa_dummy->stats.rx_errors++;
				sipa_dummy->netdev->stats.rx_errors++;
				break;
			case -ENODATA:
				pr_err("sipa no data\n");
				atomic_set(&sipa_dummy->rx_busy, 0);
				break;
			}
			break;
		}
		skb_cnt++;
		sipa_dummy->stats.rx_packets++;
		sipa_dummy->stats.rx_bytes += skb->len;
		/*
		 * We should determine the real device before we do
		 * eth_type_trans; an out-of-range netid means the skb has no
		 * owner and must be dropped (and freed) here.
		 */
		if (real_netid < 0 || real_netid >= SIPA_DUMMY_IFACE_NUM) {
			pr_err("illegal real_netid %d\n", real_netid);
			dev_kfree_skb_any(skb);
			break;
		}
		netid_dev_info = dev_list[real_netid];
		if (!netid_dev_info || netid_dev_info->state == DEV_OFF) {
			pr_info("netid= %d net is not DEV_ON\n", real_netid);
			dev_kfree_skb_any(skb);
			break;
		}
		skb->dev = netid_dev_info->ndev;
		sipa_eth = netdev_priv(skb->dev);
		sipa_eth->stats.rx_packets++;
		sipa_eth->stats.rx_bytes += skb->len;
		if (sipa_eth->pdata->term_type == 0x6) {
			sipa_dummy_direct_mode_prepare_skb(skb);
		} else {
			sipa_dummy_prepare_skb(skb);
#ifndef CONFIG_SPRD_ETHERNET
			sipa_get_modem_mac(skb, sipa_eth);
#endif
		}
		napi_gro_receive(&sipa_dummy->napi, skb);
	}
	return skb_cnt;
}
/*
 * NAPI poll: repeatedly drain the IPA tx fifo until either the budget is
 * spent or the fifo is empty with no pending rx event, then complete NAPI
 * and re-arm the hardware interrupt. A race between napi_complete() and a
 * late event is closed by re-scheduling NAPI after restoring the irq.
 */
static int sipa_dummy_rx_poll_handler(struct napi_struct *napi, int budget)
{
	int pkts = 0, num, tmp = 0;
	struct SIPA_DUMMY *sipa_dummy = container_of(napi, struct SIPA_DUMMY, napi);
READ_AGAIN:
	/* how many descriptors the remote side has filled for us */
	num = sipa_nic_get_filled_num();
	if (!num)
		goto check;
	if (num > budget)
		num = budget;
	pkts = sipa_dummy_rx(sipa_dummy, num);
	if (pkts > 0)
		sipa_nic_set_tx_fifo_rp(pkts);	/* ack consumed descriptors */
	tmp += pkts;
	budget -= pkts;
	if (!budget)
		goto out;	/* budget exhausted: stay scheduled */
check:
	/* more work arrived while we were draining? loop again */
	if (!sipa_check_recv_tx_fifo_empty() ||
	    atomic_read(&sipa_dummy->rx_evt)) {
		atomic_set(&sipa_dummy->rx_evt, 0);
		goto READ_AGAIN;
	}
	atomic_set(&sipa_dummy->rx_busy, 0);
	napi_complete(napi);
	sipa_nic_restore_irq();
	/* close the completion/new-event race window */
	if (atomic_read(&sipa_dummy->rx_evt) ||
	    atomic_read(&sipa_dummy->rx_busy) ||
	    !sipa_check_recv_tx_fifo_empty()) {
		atomic_set(&sipa_dummy->rx_evt, 0);
		napi_schedule(&sipa_dummy->napi);
	}
out:
	return tmp;
}
static void sipa_dummy_rx_handler (void *priv)
{
struct SIPA_DUMMY *sipa_dummy = (struct SIPA_DUMMY *)priv;
if (!sipa_dummy) {
pr_err("data is NULL\n");
return;
}
if (!atomic_cmpxchg(&sipa_dummy->rx_busy, 0, 1)) {
atomic_set(&sipa_dummy->rx_evt, 0);
napi_schedule(&sipa_dummy->napi);
}
}
/* for sipa to invoke */
void sipa_dummy_recv_trigger(void)
{
struct SIPA_DUMMY *sipa_dummy;
if (!dummy_dev)
return;
sipa_dummy = netdev_priv(dummy_dev);
atomic_set(&sipa_dummy->rx_evt, 1);
sipa_dummy_rx_handler(sipa_dummy);
}
/* TX on the aggregate dummy device: account the frame and drop it —
 * real transmission happens on the per-netid sipa_eth devices.
 */
static int sipa_dummy_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct SIPA_DUMMY *priv = netdev_priv(dev);

	priv->stats.tx_packets++;
	priv->stats.tx_bytes += skb->len;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
/* Open interface */
static int sipa_dummy_open(struct net_device *dev)
{
struct SIPA_DUMMY *sipa_dummy = netdev_priv(dev);
struct sipa_core *ctrl = sipa_get_ctrl_pointer();
if (!ctrl) {
return -EINVAL;
}
if(!ctrl->remote_ready)
return -EINVAL;
pr_info("dummy open\n");
if (!netif_carrier_ok(sipa_dummy->netdev)) {
netif_carrier_on(sipa_dummy->netdev);
}
netif_start_queue(dev);
//napi_enable(&sipa_dummy->napi);
napi_schedule(&sipa_dummy->napi);
return 0;
}
/* Close interface: stop the queue and drop carrier (NAPI stays enabled). */
static int sipa_dummy_close(struct net_device *dev)
{
	pr_info("close dummy!\n");
	netif_stop_queue(dev);
	netif_carrier_off(dev);
	return 0;
}
/* Expose the private stats block through the standard netdev hook. */
static struct net_device_stats *sipa_dummy_get_stats(struct net_device *dev)
{
	struct SIPA_DUMMY *priv = netdev_priv(dev);

	return &priv->stats;
}
/* netdev callbacks for the aggregate dummy interface */
static const struct net_device_ops sipa_dummy_ops = {
	.ndo_open = sipa_dummy_open,
	.ndo_stop = sipa_dummy_close,
	.ndo_start_xmit = sipa_dummy_start_xmit,
	.ndo_get_stats = sipa_dummy_get_stats,
};
/* alloc_netdev() setup callback: plain Ethernet defaults. */
static void s_setup(struct net_device *dev)
{
	ether_setup(dev);
}
/*
 * Allocate and register the shared "sipa_dummy0" netdev that aggregates
 * RX for all sipa_eth devices through one NAPI instance.
 *
 * Fixes: the global dummy_dev handle was published before
 * register_netdev(); on registration failure it was left dangling at a
 * freed netdev, which sipa_dummy_recv_trigger() would then dereference.
 * The handle is now set only after successful registration.
 */
static int sipa_dummy_probe(struct platform_device *pdev)
{
	struct SIPA_DUMMY *sipa_dummy;
	struct net_device *netdev;
	int ret;

#ifdef NET_NAME_PREDICTABLE
	netdev = alloc_netdev(
		sizeof(struct SIPA_DUMMY),
		"sipa_dummy0",
		NET_NAME_PREDICTABLE,
		s_setup);
#else
	netdev = alloc_netdev(
		sizeof(struct SIPA_DUMMY),
		"sipa_dummy0",
		s_setup);
#endif
	if (!netdev) {
		pr_err("alloc_netdev() failed.\n");
		return -ENOMEM;
	}
	netdev->type = ARPHRD_ETHER;
	sipa_dummy = netdev_priv(netdev);
	sipa_dummy->netdev = netdev;
	netdev->netdev_ops = &sipa_dummy_ops;
	netdev->watchdog_timeo = 1 * HZ;
	netdev->irq = 0;
	netdev->dma = 0;
	/* no checksum offload on this pseudo device */
	netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM);
	random_ether_addr(netdev->dev_addr);
	netif_napi_add(netdev,
		       &sipa_dummy->napi,
		       sipa_dummy_rx_poll_handler,
		       SIPA_DUMMY_NAPI_WEIGHT);
	/* Register new Ethernet interface */
	ret = register_netdev(netdev);
	if (ret) {
		pr_err("register_netdev() failed (%d)\n", ret);
		netif_napi_del(&sipa_dummy->napi);
		free_netdev(netdev);
		return ret;
	}
	/* publish the global handle only after registration succeeds */
	dummy_dev = netdev;
	/* Set link as disconnected */
	netif_carrier_off(netdev);
	platform_set_drvdata(pdev, sipa_dummy);
	sipa_dummy_debugfs_mknod((void *)sipa_dummy);
	napi_enable(&sipa_dummy->napi);
	return 0;
}
/* Cleanup Ethernet device driver. */
static int sipa_dummy_remove(struct platform_device *pdev)
{
struct SIPA_DUMMY *sipa_dummy= platform_get_drvdata(pdev);
netif_stop_queue(sipa_dummy->netdev);
napi_disable(&sipa_dummy->napi);
netif_napi_del(&sipa_dummy->napi);
unregister_netdev(sipa_dummy->netdev);
free_netdev(sipa_dummy->netdev);
platform_set_drvdata(pdev, NULL);
if (!IS_ERR_OR_NULL(dummy_root))
debugfs_remove_recursive(dummy_root);
return 0;
}
#ifdef SPRD_PCIE_USE_DTS
/* device-tree match table, used only when the platform describes us in DT */
static const struct of_device_id sipa_dummy_match_table[] = {
	{ .compatible = "sprd,sipa_dummy"},
	{ }
};
#endif
/* platform driver glue; binds by name when DT is not used */
static struct platform_driver sipa_dummy_driver = {
	.probe = sipa_dummy_probe,
	.remove = sipa_dummy_remove,
	.driver = {
		.owner = THIS_MODULE,
		.name = "sipa_dummy",
#ifdef SPRD_PCIE_USE_DTS
		.of_match_table = sipa_dummy_match_table
#endif
	}
};
#ifndef SPRD_PCIE_USE_DTS
static struct platform_device *sipa_dummy_device;
/*
 * Without DT there is nothing for the platform driver to bind against, so
 * register a matching platform device by hand.
 * (Name keeps the historical "reigster" typo; the caller uses it as-is.)
 */
static int sipa_dummy_platform_device_reigster(void)
{
	int retval = -ENOMEM;

	sipa_dummy_device = platform_device_alloc("sipa_dummy", -1);
	if (!sipa_dummy_device)
		return retval;
	retval = platform_device_add(sipa_dummy_device);
	if (retval < 0)
		platform_device_put(sipa_dummy_device);
	return retval;
}
#endif
static int sipa_dummy_debug_show(struct seq_file *m, void *v)
{
struct SIPA_DUMMY *sipa_dummy = (struct SIPA_DUMMY *)(m->private);
if (!sipa_dummy) {
pr_err("invalid data, sipa_dummy is NULL\n");
return -EINVAL;
}
seq_puts(m, "*************************************************\n");
seq_printf(m, "DEVICE: %s rx_busy=%d rx_evt=%d\n",
sipa_dummy->netdev->name, atomic_read(&sipa_dummy->rx_busy),
atomic_read(&sipa_dummy->rx_evt));
seq_puts(m, "*************************************************\n");
return 0;
}
/* debugfs open hook: route reads through sipa_dummy_debug_show(). */
static int sipa_dummy_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, sipa_dummy_debug_show, inode->i_private);
}
/* Read-only ops for the dummy device's "stats" debugfs node. */
static const struct file_operations sipa_dummy_debug_fops = {
	.open = sipa_dummy_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static int sipa_dummy_debugfs_mknod(void *data)
{
if (!dummy_root) {
pr_err("dummy dir is NULL\n");
return -ENXIO;
}
debugfs_create_file("stats",
0444,
dummy_root,
data,
&sipa_dummy_debug_fops);
return 0;
}
/* Create the top-level "sipa_dummy" debugfs directory (best effort). */
static void __init sipa_dummy_debugfs_init(void)
{
	dummy_root = debugfs_create_dir("sipa_dummy", NULL);
	if (dummy_root == NULL)
		pr_err("failed to create sipa_dummy debugfs dir\n");
}
/*
 * Module-level init: set up debugfs, register the backing platform device
 * when no DT is in use, then register the platform driver (which probes).
 */
int sipa_dummy_init(void)
{
	sipa_dummy_debugfs_init();
#ifndef SPRD_PCIE_USE_DTS
	/* no DT node to match: create the platform device manually */
	sipa_dummy_platform_device_reigster();
#endif
	return platform_driver_register(&sipa_dummy_driver);
}
EXPORT_SYMBOL(sipa_dummy_init);
/* Module-level teardown: driver first (triggers remove), then the
 * manually registered platform device in the non-DT case.
 */
void sipa_dummy_exit(void)
{
	platform_driver_unregister(&sipa_dummy_driver);
#ifndef SPRD_PCIE_USE_DTS
	platform_device_unregister(sipa_dummy_device);
#endif
}
EXPORT_SYMBOL(sipa_dummy_exit);

/* ---- diff-view artifact: one file's diff suppressed; next deleted source file (sipa_eth.h) follows ---- */
#ifndef _SIPA_ETH_H_
#define _SIPA_ETH_H_
#include "../include/sipa.h"
#include <linux/if.h>
/* number of sipa_eth netdevs */
#define SIPA_ETH_NUM 2
/* number of netid slots the dummy aggregator can route to */
#define SIPA_DUMMY_IFACE_NUM 4
/* Struct of data transfer statistics */
struct sipa_eth_dtrans_stats {
	u32 rx_sum;	/* total rx bytes */
	u32 rx_cnt;	/* rx packet count */
	u32 rx_fail;	/* rx failures */
	u32 tx_sum;	/* total tx bytes */
	u32 tx_cnt;	/* tx packet count */
	u32 tx_fail;	/* tx failures */
};
/* Device instance data. */
struct SIPA_ETH {
	int state;	/* DEV_ON / DEV_OFF */
	atomic_t rx_busy;	/* non-zero while a poll owns the rx path */
	atomic_t rx_evt;	/* pending rx event flag */
	struct net_device *netdev;/* Linux net device */
	enum sipa_nic_id nic_id;
	struct napi_struct napi;/* Napi instance */
	/* Record data_transfer statistics */
	struct sipa_eth_dtrans_stats dt_stats;
	struct net_device_stats stats;/* Net statistics */
	struct sipa_eth_init_data *pdata;/* Platform data */
	struct dentry *subroot;	/* per-device debugfs directory */
};
struct sipa_eth_init_data {
	char name[IFNAMSIZ];	/* interface name */
	unsigned char modem_mac[ETH_ALEN];	/* learned from DHCP snooping */
	u32 term_type;	/* IPA terminal type; 0x6 selects direct mode */
	s32 netid;
	bool mac_h;	/* whether frames carry a MAC header */
};
struct sipa_eth_netid_device {
	int state;	/* DEV_ON / DEV_OFF */
	int netid;
	struct net_device *ndev;
	struct napi_struct napi;/* Napi instance */
	/* Record data_transfer statistics */
	struct net_device_stats stats;/* Net statistics */
};
/* Device instance data. */
struct SIPA_DUMMY {
	atomic_t rx_busy;	/* non-zero while a poll owns the rx path */
	atomic_t rx_evt;	/* pending rx event flag */
	struct net_device *netdev;/* Linux net device */
	struct napi_struct napi;/* Napi instance */
	struct net_device_stats stats;/* Net statistics */
};
/* called by the sipa core when rx descriptors become available */
void sipa_dummy_recv_trigger(void);
#endif

/* ---- diff-view artifact: boundary of next deleted source file (sipa_nic.c) ---- */
/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include "../include/sipa.h"
#include "sipa_core.h"
/* Bitmask of source terminals whose traffic belongs to the CP side:
 * CP0/CP1/VCP plus the fixed PCIe terminal ids 0x18 and 0x19.
 */
#define SIPA_CP_SRC ((1 << SIPA_TERM_CP0) | \
	(1 << SIPA_TERM_CP1) | (1 << SIPA_TERM_VCP) | \
	(1 << 0x19) | (1 << 0x18))
/* Static per-NIC configuration: accepted source mask and its netid. */
struct sipa_nic_statics_info {
	u32 src_mask;
	int netid;
};
/*
 * Static routing table indexed by sipa_nic_id: every entry accepts the
 * CP source set (SIPA_CP_SRC) and entry i serves netid == i.
 * NOTE(review): "spia" looks like a typo for "sipa"; the name is kept
 * because other functions in this file reference it.
 */
static struct sipa_nic_statics_info s_spia_nic_statics[SIPA_NIC_MAX] = {
	{
		.src_mask = SIPA_CP_SRC,
		.netid = 0,
	},
	{
		.src_mask = SIPA_CP_SRC,
		.netid = 1,
	},
	{
		.src_mask = SIPA_CP_SRC,
		.netid = 2,
	},
	{
		.src_mask = SIPA_CP_SRC,
		.netid = 3,
	},
	{
		.src_mask = SIPA_CP_SRC,
		.netid = 4,
	},
	{
		.src_mask = SIPA_CP_SRC,
		.netid = 5,
	},
	{
		.src_mask = SIPA_CP_SRC,
		.netid = 6,
	},
	{
		.src_mask = SIPA_CP_SRC,
		.netid = 7,
	},
	{
		.src_mask = SIPA_CP_SRC,
		.netid = 8,
	},
	{
		.src_mask = SIPA_CP_SRC,
		.netid = 9,
	},
};
/*
 * sipa_nic_open - open (or reopen) the nic serving @netid.
 * @src: source terminal type (currently unused; routing comes from
 *       s_spia_nic_statics).
 * @netid: logical channel id; doubles as the nic_id index.
 * @cb: event callback invoked for RECEIVE/flow-control events.
 * @priv: opaque pointer passed back to @cb.
 *
 * Return: the nic id on success, -EINVAL when the driver or remote is
 * not ready or the id is out of range, -EBUSY if already open,
 * -ENOMEM on allocation failure.
 */
int sipa_nic_open(enum sipa_term_type src, int netid,
		  sipa_notify_cb cb, void *priv)
{
	struct sipa_core *core = sipa_get_ctrl_pointer();
	struct sipa_nic *entry;
	struct sk_buff *stale;
	enum sipa_nic_id id;

	if (!core)
		return -EINVAL;
	if (!core->remote_ready)
		return -EINVAL;

	id = netid;
	dev_info(core->dev, "open nic_id = %d\n", id);
	if (id == SIPA_NIC_MAX)
		return -EINVAL;

	entry = core->nic[id];
	if (entry) {
		if (atomic_read(&entry->status) == NIC_OPEN)
			return -EBUSY;
		/* Reopening: drop anything left over from the last session. */
		while ((stale = skb_dequeue(&entry->rx_skb_q)) != NULL)
			dev_kfree_skb_any(stale);
	} else {
		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry)
			return -ENOMEM;
		core->nic[id] = entry;
		skb_queue_head_init(&entry->rx_skb_q);
	}

	atomic_set(&entry->status, NIC_OPEN);
	entry->nic_id = id;
	entry->send_ep = &core->ep;
	entry->need_notify = 0;
	entry->src_mask = s_spia_nic_statics[id].src_mask;
	entry->netid = netid;
	entry->cb = cb;
	entry->cb_priv = priv;
	entry->continue_notify = true;

	/* every receiver may receive cp packets */
	sipa_skb_sender_add_nic(core->sender, entry);

	return id;
}
EXPORT_SYMBOL(sipa_nic_open);
/*
 * sipa_nic_close - shut a nic down and release its queued RX packets.
 * @nic_id: nic to close; silently ignored when invalid or never opened.
 *
 * The nic structure stays allocated in ctrl->nic[] so a later
 * sipa_nic_open() can reuse it.
 */
void sipa_nic_close(enum sipa_nic_id nic_id)
{
	struct sipa_nic *nic = NULL;
	struct sk_buff *skb;
	struct sipa_core *ctrl = sipa_get_ctrl_pointer();

	if (!ctrl) {
		/* BUGFIX: original used dev_err(ctrl->dev, ...) here,
		 * dereferencing ctrl when it is NULL.
		 */
		pr_err("sipa driver may not register\n");
		return;
	}
	if (nic_id == SIPA_NIC_MAX || !ctrl->nic[nic_id])
		return;

	nic = ctrl->nic[nic_id];
	nic->continue_notify = false;
	atomic_set(&nic->status, NIC_CLOSE);

	/* free all pending skbs */
	while ((skb = skb_dequeue(&nic->rx_skb_q)) != NULL)
		dev_kfree_skb_any(skb);

	sipa_skb_sender_remove_nic(ctrl->sender, nic);
	dev_info(ctrl->dev, "close nic_id = %d\n", nic_id);
}
EXPORT_SYMBOL(sipa_nic_close);
/*
 * sipa_nic_notify_evt - deliver @evt to a nic's registered callback.
 *
 * Events are dropped while the remote IPA is not ready or when the
 * nic registered no callback.
 */
void sipa_nic_notify_evt(struct sipa_nic *nic, enum sipa_evt_type evt)
{
	struct sipa_core *core = sipa_get_ctrl_pointer();

	if (!core->remote_ready)
		return;
	if (!nic->cb)
		return;

	nic->cb(nic->cb_priv, evt, 0);
}
EXPORT_SYMBOL(sipa_nic_notify_evt);
/*
 * sipa_nic_check_flow_ctrl - resume nics parked by remote-not-ready.
 *
 * Walks every nic and, for those flagged by sipa_nic_tx() while the
 * remote was unavailable (rm_flow_ctrl), clears the flag and delivers
 * a SIPA_LEAVE_FLOWCTRL event.
 */
void sipa_nic_check_flow_ctrl(void)
{
	int i;
	struct sipa_nic *nic;
	struct sipa_core *ipa = sipa_get_ctrl_pointer();

	for (i = 0; i < SIPA_NIC_MAX; i++) {
		nic = ipa->nic[i];
		if (nic && nic->rm_flow_ctrl) {
			nic->rm_flow_ctrl = false;
			/* BUGFIX: tolerate a NULL callback, consistent
			 * with sipa_nic_notify_evt().
			 */
			if (nic->cb)
				nic->cb(nic->cb_priv, SIPA_LEAVE_FLOWCTRL, 0);
		}
	}
}
EXPORT_SYMBOL(sipa_nic_check_flow_ctrl);
/*
 * sipa_nic_try_notify_recv - signal SIPA_RECEIVE to an open nic.
 *
 * No-op when the nic is closed or has no callback registered.
 */
void sipa_nic_try_notify_recv(struct sipa_nic *nic)
{
	if (atomic_read(&nic->status) != NIC_CLOSE && nic->cb)
		nic->cb(nic->cb_priv, SIPA_RECEIVE, 0);
}
EXPORT_SYMBOL(sipa_nic_try_notify_recv);
/*
 * sipa_nic_push_skb - queue one received skb on the nic's RX list.
 *
 * Flags need_notify on the first queued packet, or on every packet
 * while continue_notify mode is on.
 */
void sipa_nic_push_skb(struct sipa_nic *nic, struct sk_buff *skb)
{
	struct sipa_core *core = sipa_get_ctrl_pointer();

	atomic_inc(&core->recv_cnt);
	skb_queue_tail(&nic->rx_skb_q, skb);

	if (nic->continue_notify || nic->rx_skb_q.qlen == 1)
		nic->need_notify = 1;
}
EXPORT_SYMBOL(sipa_nic_push_skb);
/*
 * sipa_nic_tx - hand one skb to the IPA sender for transmission.
 * @nic_id: index of the sending nic in ctrl->nic[].
 * @dst: destination terminal type.
 * @netid: logical channel id stamped into the node descriptor.
 * @skb: packet to send; ownership passes to the sender on success.
 *
 * Return: sender result, -EINVAL when the driver is not registered,
 * or -EINPROGRESS while the remote IPA is not ready (the nic is
 * flagged so sipa_nic_check_flow_ctrl() can notify it later).
 */
int sipa_nic_tx(enum sipa_nic_id nic_id, enum sipa_term_type dst,
		int netid, struct sk_buff *skb)
{
	int ret;
	struct sipa_core *ctrl = sipa_get_ctrl_pointer();

	if (!ctrl || !ctrl->sender) {
		/* BUGFIX: original used dev_err(ctrl->dev, ...) here,
		 * dereferencing ctrl when it is NULL.
		 */
		pr_err("sipa driver may not register\n");
		return -EINVAL;
	}

	if (!ctrl->remote_ready) {
		/* BUGFIX: guard against a nic that was never opened. */
		if (ctrl->nic[nic_id])
			ctrl->nic[nic_id]->rm_flow_ctrl = true;
		return -EINPROGRESS;
	}

	ret = sipa_skb_sender_send_data(ctrl->sender, skb, dst, netid);

	return ret;
}
EXPORT_SYMBOL(sipa_nic_tx);
/*
 * sipa_nic_rx - fetch one received skb from the hardware fifo.
 * @netid: out parameter, channel id of the packet.
 * @out_skb: out parameter, the received skb (NULL when none).
 * @index: fifo slot to read.
 *
 * Return: 0 on success, -ENODATA when nothing was available.
 */
int sipa_nic_rx(int *netid, struct sk_buff **out_skb, int index)
{
	struct sk_buff *got = sipa_recv_skb(netid, index);

	*out_skb = got;
	return got ? 0 : -ENODATA;
}
EXPORT_SYMBOL(sipa_nic_rx);
/*
 * sipa_nic_rx_has_data - report whether a nic has queued RX packets.
 *
 * Return: 1 when packets are queued, 0 when empty/closed/absent,
 * -EINVAL when the driver is not registered.
 */
int sipa_nic_rx_has_data(enum sipa_nic_id nic_id)
{
	struct sipa_core *core = sipa_get_ctrl_pointer();
	struct sipa_nic *entry;

	if (!core) {
		pr_err("sipa driver may not register\n");
		return -EINVAL;
	}

	entry = core->nic[nic_id];
	if (!entry || atomic_read(&entry->status) == NIC_CLOSE)
		return 0;

	return entry->rx_skb_q.qlen != 0;
}
EXPORT_SYMBOL(sipa_nic_rx_has_data);
/*
 * sipa_nic_trigger_flow_ctrl_work - schedule flow-control recovery.
 * @nic_id: nic reporting the condition (currently unused).
 * @err: send error that triggered the call; only -EAGAIN is handled.
 *
 * Return: 0 on success (including unknown @err, which is only logged),
 * -EINVAL when the driver is absent, -ENODEV without a sender.
 */
int sipa_nic_trigger_flow_ctrl_work(enum sipa_nic_id nic_id, int err)
{
	struct sipa_core *core = sipa_get_ctrl_pointer();

	if (!core) {
		pr_err("sipa driver may not register\n");
		return -EINVAL;
	}
	if (!core->sender)
		return -ENODEV;

	if (err == -EAGAIN)
		schedule_work(&core->flow_ctrl_work);
	else
		dev_warn(core->dev,
			 "don't have this flow ctrl err type\n");

	return 0;
}
EXPORT_SYMBOL(sipa_nic_trigger_flow_ctrl_work);
u32 sipa_nic_get_filled_num(void)
{
struct sipa_core *ctrl = sipa_get_ctrl_pointer();
enum sipa_cmn_fifo_index id = ctrl->receiver->ep->recv_fifo->fifo_id;
if (!ctrl->remote_ready) {
dev_err(ctrl->dev, "remote sipa not ready %d\n",
ctrl->remote_ready);
return 0;
}
return ctrl->hal_ops.recv_node_from_tx_fifo(ctrl->dev, id,
ctrl->cmn_fifo_cfg, -1);
}
EXPORT_SYMBOL(sipa_nic_get_filled_num);
void sipa_nic_restore_irq(void)
{
struct sipa_core *ctrl = sipa_get_ctrl_pointer();
enum sipa_cmn_fifo_index id = ctrl->receiver->ep->recv_fifo->fifo_id;
if (!ctrl->remote_ready) {
dev_err(ctrl->dev, "remote sipa not ready %d\n",
ctrl->remote_ready);
return;
}
ctrl->hal_ops.clr_tout_th_intr(id, ctrl->cmn_fifo_cfg);
ctrl->hal_ops.set_intr_eb(id, ctrl->cmn_fifo_cfg, true,
SIPA_FIFO_THRESHOLD_IRQ_EN |
SIPA_FIFO_DELAY_TIMER_IRQ_EN);
}
EXPORT_SYMBOL(sipa_nic_restore_irq);
/*
 * sipa_nic_set_tx_fifo_rp - advance the RX fifo's TX read pointer.
 * @rptr: new read-pointer value handed to the fifo HAL.
 * No-op while the remote IPA is not ready.
 */
void sipa_nic_set_tx_fifo_rp(u32 rptr)
{
	struct sipa_core *core = sipa_get_ctrl_pointer();
	enum sipa_cmn_fifo_index fifo = core->receiver->ep->recv_fifo->fifo_id;

	if (!core->remote_ready) {
		dev_err(core->dev, "remote sipa not ready %d\n",
			core->remote_ready);
		return;
	}

	core->hal_ops.set_tx_fifo_rp(fifo, core->cmn_fifo_cfg, rptr);
}
EXPORT_SYMBOL(sipa_nic_set_tx_fifo_rp);

View File

@ -1 +0,0 @@
obj-y += sipa_common_fifo_hal.o sipa_fifo_irq_hal.o

View File

@ -1,74 +0,0 @@
#include "../../include/sipa.h"
#include "../sipa_core.h"
#include "sipa_fifo_phy.h"
static void ipa_fifo_traverse_int_bit(enum sipa_cmn_fifo_index id,
struct sipa_cmn_fifo_cfg_tag *ipa_cfg)
{
void __iomem *fifo_base;
u32 clr_sts = 0;
u32 int_status = 0;
fifo_base = ipa_cfg->fifo_reg_base;
int_status = ipa_phy_get_fifo_all_int_sts(fifo_base);
if (!(int_status & IPA_INT_STS_GROUP))
return;
if (int_status & IPA_INT_EXIT_FLOW_CTRL_STS) {
ipa_cfg->exit_flow_ctrl_cnt++;
clr_sts |= IPA_EXIT_FLOW_CONTROL_CLR_BIT;
}
if (int_status & IPA_INT_ERRORCODE_IN_TX_FIFO_STS)
clr_sts |= IPA_ERROR_CODE_INTR_CLR_BIT;
if (int_status & IPA_INT_ENTER_FLOW_CTRL_STS) {
ipa_cfg->enter_flow_ctrl_cnt++;
clr_sts |= IPA_ENTRY_FLOW_CONTROL_CLR_BIT;
}
if (int_status & IPA_INT_INTR_BIT_STS)
clr_sts |= IPA_TX_FIFO_INTR_CLR_BIT;
if (int_status & IPA_INT_THRESHOLD_STS ||
int_status & IPA_INT_DELAY_TIMER_STS) {
ipa_phy_disable_int_bit(ipa_cfg->fifo_reg_base,
IPA_TX_FIFO_THRESHOLD_EN |
IPA_TX_FIFO_DELAY_TIMER_EN);
clr_sts |= IPA_TX_FIFO_THRESHOLD_CLR_BIT |
IPA_TX_FIFO_TIMER_CLR_BIT;
}
if (int_status & IPA_INT_DROP_PACKT_OCCUR)
clr_sts |= IPA_DROP_PACKET_INTR_CLR_BIT;
if (int_status & IPA_INT_TXFIFO_OVERFLOW_STS)
clr_sts |= IPA_TX_FIFO_OVERFLOW_CLR_BIT;
if (int_status & IPA_INT_TXFIFO_FULL_INT_STS)
clr_sts |= IPA_TX_FIFO_FULL_INT_CLR_BIT;
if (ipa_cfg->irq_cb)
ipa_cfg->irq_cb(ipa_cfg->priv, int_status, id);
else
pr_err("Don't register this fifo(%d) irq callback\n", id);
ipa_phy_clear_int(ipa_cfg->fifo_reg_base, clr_sts);
}
/*
 * sipa_int_callback_func - top-level IRQ dispatch for both PCIe fifos.
 * @evt: interrupt event (unused here).
 * @cookie: opaque registration cookie (unused here).
 *
 * Return: always 0; does nothing while the remote IPA is not ready.
 */
int sipa_int_callback_func(int evt, void *cookie)
{
	struct sipa_core *core = sipa_get_ctrl_pointer();

	if (!core->remote_ready)
		return 0;

	ipa_fifo_traverse_int_bit(SIPA_FIFO_PCIE_DL,
				  &core->cmn_fifo_cfg[SIPA_FIFO_PCIE_DL]);
	ipa_fifo_traverse_int_bit(SIPA_FIFO_PCIE_UL,
				  &core->cmn_fifo_cfg[SIPA_FIFO_PCIE_UL]);
	return 0;
}
EXPORT_SYMBOL(sipa_int_callback_func);

View File

@ -1,674 +0,0 @@
/*
* Copyright (C) 2020 Unisoc Communications Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/ipv6.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/if_arp.h>
#include <asm/byteorder.h>
#include <linux/tty.h>
#include <linux/platform_device.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/of_device.h>
#include <linux/version.h>
#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,4,60 ))
#include <uapi/linux/sched/types.h>
#include <linux/sched/task.h>
#endif
#include "../include/sipa.h"
#include "sipa_core.h"
#include "sipa_eth.h"
#define SIPA_RECV_BUF_LEN 1600
#define SIPA_RECV_RSVD_LEN 128
/*
 * put_recv_array_node - append an skb/dma pair to the recv ring.
 *
 * Return: 0 on success, -1 when the ring is full. The depth must be a
 * power of two (masking via depth - 1).
 */
static int put_recv_array_node(struct sipa_skb_array *p,
			       struct sk_buff *skb, u64 *dma_addr)
{
	u32 slot;

	/* Full when wp has run a whole depth ahead of rp. */
	if (p->wp - p->rp >= p->depth)
		return -1;

	slot = p->wp & (p->depth - 1);
	p->array[slot].skb = skb;
	p->array[slot].dma_addr = *dma_addr;
	/*
	 * Ensure that we put the item to the fifo before
	 * we update the fifo wp.
	 */
	smp_wmb();
	p->wp++;
	return 0;
}
/*
 * get_recv_array_node - pop the oldest skb/dma pair off the recv ring.
 *
 * Return: 0 on success, -1 when the ring is empty.
 */
static int get_recv_array_node(struct sipa_skb_array *p,
			       struct sk_buff **skb, u64 *dma_addr)
{
	u32 slot;

	if (p->rp == p->wp)
		return -1;

	slot = p->rp & (p->depth - 1);
	*skb = p->array[slot].skb;
	*dma_addr = p->array[slot].dma_addr;
	/*
	 * Ensure that we remove the item from the fifo before
	 * we update the fifo rp.
	 * NOTE(review): a write barrier is used on this read side,
	 * mirroring the original code - confirm intended semantics.
	 */
	smp_wmb();
	p->rp++;
	return 0;
}
/*
 * create_recv_array - allocate and reset the recv ring backing store.
 * @p: ring descriptor to initialize.
 * @depth: number of slots; callers pass a power of two (the ring
 *         indexing masks with depth - 1).
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */
static int create_recv_array(struct sipa_skb_array *p, u32 depth)
{
	/* kcalloc() checks depth * sizeof(*p->array) for overflow,
	 * unlike the open-coded multiply previously passed to kzalloc().
	 */
	p->array = kcalloc(depth, sizeof(*p->array), GFP_KERNEL);
	if (!p->array)
		return -ENOMEM;
	p->rp = 0;
	p->wp = 0;
	p->depth = depth;
	return 0;
}
/* Release the recv ring storage and reset the descriptor to empty. */
static void destroy_recv_array(struct sipa_skb_array *p)
{
	kfree(p->array);
	p->array = NULL;
	p->depth = 0;
	p->wp = 0;
	p->rp = 0;
}
/*
 * alloc_recv_skb - allocate one RX skb with at least @rsvd headroom.
 * @req_len: payload bytes required.
 * @rsvd: minimum headroom to reserve.
 *
 * Return: the skb, or NULL on allocation failure.
 */
static struct sk_buff *alloc_recv_skb(u32 req_len, u8 rsvd)
{
	struct sipa_core *core = sipa_get_ctrl_pointer();
	struct sk_buff *skb;
	u32 headroom;

	/* NOTE(review): GFP_KERNEL | GFP_NOWAIT mirrors the original
	 * call - confirm the intended allocation context.
	 */
	skb = __dev_alloc_skb(req_len + rsvd, GFP_KERNEL | GFP_NOWAIT);
	if (!skb) {
		dev_err(core->dev, "failed to alloc skb!\n");
		return NULL;
	}

	/* save skb ptr to skb->data */
	headroom = skb_headroom(skb);
	if (headroom < rsvd)
		skb_reserve(skb, rsvd - headroom);

	return skb;
}
/*
 * sipa_prepare_free_node_init - preload the RX free fifo with skbs.
 * @receiver: receiver context owning the recv ring and fifo config.
 * @cnt: number of buffer nodes to prepare (callers pass the fifo depth).
 *
 * For each node: allocate an skb, normalize its headroom, DMA-map it
 * for device writes, describe it in the hardware node descriptor and
 * remember the skb/address pair in receiver->recv_array.  Stops at the
 * first allocation/mapping failure and logs the failure count.
 */
static void sipa_prepare_free_node_init(struct sipa_skb_receiver *receiver,
					u32 cnt)
{
	struct sk_buff *skb;
	u32 tmp, fail_cnt = 0;
	int i;
	u32 success_cnt = 0;
	u64 dma_addr;
	struct sipa_node_description_tag *node;
#if defined (__BIG_ENDIAN_BITFIELD)
	/* Big-endian: build the descriptor in a scratch copy, then store
	 * it via sipa_set_node_desc() in the layout the hw expects. */
	struct sipa_node_description_tag node_tmp;
#endif
	struct sipa_core *ctrl = sipa_get_ctrl_pointer();
	struct sipa_cmn_fifo_cfg_tag *cmn = receiver->ep->recv_fifo;
	for (i = 0; i < cnt; i++) {
		skb = alloc_recv_skb(SIPA_RECV_BUF_LEN, receiver->rsvd);
		if (!skb) {
			fail_cnt++;
			break;
		}
		/* Trim headroom down to SIPA_RECV_RSVD_LEN so every buffer
		 * presents the same payload window to the device. */
		tmp = skb_headroom(skb);
		if (unlikely(tmp > SIPA_RECV_RSVD_LEN)) {
			tmp -= SIPA_RECV_RSVD_LEN;
			skb_put(skb, SIPA_RECV_BUF_LEN - tmp);
			skb_push(skb, tmp);
		} else {
			skb_put(skb, SIPA_RECV_BUF_LEN);
		}
		dma_addr = (u64)dma_map_single(ctrl->pci_dev,
					       skb->head,
					       SIPA_RECV_BUF_LEN +
					       skb_headroom(skb),
					       DMA_FROM_DEVICE);
		if (dma_mapping_error(ctrl->pci_dev, (dma_addr_t)dma_addr)) {
			dev_kfree_skb_any(skb);
			dev_err(ctrl->dev,
				"prepare free node dma map err\n");
			fail_cnt++;
			break;
		}
		node = ctrl->hal_ops.get_rx_fifo_wr(cmn->fifo_id,
						    ctrl->cmn_fifo_cfg,
						    i);
		if (!node) {
			dma_unmap_single(ctrl->pci_dev, dma_addr,
					 SIPA_RECV_BUF_LEN +
					 skb_headroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			dev_err(ctrl->dev,
				"get node fail index = %d\n", i);
			fail_cnt++;
			break;
		}
		/* Device-side address is offset from the host PCIe view. */
		dma_addr += ctrl->pcie_mem_offset;
#if defined (__BIG_ENDIAN_BITFIELD)
		memset(&node_tmp, 0, sizeof(node_tmp));
		node_tmp.address = dma_addr;
		node_tmp.length = skb->len;
		node_tmp.offset = skb_headroom(skb);
		node_tmp.dst = ctrl->ep.recv_fifo->dst;
		node_tmp.src = ctrl->ep.recv_fifo->cur;
		node_tmp.intr = 0;
		node_tmp.net_id = 0;
		node_tmp.err_code = 0;
		sipa_set_node_desc((u8 *)node, (u8 *)&node_tmp);
#else
		node->address = dma_addr;
		node->length = skb->len;
		node->offset = skb_headroom(skb);
		node->dst = ctrl->ep.recv_fifo->dst;
		node->src = ctrl->ep.recv_fifo->cur;
		node->intr = 0;
		node->net_id = 0;
		node->err_code = 0;
#endif
		if (dma_addr == 0 || node->address == 0)
			pr_info("cnt = %d, i = %d, dma_addr 0x%llx, node->address 0x%llx\n",
				cnt, i, dma_addr, (long long unsigned int)node->address);
		put_recv_array_node(&receiver->recv_array, skb, &dma_addr);
		success_cnt++;
	}
	if (fail_cnt)
		dev_err(ctrl->dev,
			"fail_cnt = %d success_cnt = %d\n",
			fail_cnt, success_cnt);
}
/*
 * fill_free_fifo - replenish @cnt consumed RX buffers.
 * @receiver: receiver context owning the recv ring and fifo config.
 * @cnt: number of nodes to refill (from need_fill_cnt).
 *
 * Same per-node procedure as sipa_prepare_free_node_init(), but in
 * addition publishes the new nodes to hardware via set_rx_fifo_wr()
 * and decrements need_fill_cnt by the number actually refilled.
 * A refill of more than 3/4 of the fifo depth is counted as a
 * "danger" event.
 */
static void fill_free_fifo(struct sipa_skb_receiver *receiver, u32 cnt)
{
	struct sk_buff *skb;
	u32 tmp, fail_cnt = 0;
	int i;
	u32 success_cnt = 0, depth;
	u64 dma_addr;
	struct sipa_node_description_tag *node;
#if defined (__BIG_ENDIAN_BITFIELD)
	/* Big-endian scratch copy; stored via sipa_set_node_desc(). */
	struct sipa_node_description_tag node_tmp;
#endif
	struct sipa_core *ctrl = sipa_get_ctrl_pointer();
	struct sipa_cmn_fifo_cfg_tag *cmn = receiver->ep->recv_fifo;
	depth = cmn->rx_fifo.depth;
	if (cnt > (depth - depth / 4)) {
		/* Refilling almost the whole fifo: free nodes ran low. */
		receiver->rx_danger_cnt++;
	}
	for (i = 0; i < cnt; i++) {
		skb = alloc_recv_skb(SIPA_RECV_BUF_LEN, receiver->rsvd);
		if (!skb) {
			fail_cnt++;
			break;
		}
		/* Trim headroom down to SIPA_RECV_RSVD_LEN. */
		tmp = skb_headroom(skb);
		if (unlikely(tmp > SIPA_RECV_RSVD_LEN)) {
			tmp -= SIPA_RECV_RSVD_LEN;
			skb_put(skb, SIPA_RECV_BUF_LEN - tmp);
			skb_push(skb, tmp);
		} else {
			skb_put(skb, SIPA_RECV_BUF_LEN);
		}
		dma_addr = (u64)dma_map_single(ctrl->pci_dev,
					       skb->head,
					       SIPA_RECV_BUF_LEN +
					       skb_headroom(skb),
					       DMA_FROM_DEVICE);
		if (dma_mapping_error(ctrl->pci_dev, (dma_addr_t)dma_addr)) {
			dev_kfree_skb_any(skb);
			dev_err(ctrl->dev,
				"prepare free node dma map err\n");
			fail_cnt++;
			break;
		}
		node = ctrl->hal_ops.get_rx_fifo_wr(cmn->fifo_id,
						    ctrl->cmn_fifo_cfg,
						    i);
		if (!node) {
			dma_unmap_single(ctrl->pci_dev, dma_addr,
					 SIPA_RECV_BUF_LEN +
					 skb_headroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			dev_err(ctrl->dev,
				"get node fail index = %d\n", i);
			fail_cnt++;
			break;
		}
		/* Device-side address is offset from the host PCIe view. */
		dma_addr += ctrl->pcie_mem_offset;
#if defined (__BIG_ENDIAN_BITFIELD)
		memset(&node_tmp, 0, sizeof(node_tmp));
		node_tmp.address = dma_addr;
		node_tmp.length = skb->len;
		node_tmp.offset = skb_headroom(skb);
		node_tmp.dst = ctrl->ep.recv_fifo->dst;
		node_tmp.src = ctrl->ep.recv_fifo->cur;
		node_tmp.intr = 0;
		node_tmp.net_id = 0;
		node_tmp.err_code = 0;
		sipa_set_node_desc((u8 *)node, (u8 *)&node_tmp);
#else
		node->address = dma_addr;
		node->length = skb->len;
		node->offset = skb_headroom(skb);
		node->dst = ctrl->ep.recv_fifo->dst;
		node->src = ctrl->ep.recv_fifo->cur;
		node->intr = 0;
		node->net_id = 0;
		node->err_code = 0;
#endif
		put_recv_array_node(&receiver->recv_array, skb, &dma_addr);
		success_cnt++;
	}
	if (success_cnt) {
		/* Publish the refilled nodes to hardware and settle the
		 * outstanding refill debt. */
		ctrl->hal_ops.set_rx_fifo_wr(ctrl->pci_dev,
					     cmn->fifo_id,
					     ctrl->cmn_fifo_cfg,
					     success_cnt);
		if (atomic_read(&receiver->need_fill_cnt) > 0)
			atomic_sub(success_cnt,
				   &receiver->need_fill_cnt);
	}
	if (fail_cnt)
		dev_err(ctrl->dev,
			"fill free fifo fail_cnt = %d\n", fail_cnt);
}
/*
 * sipa_fill_free_node - publish @cnt prepared nodes to the hardware
 * RX fifo and clear the outstanding refill counter. A non-zero
 * need_fill_cnt at this point indicates buffers were consumed before
 * publication, which risks memory being overwritten.
 */
static void sipa_fill_free_node(struct sipa_skb_receiver *receiver, u32 cnt)
{
	struct sipa_core *core = sipa_get_ctrl_pointer();

	core->hal_ops.set_rx_fifo_wr(core->pci_dev,
				     receiver->ep->recv_fifo->fifo_id,
				     core->cmn_fifo_cfg, cnt);

	if (atomic_read(&receiver->need_fill_cnt) > 0)
		dev_info(core->dev,
			 "a very serious problem, mem cover may appear\n");

	atomic_set(&receiver->need_fill_cnt, 0);
}
/*
 * sipa_receiver_notify_cb - fifo IRQ callback for the receiver.
 * @priv: the sipa_skb_receiver registered in sipa_receiver_init().
 * @evt: interrupt event bits.
 * @data: unused.
 *
 * Counts resource-warning events and kicks the dummy netdev poll.
 */
static void sipa_receiver_notify_cb(void *priv, enum sipa_irq_evt_type evt,
				    unsigned long data)
{
	struct sipa_skb_receiver *rcv = (struct sipa_skb_receiver *)priv;
	struct sipa_core *core = sipa_get_ctrl_pointer();

	if (evt & SIPA_RECV_WARN_EVT) {
		dev_dbg(core->dev,
			"sipa maybe poor resources evt = 0x%x\n", evt);
		rcv->tx_danger_cnt++;
	}

	sipa_dummy_recv_trigger();
}
/* Drain the recv ring and free every skb still parked in it. */
static void sipa_free_recv_skb(struct sipa_skb_receiver *receiver)
{
	struct sk_buff *skb = NULL;
	u64 dma = 0;

	while (!get_recv_array_node(&receiver->recv_array, &skb, &dma))
		dev_kfree_skb_any(skb);
}
/*
 * sipa_recv_skb - retrieve one completed RX packet.
 * @netid: out parameter, channel id taken from the node descriptor.
 * @index: fifo slot to read the descriptor from.
 *
 * Pops the oldest skb/address pair from the recv ring and matches it
 * against the hardware node descriptor, retrying briefly (10 x 1us)
 * for the descriptor address to become visible/consistent.  On any
 * mismatch the buffer is unmapped, freed and accounted for refill and
 * NULL is returned; on success the unmapped skb is returned and a
 * refill is requested once more than 0x30 buffers are outstanding.
 *
 * Return: the received skb, or NULL on empty/inconsistent state.
 */
struct sk_buff *sipa_recv_skb(int *netid, int index)
{
	int ret = -1;
	u32 retry_cnt = 10;
	u64 addr = 0;
	struct sk_buff *recv_skb = NULL;
#if defined (__BIG_ENDIAN_BITFIELD)
	/* Big-endian reads the descriptor into a local copy. */
	struct sipa_node_description_tag node;
#else
	struct sipa_node_description_tag *node;
#endif
	struct sipa_core *ctrl = sipa_get_ctrl_pointer();
	struct sipa_skb_receiver *receiver = ctrl->receiver;
	enum sipa_cmn_fifo_index id = receiver->ep->recv_fifo->fifo_id;
	ret = get_recv_array_node(&receiver->recv_array,
				  &recv_skb, &addr);
read_again:
#if defined (__BIG_ENDIAN_BITFIELD)
	sipa_get_node_desc((u8 *)ctrl->hal_ops.get_tx_fifo_rp(id,
			   ctrl->cmn_fifo_cfg, index), &node);
#else
	node = ctrl->hal_ops.get_tx_fifo_rp(id, ctrl->cmn_fifo_cfg, index);
#endif
#if defined (__BIG_ENDIAN_BITFIELD)
	if (!node.address) {
#else
	if (!node->address) {
#endif
		/* Descriptor not visible yet: poll briefly before giving up. */
		if (retry_cnt--) {
			udelay(1);
			goto read_again;
		}
#if defined (__BIG_ENDIAN_BITFIELD)
		dev_err(ctrl->dev, "phy addr is null = %llx\n",
			(u64)node.address);
#else
		dev_err(ctrl->dev, "phy addr is null = %llx\n",
			(u64)node->address);
#endif
		/* We already consumed a ring entry: release its buffer and
		 * advance the hardware read pointer past the dead slot. */
		if(!ret) {
			dma_unmap_single(ctrl->pci_dev, (dma_addr_t)(addr - ctrl->pcie_mem_offset),
					 SIPA_RECV_BUF_LEN + skb_headroom(recv_skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(recv_skb);
			atomic_add(1, &receiver->need_fill_cnt);
			ctrl->hal_ops.set_tx_fifo_rp(id, ctrl->cmn_fifo_cfg, 1);
			dev_err(ctrl->dev,
				"recv addr is null, but recv_array addr:0x%llx\n",
				addr);
		}
		return NULL;
	}
	retry_cnt = 10;
check_again:
	if (ret) {
		/* Hardware has a node but our ring was empty. */
#if defined (__BIG_ENDIAN_BITFIELD)
		dev_err(ctrl->dev,
			"recv addr:0x%llx, but recv_array is empty\n",
			(u64)node.address);
#else
		dev_err(ctrl->dev,
			"recv addr:0x%llx, but recv_array is empty\n",
			(u64)node->address);
#endif
		return NULL;
#if defined (__BIG_ENDIAN_BITFIELD)
	} else if (addr != node.address && retry_cnt) {
#else
	} else if (addr != node->address && retry_cnt) {
#endif
		/* Address mismatch may be transient: re-read and retry. */
		retry_cnt--;
		udelay(1);
#if defined (__BIG_ENDIAN_BITFIELD)
		sipa_get_node_desc((u8 *)ctrl->hal_ops.get_tx_fifo_rp(id,
				   ctrl->cmn_fifo_cfg, index), &node);
#endif
		goto check_again;
#if defined (__BIG_ENDIAN_BITFIELD)
	} else if (addr != node.address && !retry_cnt) {
#else
	} else if (addr != node->address && !retry_cnt) {
#endif
		/* Persistent mismatch: drop the buffer and skip the node. */
		dma_unmap_single(ctrl->pci_dev, (dma_addr_t)(addr - ctrl->pcie_mem_offset),
				 SIPA_RECV_BUF_LEN + skb_headroom(recv_skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(recv_skb);
		atomic_add(1, &receiver->need_fill_cnt);
		dev_err(ctrl->dev,
			"recv addr:0x%llx, but recv_array addr:0x%llx not equal\n",
#if defined (__BIG_ENDIAN_BITFIELD)
			(u64)node.address, addr);
#else
			(u64)node->address, addr);
#endif
		ctrl->hal_ops.set_tx_fifo_rp(id, ctrl->cmn_fifo_cfg, 1);
		return NULL;
	}
	/* Success: hand the CPU back ownership of the buffer. */
	dma_unmap_single(ctrl->pci_dev, (dma_addr_t)(addr - ctrl->pcie_mem_offset),
			 SIPA_RECV_BUF_LEN + skb_headroom(recv_skb),
			 DMA_FROM_DEVICE);
	atomic_add(1, &receiver->need_fill_cnt);
	if (atomic_read(&receiver->need_fill_cnt) > 0x30)
		wake_up(&receiver->fill_recv_waitq);
#if defined (__BIG_ENDIAN_BITFIELD)
	*netid = node.net_id;
#else
	*netid = node->net_id;
#endif
	return recv_skb;
}
EXPORT_SYMBOL(sipa_recv_skb);
/*
 * fill_recv_thread - kthread that replenishes consumed RX buffers.
 * @data: the sipa_skb_receiver this thread serves.
 *
 * Sleeps until need_fill_cnt reports consumed buffers or shutdown is
 * requested (receiver->run == 0), then refills that many nodes.  On
 * shutdown it drains the recv ring, destroys it and frees the
 * receiver itself - the receiver must not be used by anyone after
 * destroy_sipa_skb_receiver() has signalled this thread.
 */
static int fill_recv_thread(void *data)
{
	int ret;
	struct sipa_skb_receiver *receiver = (struct sipa_skb_receiver *)data;
	struct sched_param param = {.sched_priority = 92};
	unsigned long flags;
	/* Round-robin RT priority so refills keep pace with the device. */
	sched_setscheduler(current, SCHED_RR, &param);
	while (!kthread_should_stop()) {
		ret = wait_event_interruptible(receiver->fill_recv_waitq,
			(atomic_read(&receiver->need_fill_cnt) > 0) || receiver->run == 0);
		/* Check the shutdown flag under exit_lock so teardown
		 * cannot race with a refill in progress. */
		spin_lock_irqsave(&receiver->exit_lock, flags);
		if(receiver->run == 0) {
			spin_unlock_irqrestore(&receiver->exit_lock, flags);
			break;
		}
		spin_unlock_irqrestore(&receiver->exit_lock, flags);
		if (!ret)
			fill_free_fifo(receiver, atomic_read(&receiver->need_fill_cnt));
	}
	/* Teardown: release all buffers, the ring and the receiver. */
	sipa_free_recv_skb(receiver);
	if (receiver->recv_array.array)
		destroy_recv_array(&receiver->recv_array);
	kfree(receiver);
	return 0;
}
bool sipa_check_recv_tx_fifo_empty(void)
{
struct sipa_core *ctrl = sipa_get_ctrl_pointer();
enum sipa_cmn_fifo_index id = ctrl->receiver->ep->recv_fifo->fifo_id;
if (!ctrl->remote_ready)
return true;
return ctrl->hal_ops.get_tx_empty_status(id, ctrl->cmn_fifo_cfg);
}
EXPORT_SYMBOL(sipa_check_recv_tx_fifo_empty);
/*
 * sipa_receiver_open_cmn_fifo - open the RX common fifo and arm it.
 * @receiver: receiver whose endpoint fifo is opened.
 *
 * Preloads the whole fifo with free nodes and enables the hardware
 * threshold (64 nodes) and timeout (0x32) interrupts.
 *
 * BUGFIX: receiver was dereferenced to fetch recv_fifo before the
 * NULL check; the dereference now happens after validation.
 */
void sipa_receiver_open_cmn_fifo(struct sipa_skb_receiver *receiver)
{
	struct sipa_core *ctrl = sipa_get_ctrl_pointer();
	struct sipa_cmn_fifo_cfg_tag *fifo_cfg;

	if (unlikely(!ctrl || !receiver)) {
		pr_err("ctrl %p receiver %p not ready\n", ctrl, receiver);
		return;
	}
	fifo_cfg = receiver->ep->recv_fifo;

	ctrl->hal_ops.open(fifo_cfg->fifo_id, ctrl->cmn_fifo_cfg, NULL);
	sipa_fill_free_node(receiver, fifo_cfg->rx_fifo.depth);

	ctrl->hal_ops.set_hw_intr_thres(fifo_cfg->fifo_id,
					ctrl->cmn_fifo_cfg,
					true, 64, NULL);
	/* timeout = 1 / ipa_sys_clk * 1024 * value */
	ctrl->hal_ops.set_hw_intr_timeout(fifo_cfg->fifo_id,
					  ctrl->cmn_fifo_cfg,
					  true, 0x32, NULL);
}
EXPORT_SYMBOL(sipa_receiver_open_cmn_fifo);
/*
 * sipa_receiver_init - hook the receiver into its fifo's irq path and
 * preload the free-node ring.
 * @receiver: receiver to initialize.
 * @rsvd: headroom reserved on every RX skb.
 */
static void sipa_receiver_init(struct sipa_skb_receiver *receiver, u32 rsvd)
{
	struct sipa_core *core = sipa_get_ctrl_pointer();
	struct sipa_cmn_fifo_cfg_tag *recv_fifo = receiver->ep->recv_fifo;
	enum sipa_cmn_fifo_index fifo = recv_fifo->fifo_id;

	dev_info(core->dev,
		 "fifo_id = %d rx_fifo depth = 0x%x\n",
		 recv_fifo->fifo_id,
		 recv_fifo->rx_fifo.depth);

	core->cmn_fifo_cfg[fifo].irq_cb =
		(sipa_irq_notify_cb)sipa_receiver_notify_cb;
	core->cmn_fifo_cfg[fifo].priv = receiver;

	/* reserve space for dma flushing cache issue */
	receiver->rsvd = rsvd;

	sipa_prepare_free_node_init(receiver, recv_fifo->rx_fifo.depth);
}
/*
 * sipa_receiver_add_nic - register @nic with the receiver, at most once.
 *
 * BUGFIX: the duplicate scan used to run without receiver->lock, so
 * two concurrent callers could both miss each other's insertion and
 * add the same nic twice; scan and insert are now one critical section.
 */
void sipa_receiver_add_nic(struct sipa_skb_receiver *receiver,
			   struct sipa_nic *nic)
{
	int i;
	unsigned long flags;

	spin_lock_irqsave(&receiver->lock, flags);
	for (i = 0; i < receiver->nic_cnt; i++) {
		if (receiver->nic_array[i] == nic) {
			spin_unlock_irqrestore(&receiver->lock, flags);
			return;
		}
	}
	if (receiver->nic_cnt < SIPA_NIC_MAX)
		receiver->nic_array[receiver->nic_cnt++] = nic;
	spin_unlock_irqrestore(&receiver->lock, flags);
}
EXPORT_SYMBOL(sipa_receiver_add_nic);
void sipa_reinit_recv_array(struct sipa_skb_receiver *receiver)
{
if (!receiver) {
pr_err("sipa receiver is null\n");
return;
}
if (!receiver->recv_array.array) {
pr_err("sipa p->array is null\n");
return;
}
receiver->recv_array.rp = 0;
receiver->recv_array.wp = receiver->recv_array.depth;
}
/*
 * create_sipa_skb_receiver - allocate and start a receiver for @ep.
 * @ep: endpoint whose recv fifo this receiver will service.
 * @receiver_pp: out parameter, receives the new receiver on success.
 *
 * Allocates the receiver and its recv ring, hooks up the fifo irq via
 * sipa_receiver_init() and starts the refill kthread, which owns the
 * receiver's memory from then on (it frees it on shutdown).
 *
 * Return: 0 on success, -ENOMEM or the kthread_create() error code.
 *
 * Fixes over the original: the error log no longer names the
 * nonexistent "create_sipa_sipa_receiver", and cleanup goes through
 * destroy_recv_array() instead of a raw kfree of its internals, for
 * consistency with the teardown path.
 */
int create_sipa_skb_receiver(struct sipa_endpoint *ep,
			     struct sipa_skb_receiver **receiver_pp)
{
	int ret;
	struct sipa_skb_receiver *receiver = NULL;
	struct sipa_core *ctrl = sipa_get_ctrl_pointer();

	receiver = kzalloc(sizeof(*receiver), GFP_KERNEL);
	if (!receiver)
		return -ENOMEM;

	receiver->ep = ep;
	receiver->rsvd = SIPA_RECV_RSVD_LEN;
	atomic_set(&receiver->need_fill_cnt, 0);

	ret = create_recv_array(&receiver->recv_array,
				receiver->ep->recv_fifo->rx_fifo.depth);
	if (ret) {
		dev_err(ctrl->dev,
			"create_sipa_skb_receiver: recv_array kzalloc err.\n");
		kfree(receiver);
		return -ENOMEM;
	}

	spin_lock_init(&receiver->lock);
	spin_lock_init(&receiver->exit_lock);
	init_waitqueue_head(&receiver->fill_recv_waitq);

	sipa_receiver_init(receiver, SIPA_RECV_RSVD_LEN);

	receiver->run = 1;
	receiver->fill_thread = kthread_create(fill_recv_thread, receiver,
					       "sipa-fill");
	if (IS_ERR(receiver->fill_thread)) {
		dev_err(ctrl->dev, "Failed to create kthread: ipa-fill\n");
		ret = PTR_ERR(receiver->fill_thread);
		destroy_recv_array(&receiver->recv_array);
		kfree(receiver);
		return ret;
	}
	wake_up_process(receiver->fill_thread);

	*receiver_pp = receiver;
	return 0;
}
EXPORT_SYMBOL(create_sipa_skb_receiver);
/*
 * destroy_sipa_skb_receiver - request receiver shutdown.
 *
 * Only signals fill_recv_thread to exit; that thread performs the
 * actual teardown and frees @receiver, so it must not be used after
 * this call.
 */
void destroy_sipa_skb_receiver(struct sipa_skb_receiver *receiver)
{
	unsigned long flags;

	spin_lock_irqsave(&receiver->exit_lock, flags);
	receiver->run = 0;
	wake_up_interruptible_all(&receiver->fill_recv_waitq);
	spin_unlock_irqrestore(&receiver->exit_lock, flags);
}
EXPORT_SYMBOL(destroy_sipa_skb_receiver);

View File

@ -1,556 +0,0 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/ipv6.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/if_arp.h>
#include <asm/byteorder.h>
#include <linux/tty.h>
#include <linux/platform_device.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/of_device.h>
#include <linux/version.h>
#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,4,60 ))
#include <uapi/linux/sched/types.h>
#include <linux/sched/task.h>
#endif
#include "sipa_phy_v0/sipa_fifo_phy.h"
#include "../include/sipa.h"
#include "sipa_core.h"
#include "sipa_eth.h"
#define SIPA_RECEIVER_BUF_LEN 1600
/*
 * sipa_inform_evt_to_nics - fan a flow-control event out to all nics.
 * @sender: sender whose nic_list is walked (nic_lock taken here).
 * @evt: SIPA_ENTER_FLOWCTRL or SIPA_LEAVE_FLOWCTRL.
 *
 * Leaving flow control is suppressed while either free_notify_net or
 * ep_cover_net still signals back-pressure.  Each nic is notified only
 * on an actual state transition.
 *
 * BUGFIX: the two suppression paths used to return with nic_lock still
 * held, deadlocking the next caller; they now unlock before returning.
 */
static void sipa_inform_evt_to_nics(struct sipa_skb_sender *sender,
				    enum sipa_evt_type evt)
{
	struct sipa_nic *nic;
	unsigned long flags;

	spin_lock_irqsave(&sender->nic_lock, flags);
	if (SIPA_LEAVE_FLOWCTRL == evt) {
		if (sender->free_notify_net == true) {
			pr_info("%s, not leave flowctl, free_notify_net is true\n",
				__func__);
			goto out_unlock;
		}
		if (sender->ep_cover_net == true) {
			pr_info("%s, not leave flowctl, ep_cover_net is true\n",
				__func__);
			goto out_unlock;
		}
		pr_info("%s, leave flowctl\n", __func__);
		list_for_each_entry(nic, &sender->nic_list, list) {
			if (nic->flow_ctrl_status == true) {
				nic->flow_ctrl_status = false;
				sipa_nic_notify_evt(nic, evt);
			}
		}
	} else {
		pr_info("%s, enter flowctl\n", __func__);
		list_for_each_entry(nic, &sender->nic_list, list) {
			if (nic->flow_ctrl_status == false) {
				nic->flow_ctrl_status = true;
				sipa_nic_notify_evt(nic, evt);
			}
		}
	}
out_unlock:
	spin_unlock_irqrestore(&sender->nic_lock, flags);
}
/*
 * sipa_sender_notify_cb - fifo IRQ callback for the sender.
 * @priv: the sipa_skb_sender registered with the fifo.
 * @evt: interrupt event bits.
 * @data: unused.
 *
 * Tracks enter/exit flow-control transitions (under send_lock),
 * informs the nics, and always wakes the free thread.
 */
static void sipa_sender_notify_cb(void *priv, enum sipa_irq_evt_type evt,
				  unsigned long data)
{
	struct sipa_skb_sender *sender = (struct sipa_skb_sender *)priv;
	struct sipa_core *core = sipa_get_ctrl_pointer();
	unsigned long flags;

	if (evt & SIPA_RECV_WARN_EVT) {
		dev_err(core->dev,
			"sipa overflow on ep\n");
		sender->no_free_cnt++;
	}

	if (evt & SIPA_IRQ_ENTER_FLOW_CTRL) {
		spin_lock_irqsave(&sender->send_lock, flags);
		pr_info("sipa_sender_notify_cb set ep_cover_net true!!!!!\n");
		sender->enter_flow_ctrl_cnt++;
		sender->ep_cover_net = true;
		sipa_inform_evt_to_nics(sender, SIPA_ENTER_FLOWCTRL);
		spin_unlock_irqrestore(&sender->send_lock, flags);
	}

	if (evt & SIPA_IRQ_EXIT_FLOW_CTRL) {
		spin_lock_irqsave(&sender->send_lock, flags);
		sender->exit_flow_ctrl_cnt++;
		sender->ep_cover_net = false;
		sipa_inform_evt_to_nics(sender, SIPA_LEAVE_FLOWCTRL);
		spin_unlock_irqrestore(&sender->send_lock, flags);
	}

	wake_up(&sender->free_waitq);
}
/*
 * sipa_free_sent_items - reclaim skbs the hardware has finished sending.
 * @sender: sender whose TX completion fifo is drained.
 *
 * For every completed node, finds the matching entry on sending_list
 * by DMA address (retrying briefly for descriptor visibility), moves
 * it to pair_free_list, unmaps and frees its skb, then advances the
 * hardware read pointer and returns the freed slots to left_cnt.
 * Afterwards it decides whether flow control can be exited, either
 * because enough slots freed up (free_notify_net) or because the
 * hardware raised its exit-flow-control status (ep_cover_net).
 */
static void sipa_free_sent_items(struct sipa_skb_sender *sender)
{
	bool status = false;
	unsigned long flags;
	u32 i, num, success_cnt = 0, retry_cnt = 10, failed_cnt = 0;
	struct sipa_skb_dma_addr_node *iter, *_iter;
#if defined (__BIG_ENDIAN_BITFIELD)
	/* Big-endian reads the descriptor into a local copy. */
	struct sipa_node_description_tag node;
#else
	struct sipa_node_description_tag *node;
#endif
	struct sipa_core *ctrl = sipa_get_ctrl_pointer();
	enum sipa_cmn_fifo_index id = sender->ep->send_fifo->fifo_id;
	u32 tx_wr, tx_rd, rx_wr, rx_rd;
	int exit_flow = 0;
	struct sipa_cmn_fifo_cfg_tag *fifo_cfg;
	void __iomem *fifo_base;
	u32 clr_sts = 0;
	u32 int_status = 0;
	u32 read_count = 0;	/* NOTE(review): unused in this function */
	num = ctrl->hal_ops.recv_node_from_tx_fifo(ctrl->dev, id,
						   ctrl->cmn_fifo_cfg, -1);
	for (i = 0; i < num; i++) {
		retry_cnt = 10;
#if defined (__BIG_ENDIAN_BITFIELD)
		sipa_get_node_desc((u8 *)ctrl->hal_ops.get_tx_fifo_rp(id,
				   ctrl->cmn_fifo_cfg, i), &node);
#else
		node = ctrl->hal_ops.get_tx_fifo_rp(id, ctrl->cmn_fifo_cfg, i);
#endif
#if defined (__BIG_ENDIAN_BITFIELD)
		if (node.err_code)
			dev_err(ctrl->dev, "have node transfer err = %d\n",
				node.err_code);
#else
		if (node->err_code)
			dev_err(ctrl->dev, "have node transfer err = %d\n",
				node->err_code);
#endif
check_again:
		/* Match the completed node to its sending_list entry by
		 * DMA address, under send_lock. */
		spin_lock_irqsave(&sender->send_lock, flags);
		if (list_empty(&sender->sending_list)) {
			ctrl->hal_ops.get_rx_ptr(SIPA_FIFO_PCIE_UL, ctrl->cmn_fifo_cfg, &rx_wr, &rx_rd);
			ctrl->hal_ops.get_tx_ptr(SIPA_FIFO_PCIE_UL, ctrl->cmn_fifo_cfg, &tx_wr, &tx_rd);
			dev_err(ctrl->dev, "fifo id %d: send list is empty, old tx_wr=%x tx_rd=%x, rx_wr=%x, rx_rd=%x, left_cnt=%d\n",
				sender->ep->send_fifo->fifo_id, tx_wr, tx_rd, rx_wr, rx_rd, atomic_read(&sender->left_cnt));
			spin_unlock_irqrestore(&sender->send_lock, flags);
			goto sipa_free_end;
		}
		list_for_each_entry_safe(iter, _iter, &sender->sending_list, list) {
#if defined (__BIG_ENDIAN_BITFIELD)
			if (iter->dma_addr == node.address) {
#else
			if (iter->dma_addr == node->address) {
#endif
				list_del(&iter->list);
				list_add_tail(&iter->list,
					      &sender->pair_free_list);
				status = true;
				break;
			}
		}
		spin_unlock_irqrestore(&sender->send_lock, flags);
		if (status) {
			/* Found: return the buffer to the CPU and free it. */
			dma_unmap_single(ctrl->pci_dev,
					 (dma_addr_t)(iter->dma_addr - ctrl->pcie_mem_offset),
					 iter->skb->len +
					 skb_headroom(iter->skb),
					 DMA_TO_DEVICE);
			dev_kfree_skb_any(iter->skb);
			success_cnt++;
			status = false;
		} else {
			/* Not found yet: re-read the descriptor and retry. */
			if (retry_cnt--) {
#if defined (__BIG_ENDIAN_BITFIELD)
				sipa_get_node_desc((u8 *)ctrl->hal_ops.get_tx_fifo_rp(id,
						   ctrl->cmn_fifo_cfg, i), &node);
#endif
				goto check_again;
			}
			failed_cnt++;
		}
	}
	if(failed_cnt >0){
		dev_err(ctrl->dev, "can't find matching nodes num=%d\n", failed_cnt);
	}
	/* Advance the hardware read pointer past the processed nodes. */
	ctrl->hal_ops.set_tx_fifo_rp(id, ctrl->cmn_fifo_cfg, i);
	atomic_add(success_cnt, &sender->left_cnt);
	if (num != success_cnt)
		dev_err(ctrl->dev, "recv num = %d release num = %d\n", num, success_cnt);
sipa_free_end:
	/* Leave net-driven flow control once a quarter of the fifo is free. */
	if (sender->free_notify_net && atomic_read(&sender->left_cnt) > sender->ep->send_fifo->rx_fifo.depth / 4) {
		sender->free_notify_net = false;
		exit_flow = 1;
	}
	/* Leave ep-driven flow control once hardware confirms the exit. */
	if(sender->ep_cover_net == true){
		fifo_cfg = ctrl->cmn_fifo_cfg + sender->ep->send_fifo->fifo_id;
		fifo_base = fifo_cfg->fifo_reg_base;
		int_status = ipa_phy_get_fifo_all_int_sts(fifo_base);
		if (int_status & IPA_INT_EXIT_FLOW_CTRL_STS) {
			exit_flow = 1;
			sender->ep_cover_net = false;
			clr_sts |= IPA_EXIT_FLOW_CONTROL_CLR_BIT;
			ipa_phy_clear_int(fifo_base, clr_sts);
			pr_info("%s, exit flow control\n", __func__);
		}else{
			pr_info("%s, still in flow control\n", __func__);
		}
	}
	if(exit_flow == 1){
		spin_lock_irqsave(&sender->send_lock, flags);
		sipa_inform_evt_to_nics(sender, SIPA_LEAVE_FLOWCTRL);
		spin_unlock_irqrestore(&sender->send_lock, flags);
	}
}
static bool sipa_sender_ck_unfree(struct sipa_skb_sender *sender)
{
struct sipa_core *ctrl = sipa_get_ctrl_pointer();
enum sipa_cmn_fifo_index id = sender->ep->send_fifo->fifo_id;
if (!ctrl->remote_ready) {
printk("%s: need wait remote_ready!\n", __func__);
return false;
}
if (ctrl->hal_ops.get_tx_empty_status(id, ctrl->cmn_fifo_cfg)) {
ctrl->hal_ops.clr_tout_th_intr(id, ctrl->cmn_fifo_cfg);
ctrl->hal_ops.set_intr_eb(id, ctrl->cmn_fifo_cfg, true,
SIPA_FIFO_THRESHOLD_IRQ_EN |
SIPA_FIFO_DELAY_TIMER_IRQ_EN);
return false;
} else {
return true;
}
}
/*
 * sipa_free_send_skb - shutdown helper: unmap and free every skb still
 * on the sending list, recycling the list entries to pair_free_list.
 */
static void sipa_free_send_skb(struct sipa_skb_sender *sender)
{
	struct sipa_skb_dma_addr_node *pos, *tmp;
	struct sipa_core *core = sipa_get_ctrl_pointer();
	unsigned long flags;

	spin_lock_irqsave(&sender->send_lock, flags);
	if (list_empty(&sender->sending_list)) {
		dev_err(core->dev, "fifo id %d: send list is empty\n",
			sender->ep->send_fifo->fifo_id);
		spin_unlock_irqrestore(&sender->send_lock, flags);
		return;
	}

	list_for_each_entry_safe(pos, tmp, &sender->sending_list, list) {
		list_move_tail(&pos->list, &sender->pair_free_list);
		dma_unmap_single(core->pci_dev,
				 (dma_addr_t)(pos->dma_addr -
					      core->pcie_mem_offset),
				 pos->skb->len + skb_headroom(pos->skb),
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(pos->skb);
	}
	spin_unlock_irqrestore(&sender->send_lock, flags);
}
/*
 * sipa_free_thread - kthread that reclaims transmitted skbs.
 *
 * Sleeps on free_waitq until woken by the data path (flow-control /
 * free_notify_net), by destroy_sipa_skb_sender() (run == 0), or until
 * sipa_sender_ck_unfree() reports pending work.  On shutdown this thread
 * owns the final teardown: it frees the in-flight skbs, the pair cache,
 * the nic list and the sender itself.
 */
static int sipa_free_thread(void *data)
{
	struct sipa_skb_sender *sender = (struct sipa_skb_sender *)data;
	struct sched_param param = {.sched_priority = 90};
	struct sipa_core *ctrl = sipa_get_ctrl_pointer();
	enum sipa_cmn_fifo_index id = sender->ep->send_fifo->fifo_id;
	unsigned long flags;
	struct sipa_nic *iter, *_iter;

	/* reclaim competes with the data path: run as real-time RR */
	sched_setscheduler(current, SCHED_RR, &param);
	while (!kthread_should_stop()) {
		wait_event_interruptible(sender->free_waitq,
					 sender->free_notify_net || sender->run == 0 || sender->ep_cover_net ||
					 sipa_sender_ck_unfree(sender));
		/* exit_lock orders this check against destroy_sipa_skb_sender() */
		spin_lock_irqsave(&sender->exit_lock, flags);
		if(sender->run == 0) {
			spin_unlock_irqrestore(&sender->exit_lock, flags);
			break;
		}
		spin_unlock_irqrestore(&sender->exit_lock, flags);
		sipa_free_sent_items(sender);
		/* fifo not drained yet: back off briefly before re-checking */
		if (!ctrl->hal_ops.get_tx_empty_status(id, ctrl->cmn_fifo_cfg)) {
			usleep_range(100, 200);
			//pr_info("%s, not empty\n", __func__);
		}
	}
	/* shutdown path: this thread frees everything the sender owns */
	sipa_free_send_skb(sender);
	kfree(sender->pair_cache);
	list_for_each_entry_safe(iter, _iter, &sender->nic_list, list) {
		list_del(&iter->list);
		kfree(iter);
	}
	kfree(sender);
	return 0;
}
/*
 * sipa_sender_open_cmn_fifo - attach the sender to its common TX fifo.
 *
 * Registers sipa_sender_notify_cb as the fifo irq callback, opens the
 * fifo and programs the hardware interrupt threshold (128 entries),
 * timeout (0x64) and tx-fifo-full interrupt.
 *
 * BUGFIX: the original dereferenced sender->ep->send_fifo before the
 * !sender sanity check, making the guard useless; validate first.
 */
void sipa_sender_open_cmn_fifo(struct sipa_skb_sender *sender)
{
	struct sipa_core *ctrl = sipa_get_ctrl_pointer();
	struct sipa_cmn_fifo_cfg_tag *fifo_cfg;

	if (unlikely(!ctrl || !sender)) {
		pr_err("ctrl %p sender %p not ready\n", ctrl, sender);
		return;
	}

	fifo_cfg = sender->ep->send_fifo;
	fifo_cfg->irq_cb = (sipa_irq_notify_cb)sipa_sender_notify_cb;
	fifo_cfg->priv = sender;
	ctrl->hal_ops.open(fifo_cfg->fifo_id, ctrl->cmn_fifo_cfg, NULL);
	ctrl->hal_ops.set_hw_intr_thres(fifo_cfg->fifo_id,
					ctrl->cmn_fifo_cfg, true,
					128, NULL);
	ctrl->hal_ops.set_hw_intr_timeout(fifo_cfg->fifo_id, ctrl->cmn_fifo_cfg,
					  true, 0x64, NULL);
	ctrl->hal_ops.set_intr_txfifo_full(fifo_cfg->fifo_id,
					   ctrl->cmn_fifo_cfg, true, NULL);
}
EXPORT_SYMBOL(sipa_sender_open_cmn_fifo);
/*
 * create_sipa_skb_sender - allocate and start a skb sender for @ep.
 *
 * Builds the pair-node cache (one node per rx fifo slot), initialises
 * the lists/locks/waitqueue, seeds left_cnt with 3/4 of the fifo depth,
 * and spawns the "sipa-free" reclaim thread.  On success *@sender_pp is
 * set and 0 is returned; on failure a -errno is returned and nothing is
 * left allocated.
 */
int create_sipa_skb_sender(struct sipa_endpoint *ep,
			   struct sipa_skb_sender **sender_pp)
{
	struct sipa_core *ipa = sipa_get_ctrl_pointer();
	struct sipa_skb_sender *sender;
	int slot, ret;

	dev_info(ipa->dev, "sender create start\n");

	sender = kzalloc(sizeof(*sender), GFP_KERNEL);
	if (!sender) {
		dev_err(ipa->dev, "alloc sender failed\n");
		return -ENOMEM;
	}

	sender->pair_cache = kcalloc(ep->send_fifo->rx_fifo.depth,
				     sizeof(struct sipa_skb_dma_addr_node),
				     GFP_KERNEL);
	if (!sender->pair_cache) {
		dev_err(ipa->dev, "alloc sender->pair_cache fail\n");
		ret = -ENOMEM;
		goto err_free_sender;
	}

	INIT_LIST_HEAD(&sender->nic_list);
	INIT_LIST_HEAD(&sender->sending_list);
	INIT_LIST_HEAD(&sender->pair_free_list);
	spin_lock_init(&sender->nic_lock);
	spin_lock_init(&sender->send_lock);
	spin_lock_init(&sender->exit_lock);

	/* every pair node starts on the free list */
	for (slot = 0; slot < ep->send_fifo->rx_fifo.depth; slot++)
		list_add_tail(&sender->pair_cache[slot].list,
			      &sender->pair_free_list);

	sender->ep = ep;
	atomic_set(&sender->left_cnt, ep->send_fifo->rx_fifo.depth / 4 * 3);
	init_waitqueue_head(&sender->free_waitq);

	sender->run = 1;
	sender->free_thread = kthread_create(sipa_free_thread, sender,
					     "sipa-free");
	if (IS_ERR(sender->free_thread)) {
		dev_err(ipa->dev, "Failed to create kthread: ipa-free\n");
		ret = PTR_ERR(sender->free_thread);
		goto err_free_pairs;
	}

	*sender_pp = sender;
	wake_up_process(sender->free_thread);
	return 0;

err_free_pairs:
	kfree(sender->pair_cache);
err_free_sender:
	kfree(sender);
	return ret;
}
EXPORT_SYMBOL(create_sipa_skb_sender);
/*
 * destroy_sipa_skb_sender - request asynchronous sender teardown.
 *
 * Clears run under exit_lock and wakes the free thread; the thread
 * itself performs the actual cleanup and frees @sender (see
 * sipa_free_thread()), so @sender must not be touched after this call.
 */
void destroy_sipa_skb_sender(struct sipa_skb_sender *sender)
{
	unsigned long lock_flags;

	spin_lock_irqsave(&sender->exit_lock, lock_flags);
	sender->run = 0;
	wake_up_interruptible_all(&sender->free_waitq);
	spin_unlock_irqrestore(&sender->exit_lock, lock_flags);
}
EXPORT_SYMBOL(destroy_sipa_skb_sender);
/* Register @nic with @sender so it receives flow-control events;
 * nic_lock serializes nic_list against removal and teardown. */
void sipa_skb_sender_add_nic(struct sipa_skb_sender *sender,
			     struct sipa_nic *nic)
{
	unsigned long lock_flags;

	spin_lock_irqsave(&sender->nic_lock, lock_flags);
	list_add_tail(&nic->list, &sender->nic_list);
	spin_unlock_irqrestore(&sender->nic_lock, lock_flags);
}
EXPORT_SYMBOL(sipa_skb_sender_add_nic);
/* Unregister @nic from @sender's event list; the nic itself is not
 * freed here — ownership stays with the caller. */
void sipa_skb_sender_remove_nic(struct sipa_skb_sender *sender,
				struct sipa_nic *nic)
{
	unsigned long lock_flags;

	spin_lock_irqsave(&sender->nic_lock, lock_flags);
	list_del(&nic->list);
	spin_unlock_irqrestore(&sender->nic_lock, lock_flags);
}
EXPORT_SYMBOL(sipa_skb_sender_remove_nic);
/*
 * sipa_skb_sender_send_data - queue one skb on the IPA TX fifo.
 *
 * @sender: sender bound to the endpoint's send fifo
 * @skb:    packet to transmit (mapped for DMA below)
 * @dst:    destination terminal type written into the descriptor
 * @netid:  virtual network id written into the descriptor
 *
 * Returns 0 on success, -EAGAIN when flow control is active or no
 * descriptor slot is free (caller should retry later), -EINPROGRESS
 * when the fifo status register reads as all-ones-like (EP presumed
 * down).  Every -EAGAIN path wakes the free thread so it can reclaim
 * slots and later lift flow control.  The whole path runs under
 * send_lock.
 */
int sipa_skb_sender_send_data(struct sipa_skb_sender *sender,
			      struct sk_buff *skb,
			      enum sipa_term_type dst,
			      u8 netid)
{
	unsigned long flags;
	u64 dma_addr;
	struct sipa_skb_dma_addr_node *node;
#if defined (__BIG_ENDIAN_BITFIELD)
	struct sipa_node_description_tag des;
#else
	struct sipa_node_description_tag *des;
#endif
	struct sipa_core *ctrl = sipa_get_ctrl_pointer();
	struct sipa_cmn_fifo_cfg_tag *fifo_cfg;
	void __iomem *fifo_base;
	u32 clr_sts = 0;
	u32 int_status = 0;

	spin_lock_irqsave(&sender->send_lock, flags);
	/* hardware flow control already signalled: refuse immediately */
	if (sender->ep_cover_net == true){
		pr_info("%s, ep_cover_net is true, so return EAGAIN\n", __func__);
		spin_unlock_irqrestore(&sender->send_lock, flags);
		wake_up(&sender->free_waitq);
		return -EAGAIN;
	}else{
		fifo_cfg = ctrl->cmn_fifo_cfg + sender->ep->send_fifo->fifo_id;
		fifo_base = fifo_cfg->fifo_reg_base;
		int_status = ipa_phy_get_fifo_all_int_sts(fifo_base);
		/* 0x5FF000: every status bit set at once — presumably the
		 * EP/link is down, not a real interrupt state; TODO confirm */
		if(int_status == 0x5FF000){
			pr_err("%s: check sts failed, maybe ep is down\n", __func__);
			spin_unlock_irqrestore(&sender->send_lock, flags);
			return -EINPROGRESS;
		}
		/* hardware just entered flow control: latch it, ack the irq,
		 * notify nics, and bounce this packet */
		if (int_status & IPA_INT_ENTER_FLOW_CTRL_STS) {
			pr_info("sipa_skb_sender_send_data set ep_cover_net true!!!!!\n");
			sender->ep_cover_net = true;
			sender->enter_flow_ctrl_cnt++;
			clr_sts |= IPA_ENTRY_FLOW_CONTROL_CLR_BIT;
			ipa_phy_clear_int(fifo_base, clr_sts);
			sipa_inform_evt_to_nics(sender, SIPA_ENTER_FLOWCTRL);
			spin_unlock_irqrestore(&sender->send_lock, flags);
			wake_up(&sender->free_waitq);
			return -EAGAIN;
		}
	}
	/* software flow control (slot shortage) still in effect */
	if (sender->free_notify_net == true){
		pr_info("%s: free_notify_net is true, so return EAGAIN\n", __func__);
		spin_unlock_irqrestore(&sender->send_lock, flags);
		wake_up(&sender->free_waitq);
		return -EAGAIN;
	}
	/* no descriptor slot left: enter software flow control */
	if (!atomic_read(&sender->left_cnt)) {
		sender->no_free_cnt++;
		sender->free_notify_net = true;
		sipa_inform_evt_to_nics(sender, SIPA_ENTER_FLOWCTRL);
		spin_unlock_irqrestore(&sender->send_lock, flags);
		wake_up(&sender->free_waitq);
		return -EAGAIN;
	}
	/* map from skb->head (not skb->data): the descriptor carries the
	 * headroom separately in its offset field */
	dma_addr = (u64)dma_map_single(ctrl->pci_dev, skb->head,
				       skb->len + skb_headroom(skb),
				       DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(ctrl->pci_dev, (dma_addr_t)dma_addr))) {
		sender->free_notify_net = true;
		sipa_inform_evt_to_nics(sender, SIPA_ENTER_FLOWCTRL);
		spin_unlock_irqrestore(&sender->send_lock, flags);
		wake_up(&sender->free_waitq);
		return -EAGAIN;
	}
	/* translate host DMA address into the PCIe window the EP sees */
	dma_addr += ctrl->pcie_mem_offset;
#if defined (__BIG_ENDIAN_BITFIELD)
	/* big-endian: build the descriptor locally, then byte-swap it into
	 * the fifo slot via sipa_set_node_desc() */
	memset(&des, 0, sizeof(des));
	des.address = dma_addr;
	des.length = skb->len;
	des.offset = skb_headroom(skb);
	des.net_id = netid;
	des.dst = dst;
	des.src = sender->ep->send_fifo->cur;
	des.err_code = 0;
	des.intr = 0;
	sipa_set_node_desc((u8 *)ctrl->hal_ops.get_rx_fifo_wr(sender->ep->send_fifo->fifo_id,
							      ctrl->cmn_fifo_cfg, 0), (u8 *)&des);
#else
	/* little-endian: fill the fifo slot in place */
	des = ctrl->hal_ops.get_rx_fifo_wr(sender->ep->send_fifo->fifo_id,
					   ctrl->cmn_fifo_cfg, 0);
	des->address = dma_addr;
	des->length = skb->len;
	des->offset = skb_headroom(skb);
	des->net_id = netid;
	des->dst = dst;
	des->src = sender->ep->send_fifo->cur;
	des->err_code = 0;
	des->intr = 0;
#endif
	/* remember skb<->dma pairing so the free thread can unmap later */
	node = list_first_entry(&sender->pair_free_list,
				struct sipa_skb_dma_addr_node,
				list);
	node->skb = skb;
	node->dma_addr = dma_addr;
	list_del(&node->list);
	list_add_tail(&node->list, &sender->sending_list);
	/* publish the descriptor by advancing the fifo write pointer */
	ctrl->hal_ops.set_rx_fifo_wr(ctrl->pci_dev,
				     sender->ep->send_fifo->fifo_id,
				     ctrl->cmn_fifo_cfg, 1);
	atomic_dec(&sender->left_cnt);
	spin_unlock_irqrestore(&sender->send_lock, flags);
	return 0;
}
EXPORT_SYMBOL(sipa_skb_sender_send_data);

View File

@ -1,26 +0,0 @@
menu "SIPC modules"
config SPRD_SIPC
bool "Sprd IPC"
default n
select GENERIC_ALLOCATOR
help
SIPC is a module for the Spreadtrum AP/CP communication system.
config SPRD_SIPC_SPIPE
bool "SPRD pipe driver based on SBUF"
default n
depends on SPRD_SIPC
help
This driver is a pipe driver based on SBUF, which creates
general pipes between AP & CP.
config SPRD_SIPC_SPOOL
bool "SPRD pool driver based on SBLOCK"
default n
depends on SPRD_SIPC
help
This driver is a pool driver based on SBLOCK, which creates
general pools between AP & CP.
endmenu

View File

@ -1,6 +0,0 @@
ccflags-y += -DCONFIG_SPRD_PCIE_EP_DEVICE -DCONFIG_SPRD_SIPA
obj-y += sipc.o smsg.o smem.o sbuf.o sblock.o sipc_debugfs.o
obj-y += spipe.o
obj-y += spool.o

File diff suppressed because it is too large Load Diff

View File

@ -1,173 +0,0 @@
/*
* Copyright (C) 2019 Spreadtrum Communications Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __SBLOCK_H
#define __SBLOCK_H
/* flag for CMD/DONE msg type */
#define SMSG_CMD_SBLOCK_INIT 0x1
#define SMSG_DONE_SBLOCK_INIT 0x2
/* flag for EVENT msg type */
#define SMSG_EVENT_SBLOCK_SEND 0x1
#define SMSG_EVENT_SBLOCK_RELEASE 0x2
#define SBLOCK_STATE_IDLE 0
#define SBLOCK_STATE_READY 1
#define SBLOCK_BLK_STATE_DONE 0
#define SBLOCK_BLK_STATE_PENDING 1
struct sblock_blks {
u32 addr; /*phy address*/
u32 length;
};
/* ring block header */
struct sblock_ring_header {
/* get|send-block info */
u32 txblk_addr; /* tx blocks start addr */
u32 txblk_count; /* tx blocks num */
u32 txblk_size; /* one tx block size */
u32 txblk_blks; /* tx_ring or tx_pool start addr */
u32 txblk_rdptr; /* tx_ring or tx_pool read point */
u32 txblk_wrptr; /* tx_ring or tx_pool write point */
/* release|recv-block info */
u32 rxblk_addr;
u32 rxblk_count;
u32 rxblk_size;
u32 rxblk_blks;
u32 rxblk_rdptr;
u32 rxblk_wrptr;
};
struct sblock_header {
struct sblock_ring_header ring;
struct sblock_ring_header pool;
};
struct sblock_ring_header_op {
/*
* this points point to share memory
* for update rdptr and wtptr on share memory
*/
volatile u32 *tx_rd_p;
volatile u32 *tx_wt_p;
volatile u32 *rx_rd_p;
volatile u32 *rx_wt_p;
/*
* this member copy from share memory,
* because this contents will not change on share memory
*/
u32 tx_addr; /* txblk_addr */
u32 tx_count; /* txblk_count */
u32 tx_size; /* txblk_size */
u32 tx_blks; /* txblk_blks */
u32 rx_addr;
u32 rx_count;
u32 rx_size;
u32 rx_blks;
};
struct sblock_header_op {
struct sblock_ring_header_op ringhd_op;
struct sblock_ring_header_op poolhd_op;
};
struct sblock_ring {
struct sblock_header *header;
struct sblock_header_op header_op;
struct sprd_pms *tx_pms;
struct sprd_pms *rx_pms;
char tx_pms_name[20];
char rx_pms_name[20];
void *txblk_virt; /* virt of header->txblk_addr */
void *rxblk_virt; /* virt of header->rxblk_addr */
/* virt of header->ring->txblk_blks */
struct sblock_blks *r_txblks;
/* virt of header->ring->rxblk_blks */
struct sblock_blks *r_rxblks;
/* virt of header->pool->txblk_blks */
struct sblock_blks *p_txblks;
/* virt of header->pool->rxblk_blks */
struct sblock_blks *p_rxblks;
unsigned int poll_mask;
/* protect the poll_mask menber */
spinlock_t poll_lock;
int *txrecord; /* record the state of every txblk */
int *rxrecord; /* record the state of every rxblk */
int yell; /* need to notify cp */
spinlock_t r_txlock; /* send */
spinlock_t r_rxlock; /* recv */
spinlock_t p_txlock; /* get */
spinlock_t p_rxlock; /* release */
wait_queue_head_t getwait;
wait_queue_head_t recvwait;
};
struct sblock_mgr {
u8 dst;
u8 channel;
int pre_cfg; /*support in host mode only */
u32 state;
void *smem_virt;
u32 smem_addr;
u32 smem_addr_debug;
u32 smem_size;
u32 dst_smem_addr;
/*
* this address stored in share memory,
* be used to calculte the block virt address.
* in host mode, it is client physial address(dst_smem_addr),
* in client mode, it is own physial address(smem_addr).
*/
u32 stored_smem_addr;
u32 txblksz;
u32 rxblksz;
u32 txblknum;
u32 rxblknum;
struct sblock_ring *ring;
struct task_struct *thread;
void (*handler)(int event, void *data);
void *data;
};
#ifdef CONFIG_64BIT
#define SBLOCK_ALIGN_BYTES (8)
#else
#define SBLOCK_ALIGN_BYTES (4)
#endif
/* Convert offset @x into a block index given block size @y. */
static inline u32 sblock_get_index(u32 x, u32 y)
{
	return x / y;
}
/*
 * Fold free-running position @x into a ring of @y entries: a cheap mask
 * when @y is a nonzero power of two (the power-of-two test is inlined
 * here), modulo otherwise.
 */
static inline u32 sblock_get_ringpos(u32 x, u32 y)
{
	return (y != 0 && (y & (y - 1)) == 0) ? (x & (y - 1)) : (x % y);
}
#endif

File diff suppressed because it is too large Load Diff

View File

@ -1,126 +0,0 @@
/*
* Copyright (C) 2019 Spreadtrum Communications Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __SBUF_H
#define __SBUF_H
/* flag for CMD/DONE msg type */
#define SMSG_CMD_SBUF_INIT 0x0001
#define SMSG_DONE_SBUF_INIT 0x0002
/* flag for EVENT msg type */
#define SMSG_EVENT_SBUF_WRPTR 0x0001
#define SMSG_EVENT_SBUF_RDPTR 0x0002
#if defined(CONFIG_DEBUG_FS)
#define SIPC_DEBUG_SBUF_RDWT_OWNER
#define MAX_RECORD_CNT 0x10
#endif
/* ring buf header */
struct sbuf_ring_header {
/* send-buffer info */
u32 txbuf_addr;
u32 txbuf_size;
u32 txbuf_rdptr;
u32 txbuf_wrptr;
/* recv-buffer info */
u32 rxbuf_addr;
u32 rxbuf_size;
u32 rxbuf_rdptr;
u32 rxbuf_wrptr;
};
struct sbuf_ring_header_op {
/*
* this points point to share memory
* for update rdptr and wtptr on share memory
*/
volatile u32 *rx_rd_p;
volatile u32 *rx_wt_p;
volatile u32 *tx_rd_p;
volatile u32 *tx_wt_p;
/*
* this member copy from share memory,
* because this contents will not change on share memory
*/
u32 rx_size;/* rxbuf_size */
u32 tx_size;/* txbuf_size */
};
/* sbuf_mem is the structure of smem for rings */
struct sbuf_smem_header {
u32 ringnr;
struct sbuf_ring_header headers[0];
};
struct sbuf_ring {
/* tx/rx buffer info */
volatile struct sbuf_ring_header *header;
struct sbuf_ring_header_op header_op;
void *txbuf_virt;
void *rxbuf_virt;
/* send/recv wait queue */
wait_queue_head_t txwait;
wait_queue_head_t rxwait;
#if defined(SIPC_DEBUG_SBUF_RDWT_OWNER)
/* record all task histoy */
struct list_head tx_list;
struct list_head rx_list;
#endif
/* send/recv mutex */
struct mutex txlock;
struct mutex rxlock;
struct sprd_pms *tx_pms;
struct sprd_pms *rx_pms;
char tx_pms_name[20];
char rx_pms_name[20];
bool need_wake_lock;
unsigned int poll_mask;
/* protect poll_mask member */
spinlock_t poll_lock;
void (*handler)(int event, void *data);
void *data;
};
#define SBUF_STATE_IDLE 0
#define SBUF_STATE_READY 1
struct sbuf_mgr {
u8 dst;
u8 channel;
bool force_send;
u32 state;
void *smem_virt;
u32 smem_addr;
u32 smem_size;
u32 smem_addr_debug;
u32 dst_smem_addr;
u32 ringnr;
struct sbuf_ring *rings;
struct task_struct *thread;
};
#endif

View File

@ -1,524 +0,0 @@
/*
* Copyright (C) 2019 Spreadtrum Communications Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/sizes.h>
#include <linux/version.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include "../include/sipc.h"
#include "sipc_priv.h"
#define MBOX_BAMK "mbox"
#define PCIE_BAMK "pcie"
enum {
NORMAL_MODE = 0,
CHARGE_MODE,
CALI_MODE
};
#define CALI_LATENCY (10000 * 1000)
#define NORMAL_LATENCY (1 * 1000)
/*
* In charge mode, will only boot pm system,
* so just create pm systen sipc.
*/
static u8 g_boot_mode = NORMAL_MODE;
/*
static int __init sipc_early_mode(char *str)
{
if (!memcmp(str, "charger", 7))
g_boot_mode = CHARGE_MODE;
else if (!memcmp(str, "cali", 4))
g_boot_mode = CALI_MODE;
else
g_boot_mode = NORMAL_MODE;
return 0;
}
early_param("androidboot.mode", sipc_early_mode);
*/
#if defined(CONFIG_DEBUG_FS)
/*
 * sipc_debug_putline - emit @n copies of @c plus a newline to @m.
 * The run is capped so the local buffer keeps room for '\n' and NUL;
 * a non-positive @n emits just the newline.
 */
void sipc_debug_putline(struct seq_file *m, char c, int n)
{
	char line[300];
	int count, limit;

	/* reserve the last two slots for '\n' and the terminator */
	limit = ARRAY_SIZE(line) - 2;
	count = (n > limit) ? limit : n;
	if (count > 0)
		memset(line, c, count);
	else
		count = 0;
	line[count] = '\n';
	line[count + 1] = 0;
	seq_puts(m, line);
}
EXPORT_SYMBOL_GPL(sipc_debug_putline);
#endif
/* On this transport (PCIe doorbell / mailbox) receive interrupts are
 * dispatched by the transport's own handlers, so the generic rx-irq
 * hooks installed in sipc_probe() are stubs: status is always 0. */
static u32 sipc_rxirq_status(u8 dst)
{
	return 0;
}
/* Nothing to clear — see sipc_rxirq_status() above. */
static void sipc_rxirq_clear(u8 dst)
{
}
/*
 * sipc_txirq_trigger - ring the remote side for sipc instance @dst.
 *
 * @dst: sipc destination id indexing smsg_ipcs[]
 * @msg: payload forwarded on the mailbox path; unused for PCIe, where
 *       a fixed doorbell/MSI vector is raised instead.
 * Silently does nothing if the ipc instance is absent or of an
 * unhandled type.
 */
static void sipc_txirq_trigger(u8 dst, u64 msg)
{
	struct smsg_ipc *ipc;

	ipc = smsg_ipcs[dst];
	if (ipc) {
#ifdef CONFIG_SPRD_MAILBOX
		if (ipc->type == SIPC_BASE_MBOX) {
			mbox_raw_sent(ipc->core_id, msg);
			return;
		}
#endif
		if (ipc->type == SIPC_BASE_PCIE) {
#ifdef CONFIG_SPRD_PCIE_EP_DEVICE
			/* host side: raise doorbell irq toward the EP */
			sprd_ep_dev_raise_irq(ipc->ep_dev, PCIE_DBELL_SIPC_IRQ);
#endif
#ifdef CONFIG_PCIE_EPF_SPRD
			/* EP side: raise MSI toward the host */
			sprd_pci_epf_raise_irq(ipc->ep_fun, PCIE_MSI_SIPC_IRQ);
#endif
			return;
		}
	}
}
#ifdef SPRD_PCIE_USE_DTS
/*
 * sipc_parse_dt - populate @ipc from the "sprd,sipc" device-tree node.
 *
 * Reads name, optional type/client, dst, transport-specific properties
 * (mailbox core ids, PCIe ep-dev/ep-fun plus doorbell irq), shared
 * memory type/layout and the optional 64-bit high offset.  Validates
 * the host/client vs smem-type pairing for PCIe.  Returns 0 on success
 * or a negative errno / of_* error code.
 */
static int sipc_parse_dt(struct smsg_ipc *ipc,
			 struct device_node *np, struct device *dev)
{
	u32 val[3];
	int ret;
	const char *type;

	/* get name */
	ret = of_property_read_string(np, "sprd,name", &ipc->name);
	if (ret)
		return ret;
	pr_info("sipc: name=%s\n", ipc->name);
	/* get sipc type, optional */
	if (of_property_read_string(np, "sprd,type", &type) == 0) {
		pr_info("sipc: type=%s\n", type);
		if (strcmp(MBOX_BAMK, type) == 0)
			ipc->type = SIPC_BASE_MBOX;
		else if (strcmp(PCIE_BAMK, type) == 0)
			ipc->type = SIPC_BASE_PCIE;
	}
	/* get sipc client, optional */
	if (of_property_read_u32_array(np, "sprd,client", val, 1) == 0) {
		ipc->client = (u8)val[0];
		pr_info("sipc: client=%d\n", ipc->client);
	}
	/* get sipc dst */
	ret = of_property_read_u32_array(np, "sprd,dst", val, 1);
	if (!ret) {
		ipc->dst = (u8)val[0];
		pr_info("sipc: dst =%d\n", ipc->dst);
	}
	/* NOTE(review): when the property reads fine (ret == 0) but dst is
	 * out of range, this returns 0 — i.e. success on invalid dst.
	 * Looks like it should return -EINVAL; confirm against callers. */
	if (ret || ipc->dst >= SIPC_ID_NR) {
		pr_err("sipc: dst err, ret =%d.\n", ret);
		return ret;
	}
#ifdef CONFIG_SPRD_MAILBOX
	if (ipc->type == SIPC_BASE_MBOX) {
		/* get core id (mandatory for mailbox transport) */
		ipc->core_id = (u8)MBOX_INVALID_CORE;
		ret = of_property_read_u32_array(np, "sprd,core", val, 1);
		if (!ret) {
			ipc->core_id = (u8)val[0];
			pr_info("sipc: core=%d\n", ipc->core_id);
		} else {
			pr_err("sipc: core err, ret =%d.\n", ret);
			return ret;
		}
		/* get core sensor id, optional*/
		ipc->core_sensor_id = (u8)MBOX_INVALID_CORE;
		if (of_property_read_u32_array(np, "sprd,core_sensor",
					       val, 1) == 0) {
			ipc->core_sensor_id = (u8)val[0];
			pr_info("sipc: core_sensor=%d\n", ipc->core_sensor_id);
		}
	}
#endif
#ifdef CONFIG_SPRD_PCIE_EP_DEVICE
	if (ipc->type == SIPC_BASE_PCIE) {
#ifdef CONFIG_SPRD_PCIE
		struct device_node *pdev_node;
#endif
		ret = of_property_read_u32_array(np,
						 "sprd,ep-dev",
						 &ipc->ep_dev,
						 1);
		pr_info("sipc: ep_dev=%d\n", ipc->ep_dev);
		if (ret || ipc->ep_dev >= PCIE_EP_NR) {
			pr_err("sipc: ep_dev err, ret =%d.\n", ret);
			return ret;
		}
#ifdef CONFIG_SPRD_PCIE
		/* get pcie rc ctrl device */
		pdev_node = of_parse_phandle(np, "sprd,rc-ctrl", 0);
		if (!pdev_node) {
			pr_err("sipc: sprd,rc-ctrl err.\n");
			return -ENODEV;
		}
		ipc->pcie_dev = of_find_device_by_node(pdev_node);
		of_node_put(pdev_node);
		if (!ipc->pcie_dev) {
			pr_err("sipc: find pcie_dev err.\n");
			return -ENODEV;
		}
#endif
	}
#endif
#ifdef CONFIG_PCIE_EPF_SPRD
	if (ipc->type == SIPC_BASE_PCIE) {
		ret = of_property_read_u32_array(np,
						 "sprd,ep-fun",
						 &ipc->ep_fun,
						 1);
		pr_info("sipc: ep_fun=%d\n", ipc->ep_fun);
		if (ret || ipc->ep_fun >= SPRD_FUNCTION_MAX) {
			pr_err("sipc: ep_fun err, ret =%d.\n", ret);
			return ret;
		}
		/* parse doorbell irq (first interrupt of the node) */
		ret = of_irq_get(np, 0);
		if (ret < 0) {
			pr_err("sipc: doorbell irq err, ret=%d\n", ret);
			return -EINVAL;
		}
		ipc->irq = ret;
		pr_info("sipc: irq=%d\n", ipc->irq);
	}
#endif
	/* get smem type; defaults to SMEM_LOCAL when absent */
	ret = of_property_read_u32_array(np,
					 "sprd,smem-type",
					 &val[0],
					 1);
	if (!ret)
		ipc->smem_type = (enum smem_type)val[0];
	else
		ipc->smem_type = SMEM_LOCAL;
	pr_info("sipc: smem_type = %d, ret =%d\n", ipc->smem_type, ret);
	/* get smem info: <base dst_base size> */
	ret = of_property_read_u32_array(np,
					 "sprd,smem-info",
					 val,
					 3);
	if (ret) {
		pr_err("sipc: parse smem info failed.\n");
		return ret;
	}
	ipc->smem_base = val[0];
	ipc->dst_smem_base = val[1];
	ipc->smem_size = val[2];
	pr_info("sipc: smem_base=0x%x, dst_smem_base=0x%x, smem_size=0x%x\n",
		ipc->smem_base, ipc->dst_smem_base, ipc->smem_size);
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	/* try to get high_offset (upper address bits on 64-bit phys) */
	ret = of_property_read_u32(np,
				   "sprd,high-offset",
				   val);
	if (!ret) {
		ipc->high_offset = val[0];
		pr_info("sipc: high_offset=0x%xn", ipc->high_offset);
	}
#endif
	if (ipc->type == SIPC_BASE_PCIE) {
		/* pcie sipc: the host must use local smem, the client
		 * must use pcie smem */
		if (!ipc->client && ipc->smem_type != SMEM_LOCAL) {
			pr_err("sipc: host must use local smem!");
			return -EINVAL;
		}
		if (ipc->client && ipc->smem_type != SMEM_PCIE) {
			pr_err("sipc: client must use pcie smem!");
			return -EINVAL;
		}
	}
	return 0;
}
#else
/*
 * sipc_get_smem_base - allocate a physically contiguous, zeroed region
 * of @size bytes for the sipc shared memory and return its physical
 * base address.  Returns 0 on allocation failure (0 doubles as the
 * error sentinel).
 *
 * NOTE(review): the u32 return truncates phys_addr_t; on systems with
 * RAM above 4GB this silently loses the upper bits — confirm the
 * platform guarantees allocations below 4GB.
 */
static u32 sipc_get_smem_base(size_t size)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;

	page = alloc_pages(GFP_KERNEL, order);
	if(page == NULL) {
		printk("sipc alloc pages fail\n");
		return 0;
	}
	/* split the high-order allocation into single pages and give back
	 * the tail beyond 'size', keeping only what is needed */
	split_page(page, order);
	for (p = page +(size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);
	if (PageHighMem(page)) {
		phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
		//phys_addr_t end = base + size;
		/* highmem pages lack a permanent mapping: zero each page
		 * through a temporary atomic mapping */
		while (size > 0) {
			void *ptr = kmap_atomic(page);
			memset(ptr, 0, PAGE_SIZE);
			kunmap_atomic(ptr);
			page++;
			size -= PAGE_SIZE;
		}
		return base;
	} else {
		/* lowmem: zero through the linear mapping */
		void *ptr = page_address(page);
		memset(ptr, 0, size);
		return __pa(ptr);
	}
}
static int sipc_parse_dt(struct smsg_ipc *ipc,
struct device_node *np, struct device *dev)
{
u32 val[3];
int ret = 0;
//dma_addr_t *dma_handle;
/* get name */
ipc->name = "sprd,sipc";
pr_info("sipc: name=%s\n", ipc->name);
/* get sipc type, optional */
ipc->type = SIPC_BASE_PCIE;
pr_info("sipc: type=%d\n", ipc->type);
/* get sipc client, optional */
/* get sipc dst */
ipc->dst = 1;
pr_info("sipc: dst =%d\n", ipc->dst);
if (ipc->dst >= SIPC_ID_NR) {
pr_err("sipc: dst err\n");
return ret;
}
#ifdef CONFIG_SPRD_PCIE_EP_DEVICE
if (ipc->type == SIPC_BASE_PCIE) {
ipc->ep_dev = 0;
pr_info("sipc: ep_dev=%d\n", ipc->ep_dev);
if (ipc->ep_dev >= PCIE_EP_NR) {
pr_err("sipc: ep_dev err\n");
return -1;
}
}
#endif
/* get smem type */
ipc->smem_type = SMEM_LOCAL;
pr_info("sipc: smem_type = %d\n", ipc->smem_type);
/* get smem info */
val[0] = sipc_get_smem_base(0x0300000);
val[1] = val[0];
val[2] = 0x0300000;
ipc->smem_base = val[0];
ipc->dst_smem_base = val[1];
ipc->smem_size = val[2];
pr_info("sipc: smem_base=0x%x, dst_smem_base=0x%x, smem_size=0x%x\n",
ipc->smem_base, ipc->dst_smem_base, ipc->smem_size);
#ifdef CONFIG_PHYS_ADDR_T_64BIT
/* try to get high_offset */
ipc->high_offset = 0;
pr_info("sipc: high_offset=0x%xn", ipc->high_offset);
#endif
if (ipc->type == SIPC_BASE_PCIE) {
/* pcie sipc, the host must use loacal SMEM_LOCAL */
if (!ipc->client && ipc->smem_type != SMEM_LOCAL) {
pr_err("sipc: host must use local smem!");
return -EINVAL;
}
if (ipc->client && ipc->smem_type != SMEM_PCIE) {
pr_err("sipc: client must use pcie smem!");
return -EINVAL;
}
}
return 0;
}
#endif
/*
 * sipc_probe - platform driver probe: build one smsg ipc instance.
 *
 * Allocates the smsg_ipc, fills it from sipc_parse_dt(), installs the
 * irq hook stubs, and registers it via smsg_ipc_create().  In charge
 * mode only the pm-system instance is allowed.
 */
static int sipc_probe(struct platform_device *pdev)
{
	struct smsg_ipc *ipc;
	struct device_node *np = pdev->dev.of_node;

	ipc = devm_kzalloc(&pdev->dev,
			   sizeof(struct smsg_ipc),
			   GFP_KERNEL);
	if (!ipc)
		return -ENOMEM;

	if (sipc_parse_dt(ipc, np, &pdev->dev)) {
		pr_err("%s: failed to parse dt!\n", __func__);
		return -ENODEV;
	}

	/*
	 * In charge mode, will only boot pm system,
	 * so just create pm systen sipc.
	 */
	if (g_boot_mode == CHARGE_MODE && ipc->dst != SIPC_ID_PM_SYS)
		return -ENODEV;

	ipc->rxirq_status = sipc_rxirq_status;
	ipc->rxirq_clear = sipc_rxirq_clear;
	ipc->txirq_trigger = sipc_txirq_trigger;
	spin_lock_init(&ipc->txpinlock);

	/* init mpm delay enter idle time for pcie. */
	if (ipc->type == SIPC_BASE_PCIE)
		ipc->latency = (g_boot_mode == CALI_MODE) ?
			       CALI_LATENCY : NORMAL_LATENCY;

	smsg_ipc_create(ipc);
	platform_set_drvdata(pdev, ipc);
	return 0;
}
/* Platform driver remove: tear down the smsg ipc instance created in
 * probe.  The explicit devm_kfree is redundant (devm frees on detach)
 * but harmless. */
static int sipc_remove(struct platform_device *pdev)
{
	struct smsg_ipc *ipc = platform_get_drvdata(pdev);
	smsg_ipc_destroy(ipc);
	devm_kfree(&pdev->dev, ipc);
	return 0;
}
#ifdef SPRD_PCIE_USE_DTS
static const struct of_device_id sipc_match_table[] = {
{ .compatible = "sprd,sipc", },
{ },
};
#endif
static struct platform_driver sipc_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "sipc",
#ifdef SPRD_PCIE_USE_DTS
.of_match_table = sipc_match_table,
#endif
},
.probe = sipc_probe,
.remove = sipc_remove,
};
#ifndef SPRD_PCIE_USE_DTS
static void sipc_platform_device_release(struct device *dev) {}
static struct platform_device sipc_device = {
.name = "sipc",
.id = -1,
.dev = {
.release = sipc_platform_device_release,
}
};
#endif
/*
 * sipc_init - module init: register the sipc platform driver (and, on
 * non-DT builds, the static platform device that triggers its probe).
 * Unwinds the device registration if the driver fails to register.
 */
int sipc_init(void)
{
	int ret;

	smsg_init_channel2index();

#ifndef SPRD_PCIE_USE_DTS
	ret = platform_device_register(&sipc_device);
	if (ret)
		return ret;
#endif

	ret = platform_driver_register(&sipc_driver);
	if (ret) {
#ifndef SPRD_PCIE_USE_DTS
		platform_device_unregister(&sipc_device);
#endif
		return ret;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(sipc_init);
/*
 * sipc_exit - module teardown: unregister the platform driver and, on
 * non-DT builds, the static platform device.
 * (Removed a stray leftover debug printk "dayin is here0".)
 */
void sipc_exit(void)
{
	platform_driver_unregister(&sipc_driver);
#ifndef SPRD_PCIE_USE_DTS
	platform_device_unregister(&sipc_device);
#endif
}
EXPORT_SYMBOL_GPL(sipc_exit);

View File

@ -1,51 +0,0 @@
/*
* Copyright (C) 2019 Spreadtrum Communications Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include "../include/sipc.h"
#if defined(CONFIG_DEBUG_FS)
#include "sipc_debugfs.h"
/*
 * sipc_init_debugfs - create the "sipc" debugfs directory and register
 * the per-subsystem debug entries (smsg, sbuf, sblock, smem, plus the
 * optional smsgc/sipx/swcnblk/mbox entries when configured).
 * Returns 0 on success, -ENXIO if the root directory cannot be made.
 */
int sipc_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("sipc", NULL);
	if (!root)
		return -ENXIO;
	smsg_init_debugfs(root);
#if defined(CONFIG_SPRD_SIPC_SMSGC)
	smsgc_init_debugfs(root);
#endif
	sbuf_init_debugfs(root);
	sblock_init_debugfs(root);
#ifdef CONFIG_SPRD_SIPC_ZERO_COPY_SIPX
	sipx_init_debugfs(root);
#endif
#ifdef CONFIG_SPRD_SIPC_SWCNBLK
	swcnblk_init_debugfs(root);
#endif
	smem_init_debugfs(root);
#ifdef CONFIG_SPRD_MAILBOX
	mbox_init_debugfs(root);
#endif
	return 0;
}
EXPORT_SYMBOL_GPL(sipc_init_debugfs);
#endif /* CONFIG_DEBUG_FS */

View File

@ -1,37 +0,0 @@
/*
* Copyright (C) 2019 Spreadtrum Communications Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __SIPC_DEBUGFS_H
#define __SIPC_DEBUGFS_H
int sbuf_init_debugfs(void *root);
int smsg_init_debugfs(void *root);
int sblock_init_debugfs(void *root);
int smem_init_debugfs(void *root);
#ifdef CONFIG_SPRD_SIPC_ZERO_COPY_SIPX
int sipx_init_debugfs(void *root);
#endif
#ifdef CONFIG_SPRD_SIPC_SWCNBLK
int swcnblk_init_debugfs(void *root);
#endif
#if defined(CONFIG_SPRD_SIPC_SMSGC)
int smsgc_init_debugfs(void *root);
#endif
#ifdef CONFIG_SPRD_MAILBOX
int mbox_init_debugfs(void *root);
#endif
#endif /* !__SIPC_DEBUGFS_H */

View File

@ -1,188 +0,0 @@
/*
* Copyright (C) 2019 Spreadtrum Communications Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __SIPC_PRIV_H
#define __SIPC_PRIV_H
#include <linux/ktime.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include "../include/sprd_mpm.h"
#ifdef CONFIG_SPRD_MAILBOX
#include <linux/sprd_mailbox.h>
#endif
#ifdef CONFIG_SPRD_PCIE_EP_DEVICE
#include "../include/sprd_pcie_ep_device.h"
#endif
#ifdef CONFIG_PCIE_EPF_SPRD
#include <linux/pcie-epf-sprd.h>
#endif
#ifdef CONFIG_PCI
#include "../include/sprd_pcie_resource.h"
#endif
#include "../include/sipc_big_to_little.h"
#ifndef SZ_1K
#define SZ_1K 0x00000400
#define SZ_4K 0x00001000
#endif
enum {
SIPC_BASE_MBOX = 0,
SIPC_BASE_PCIE,
SIPC_BASE_IPI,
SIPC_BASE_NR
};
enum smem_type {
SMEM_LOCAL = 0,
SMEM_PCIE
};
extern struct smsg_ipc *smsg_ipcs[];
#define SMSG_CACHE_NR 256
struct smsg_channel {
/* wait queue for recv-buffer */
wait_queue_head_t rxwait;
struct mutex rxlock;
struct sprd_pms *tx_pms;
struct sprd_pms *rx_pms;
char tx_name[16];
char rx_name[16];
/* cached msgs for recv */
uintptr_t wrptr[1];
uintptr_t rdptr[1];
struct smsg caches[SMSG_CACHE_NR];
};
/* smsg ring-buffer between AP/CP ipc */
struct smsg_ipc {
const char *name;
struct sprd_pms *sipc_pms;
u8 dst;
u8 client; /* sipc is client mode */
/* target core_id over mailbox */
u8 core_id;
u8 core_sensor_id;
u32 type; /* sipc type, mbox, ipi, pcie */
void __iomem *write_addr;
#ifdef CONFIG_SPRD_PCIE_EP_DEVICE
u32 ep_dev;
struct platform_device *pcie_dev;
#endif
#ifdef CONFIG_PCIE_EPF_SPRD
u32 ep_fun;
#endif
u32 latency;
/* send-buffer info */
uintptr_t txbuf_addr;
u32 txbuf_size; /* must be 2^n */
uintptr_t txbuf_rdptr;
uintptr_t txbuf_wrptr;
/* recv-buffer info */
uintptr_t rxbuf_addr;
u32 rxbuf_size; /* must be 2^n */
uintptr_t rxbuf_rdptr;
uintptr_t rxbuf_wrptr;
/* sipc irq related */
int irq;
u32 (*rxirq_status)(u8 id);
void (*rxirq_clear)(u8 id);
void (*txirq_trigger)(u8 id, u64 msg);
u32 ring_base;
u32 ring_size;
void *smem_vbase;
u32 smem_base;
u32 smem_size;
enum smem_type smem_type;
u32 dst_smem_base;
#ifdef CONFIG_PHYS_ADDR_T_64BIT
u32 high_offset;
#endif
/* lock for send-buffer */
spinlock_t txpinlock;
/* all fixed channels receivers */
struct smsg_channel *channels[SMSG_VALID_CH_NR];
/* record the runtime status of smsg channel */
atomic_t busy[SMSG_VALID_CH_NR];
/* all channel states: 0 unused, 1 be opened by other core, 2 opend */
u8 states[SMSG_VALID_CH_NR];
};
/* channel slot states stored in smsg_ipc.states[] */
#define CHAN_STATE_UNUSED	0
#define CHAN_STATE_CLIENT_OPENED 1
#define CHAN_STATE_HOST_OPENED	2
#define CHAN_STATE_OPENED	3
#define CHAN_STATE_FREE		4

/* build the channel-id to index lookup table */
void smsg_init_channel2index(void);
/* register / unregister one smsg link with the core */
void smsg_ipc_create(struct smsg_ipc *ipc);
void smsg_ipc_destroy(struct smsg_ipc *ipc);

/*smem alloc size align*/
#define SMEM_ALIGN_POOLSZ 0x40000	/*256KB*/

#ifdef CONFIG_64BIT
#define SMEM_ALIGN_BYTES 8
#define SMEM_MIN_ORDER 3
#else
#define SMEM_ALIGN_BYTES 4
#define SMEM_MIN_ORDER 2
#endif

/* initialize smem pool for AP/CP */
int smem_init(u32 addr, u32 size, u32 dst, u32 mem_type);
/* fill status_info (at most size bytes) with sbuf state — see sbuf.c */
void sbuf_get_status(u8 dst, char *status_info, int size);

#if defined(CONFIG_DEBUG_FS)
void sipc_debug_putline(struct seq_file *m, char c, int n);
#endif

#ifdef CONFIG_SPRD_MAILBOX
#define MBOX_INVALID_CORE 0xff
#endif
/* sipc_smem_request_resource
 * local smem no need request resource, just return 0.
 */
static inline int sipc_smem_request_resource(struct sprd_pms *pms,
					     u8 dst, int timeout)
{
	if (smsg_ipcs[dst]->smem_type != SMEM_LOCAL)
		return sprd_pms_request_resource(pms, timeout);

	/* local shared memory is always available */
	return 0;
}
/* sipc_smem_release_resource
 * local smem no need release resource, do nothing.
 */
static inline void sipc_smem_release_resource(struct sprd_pms *pms, u8 dst)
{
	if (smsg_ipcs[dst]->smem_type == SMEM_LOCAL)
		return;

	sprd_pms_release_resource(pms);
}
#endif

View File

@ -1,559 +0,0 @@
/*
* Copyright (C) 2019 Spreadtrum Communications Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/debugfs.h>
#include <linux/genalloc.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/version.h>
#include "../include/sipc.h"
#include "sipc_priv.h"
#define CONFIG_SPRD_IPA_PCIE_WORKROUND
/*
 * workround: Due to orca ipa hardware limitations
 * the sipc share memory must map from
 * 0x2x0000000(orca side) to 0xx0000000(roc1
 * side), and the size must be 256M
 */
#ifdef CONFIG_SPRD_IPA_PCIE_WORKROUND
#define IPA_GET_SRC_BASE(addr)	(((addr) & 0xf0000000) + 0x200000000)
#define IPA_GET_DST_BASE(addr)	((addr) & 0xf0000000)
#define IPA_SIZE		0x10000000
#endif

/* global list head holding every registered shared-memory pool */
struct smem_phead {
	struct list_head smem_phead;	/* list of struct smem_pool */
	spinlock_t lock;		/* protects the pool list */
	u32 poolnum;			/* number of registered pools */
};

/* one shared-memory pool, keyed by destination (dst) id */
struct smem_pool {
	struct list_head smem_head;	/* allocation records (smem_record) */
	struct list_head smem_plist;	/* link into smem_phead */
	spinlock_t lock;		/* protects smem_head */
	void *pcie_base;		/* virtual base for SMEM_PCIE pools */
	u32 addr;			/* pool base address */
	u32 size;			/* pool size in bytes */
	u32 dst;			/* destination id this pool serves */
	u32 mem_type;			/* backing memory type (e.g. SMEM_PCIE) */
	atomic_t used;			/* bytes currently allocated */
	struct gen_pool *gen;		/* underlying genalloc allocator */
};

/* bookkeeping for one smem_alloc() so usage can be attributed to a task */
struct smem_record {
	struct list_head smem_list;
	struct task_struct *task;	/* allocating task */
	u32 size;
	u32 addr;
};

/* list of live soc_modem_ram_vmap() mappings */
struct smem_map_list {
	struct list_head map_head;
	spinlock_t lock;
	u32 inited;			/* set once smem_init() has run */
};

/* one virtual mapping created by soc_modem_ram_vmap() */
struct smem_map {
	struct list_head map_list;
	struct task_struct *task;
	const void *mem;		/* virtual address handed to the caller */
	unsigned int count;		/* number of mapped pages */
};

static struct smem_phead sipc_smem_phead;
static struct smem_map_list mem_mp;
/* Look up the shared-memory pool registered for destination @dst. */
static struct smem_pool *shmem_find_pool(u8 dst)
{
	struct smem_phead *phead = &sipc_smem_phead;
	struct smem_pool *pos, *found = NULL;
	unsigned long flags;

	/* a pool count of zero means no pool was ever registered */
	if (!phead->poolnum)
		return NULL;

	spin_lock_irqsave(&phead->lock, flags);
	list_for_each_entry(pos, &phead->smem_phead, smem_plist) {
		if (pos->dst != dst)
			continue;
		found = pos;
		break;
	}
	spin_unlock_irqrestore(&phead->lock, flags);

	return found;
}
/*
 * soc_modem_ram_vmap - map physical modem RAM into the kernel address space.
 * @start:     physical start address (need not be page aligned)
 * @size:      mapping length in bytes
 * @noncached: non-zero to map with an uncached pgprot
 *
 * Builds a page array covering [start, start + size) and maps it.  On
 * success the mapping is recorded in mem_mp so soc_modem_ram_unmap() can
 * find and undo it.  Returns the virtual address corresponding to @start,
 * or NULL on failure.
 */
static void *soc_modem_ram_vmap(phys_addr_t start, size_t size, int noncached)
{
	struct page **pages;
	phys_addr_t page_start;
	unsigned int page_count;
	pgprot_t prot;
	unsigned int i;
	void *vaddr;
	phys_addr_t addr;
	unsigned long flags;
	struct smem_map *map;
	struct smem_map_list *smem = &mem_mp;

	map = kzalloc(sizeof(struct smem_map), GFP_KERNEL);
	if (!map)
		return NULL;

	page_start = start - offset_in_page(start);
	page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE);
	if (noncached)
		prot = pgprot_noncached(PAGE_KERNEL);
	else
		prot = PAGE_KERNEL;

	pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		kfree(map);
		return NULL;
	}

	for (i = 0; i < page_count; i++) {
		addr = page_start + i * PAGE_SIZE;
		pages[i] = pfn_to_page(addr >> PAGE_SHIFT);
	}

#if (LINUX_VERSION_CODE < KERNEL_VERSION( 5,10,0 ))
	vaddr = vm_map_ram(pages, page_count, -1, prot);
#else
	/*
	 * vmap()'s third argument is a VM_* flags word, not a node id:
	 * the previous -1 set every flag bit.  VM_MAP is the correct
	 * flag for an ordinary page-array mapping.
	 */
	vaddr = vmap(pages, page_count, VM_MAP, prot);
#endif
	kfree(pages);

	if (!vaddr) {
		pr_err("smem: vm map failed.\n");
		kfree(map);
		return NULL;
	}

	vaddr += offset_in_page(start);
	map->count = page_count;
	map->mem = vaddr;
	map->task = current;

	/*
	 * NOTE(review): if mem_mp is not yet initialized the map record is
	 * leaked and the mapping can never be unmapped — confirm smem_init()
	 * always runs before the first mapping.
	 */
	if (smem->inited) {
		spin_lock_irqsave(&smem->lock, flags);
		list_add_tail(&map->map_list, &smem->map_head);
		spin_unlock_irqrestore(&smem->lock, flags);
	}

	return vaddr;
}
/*
 * pcie_modem_ram_vmap - map modem memory reachable over the PCIe link.
 * Only uncached mappings are supported.  Returns NULL on failure or when
 * no PCIe transport backend is compiled in.
 */
static void *pcie_modem_ram_vmap(phys_addr_t start, size_t size, int noncached)
{
	if (noncached == 0) {
		pr_err("%s: cache not support!\n", __func__);
		return NULL;
	}

	/*
	 * Use an #if/#elif chain so only one backend is compiled in; the
	 * old back-to-back #ifdef blocks left unreachable code when both
	 * config options were defined (the Makefile defines EP_DEVICE).
	 */
#if defined(CONFIG_SPRD_PCIE_EP_DEVICE)
	return sprd_ep_map_memory(PCIE_EP_MODEM, start, size);
#elif defined(CONFIG_PCIE_EPF_SPRD)
	return sprd_pci_epf_map_memory(SPRD_FUNCTION_0, start, size);
#else
	return NULL;
#endif
}
/* Undo a pcie_modem_ram_vmap() mapping via the active PCIe backend. */
static void pcie_modem_ram_unmap(const void *mem)
{
	/*
	 * Plain calls instead of "return <void expression>;" (a C
	 * constraint violation), and #if/#elif so only one backend's
	 * unmap is compiled in.
	 */
#if defined(CONFIG_SPRD_PCIE_EP_DEVICE)
	sprd_ep_unmap_memory(PCIE_EP_MODEM, mem);
#elif defined(CONFIG_PCIE_EPF_SPRD)
	sprd_pci_epf_unmap_memory(SPRD_FUNCTION_0, mem);
#endif
}
static void soc_modem_ram_unmap(const void *mem)
{
struct smem_map *map, *next;
unsigned long flags;
struct smem_map_list *smem = &mem_mp;
bool found = false;
if (smem->inited) {
spin_lock_irqsave(&smem->lock, flags);
list_for_each_entry_safe(map, next, &smem->map_head, map_list) {
if (map->mem == mem) {
list_del(&map->map_list);
found = true;
break;
}
}
spin_unlock_irqrestore(&smem->lock, flags);
if (found) {
vm_unmap_ram(mem - offset_in_page(mem), map->count);
kfree(map);
}
}
}
/*
 * shmem_ram_vmap - map a window of dst's shared-memory pool.
 * PCIe pools are already mapped as a whole, so the request is only
 * range-checked and offset into pcie_base; other pools go through
 * soc_modem_ram_vmap().  Returns NULL on error.
 */
static void *shmem_ram_vmap(u8 dst, phys_addr_t start,
			    size_t size,
			    int noncached)
{
	struct smem_pool *spool;

	spool = shmem_find_pool(dst);
	if (spool == NULL) {
		pr_err("%s: pool dst %d is not existed!\n", __func__, dst);
		return NULL;
	}

	if (spool->mem_type == SMEM_PCIE) {
		if (start < spool->addr
		    || start + size > spool->addr + spool->size) {
			/* out-of-range request is an error — log it as one */
			pr_err("%s: error, start = 0x%lx, size = 0x%lx.\n",
			       __func__,
			       (unsigned long)start,
			       (unsigned long)size);
			return NULL;
		}

		pr_info("%s: succ, start = 0x%lx, size = 0x%lx.\n",
			__func__, (unsigned long)start, (unsigned long)size);
		return (spool->pcie_base + start - spool->addr);
	}

	return soc_modem_ram_vmap(start, size, noncached);
}
/*
 * smem_init - register a shared-memory pool for destination @dst.
 * @addr:     pool base address
 * @size:     pool size (rounded up to page or SMEM_ALIGN_BYTES granularity)
 * @dst:      destination id the pool serves
 * @mem_type: backing memory type; SMEM_PCIE pools are mapped here as well
 *
 * Idempotent per @dst.  Returns 0 on success or a negative errno.  The
 * pool is published on the global list only after it is fully initialized,
 * so failed initialization no longer leaves a half-built pool visible to
 * shmem_find_pool(), and all error paths free what they allocated.
 */
int smem_init(u32 addr, u32 size, u32 dst, u32 mem_type)
{
	struct smem_phead *phead = &sipc_smem_phead;
	struct smem_map_list *smem = &mem_mp;
	struct smem_pool *spool;
	unsigned long flags;

	/* first init, create the pool head */
	if (!phead->poolnum) {
		spin_lock_init(&phead->lock);
		INIT_LIST_HEAD(&phead->smem_phead);
	}

	if (shmem_find_pool(dst))
		return 0;

	spool = kzalloc(sizeof(struct smem_pool), GFP_KERNEL);
	if (!spool)
		return -ENOMEM;

	spool->addr = addr;
	spool->dst = dst;
	spool->mem_type = mem_type;

	if (size >= SMEM_ALIGN_POOLSZ)
		size = PAGE_ALIGN(size);
	else
		size = ALIGN(size, SMEM_ALIGN_BYTES);
	spool->size = size;
	atomic_set(&spool->used, 0);
	spin_lock_init(&spool->lock);
	INIT_LIST_HEAD(&spool->smem_head);

	spin_lock_init(&smem->lock);
	INIT_LIST_HEAD(&smem->map_head);
	smem->inited = 1;

	/* allocator block size is times of pages */
	if (spool->size >= SMEM_ALIGN_POOLSZ)
		spool->gen = gen_pool_create(PAGE_SHIFT, -1);
	else
		spool->gen = gen_pool_create(SMEM_MIN_ORDER, -1);
	if (!spool->gen) {
		pr_err("Failed to create smem gen pool!\n");
		kfree(spool);
		return -ENOMEM;
	}

	if (gen_pool_add(spool->gen, spool->addr, spool->size, -1) != 0) {
		pr_err("Failed to add smem gen pool!\n");
		gen_pool_destroy(spool->gen);
		kfree(spool);
		return -ENOMEM;
	}
	pr_info("%s: pool addr = 0x%x, size = 0x%x added.\n",
		__func__, spool->addr, spool->size);

	if (mem_type == SMEM_PCIE) {
#ifdef CONFIG_SPRD_IPA_PCIE_WORKROUND
#ifdef CONFIG_PCIE_EPF_SPRD
		spool->pcie_base = sprd_epf_ipa_map(IPA_GET_SRC_BASE(addr),
						    IPA_GET_DST_BASE(addr),
						    IPA_SIZE);
		if (!spool->pcie_base) {
			gen_pool_destroy(spool->gen);
			kfree(spool);
			return -ENOMEM;
		}
		spool->pcie_base += (addr - IPA_GET_DST_BASE(addr));
#else
		pr_err("Failed to pcie map, can't run here!\n");
		gen_pool_destroy(spool->gen);
		kfree(spool);
		return -ENOMEM;
#endif
#else
		spool->pcie_base = pcie_modem_ram_vmap(addr, size, 1);
		if (!spool->pcie_base) {
			gen_pool_destroy(spool->gen);
			kfree(spool);
			return -ENOMEM;
		}
#endif
	}

	/* publish the fully initialized pool */
	spin_lock_irqsave(&phead->lock, flags);
	list_add_tail(&spool->smem_plist, &phead->smem_phead);
	phead->poolnum++;
	spin_unlock_irqrestore(&phead->lock, flags);

	return 0;
}
/* ****************************************************************** */

/* Report the base address and size of the shared-memory pool for @dst. */
int smem_get_area(u8 dst, u32 *base, u32 *size)
{
	struct smem_pool *pool;

	if (!base || !size)
		return -EINVAL;

	pool = shmem_find_pool(dst);
	if (!pool) {
		pr_err("%s: err, dst = %d!\n", __func__, dst);
		return -EINVAL;
	}

	pr_info("%s: addr = 0x%x, size = 0x%x.\n",
		__func__, pool->addr, pool->size);

	*base = pool->addr;
	*size = pool->size;
	return 0;
}
EXPORT_SYMBOL_GPL(smem_get_area);
/*
 * Allocate @size bytes from dst's pool.  Returns the pool-relative
 * address, or 0 on failure.  Each allocation is recorded so usage can
 * be attributed to the requesting task in debugfs.
 */
u32 smem_alloc(u8 dst, u32 size)
{
	struct smem_pool *pool;
	struct smem_record *rec;
	unsigned long flags;
	u32 addr;

	pool = shmem_find_pool(dst);
	if (!pool) {
		pr_err("%s: pool dst %d is not existed!\n", __func__, dst);
		return 0;
	}

	rec = kzalloc(sizeof(struct smem_record), GFP_KERNEL);
	if (!rec)
		return 0;

	/* round the request up to the pool's allocation granularity */
	size = (pool->size >= SMEM_ALIGN_POOLSZ) ?
		PAGE_ALIGN(size) : ALIGN(size, SMEM_ALIGN_BYTES);

	addr = gen_pool_alloc(pool->gen, size);
	if (!addr) {
		pr_err("%s:pool dst=%d, size=0x%x failed to alloc smem!\n",
		       __func__, dst, size);
		kfree(rec);
		return 0;
	}

	/* record smem alloc info */
	atomic_add(size, &pool->used);
	rec->size = size;
	rec->task = current;
	rec->addr = addr;
	spin_lock_irqsave(&pool->lock, flags);
	list_add_tail(&rec->smem_list, &pool->smem_head);
	spin_unlock_irqrestore(&pool->lock, flags);

	return addr;
}
EXPORT_SYMBOL_GPL(smem_alloc);
/*
 * smem_free - return an allocation to dst's pool and drop its record.
 * @addr/@size must match a previous smem_alloc().
 */
void smem_free(u8 dst, u32 addr, u32 size)
{
	struct smem_pool *spool;
	struct smem_record *recd, *next;
	unsigned long flags;

	spool = shmem_find_pool(dst);
	if (spool == NULL) {
		pr_err("%s: pool dst %d is not existed!\n", __func__, dst);
		return;
	}

	/*
	 * Round exactly as smem_alloc() does: the condition is the pool
	 * size, not the request size.  The old code tested "size >=
	 * SMEM_ALIGN_POOLSZ", so a small request in a large pool was
	 * page-aligned on alloc but byte-aligned on free, corrupting
	 * the gen_pool accounting.
	 */
	if (spool->size >= SMEM_ALIGN_POOLSZ)
		size = PAGE_ALIGN(size);
	else
		size = ALIGN(size, SMEM_ALIGN_BYTES);

	atomic_sub(size, &spool->used);
	gen_pool_free(spool->gen, addr, size);

	/* delete record node from list */
	spin_lock_irqsave(&spool->lock, flags);
	list_for_each_entry_safe(recd, next, &spool->smem_head, smem_list) {
		if (recd->addr == addr) {
			list_del(&recd->smem_list);
			kfree(recd);
			break;
		}
	}
	spin_unlock_irqrestore(&spool->lock, flags);
}
EXPORT_SYMBOL_GPL(smem_free);
/* Map @size bytes at @start of dst's pool with an uncached mapping. */
void *shmem_ram_vmap_nocache(u8 dst, phys_addr_t start, size_t size)
{
	return shmem_ram_vmap(dst, start, size, 1);
}
EXPORT_SYMBOL_GPL(shmem_ram_vmap_nocache);

/* Map @size bytes at @start of dst's pool with a cacheable mapping. */
void *shmem_ram_vmap_cache(u8 dst, phys_addr_t start, size_t size)
{
	return shmem_ram_vmap(dst, start, size, 0);
}
EXPORT_SYMBOL_GPL(shmem_ram_vmap_cache);
/* Undo a shmem_ram_vmap_*() mapping for destination @dst. */
void shmem_ram_unmap(u8 dst, const void *mem)
{
	struct smem_pool *pool = shmem_find_pool(dst);

	if (!pool) {
		pr_err("%s: pool dst %d is not existed!\n", __func__, dst);
		return;
	}

	/*
	 * PCIE pools are statically mapped as a whole and shmem_ram_vmap()
	 * only computed an offset, so there is nothing to undo.
	 */
	if (pool->mem_type != SMEM_PCIE)
		soc_modem_ram_unmap(mem);
}
EXPORT_SYMBOL_GPL(shmem_ram_unmap);
/* Uncached mapping of modem memory, routed by transport type. */
void *modem_ram_vmap_nocache(u32 modem_type, phys_addr_t start, size_t size)
{
	return (modem_type == PCIE_MODEM) ?
		pcie_modem_ram_vmap(start, size, 1) :
		soc_modem_ram_vmap(start, size, 1);
}
EXPORT_SYMBOL_GPL(modem_ram_vmap_nocache);
/* Cacheable mapping of modem memory, routed by transport type. */
void *modem_ram_vmap_cache(u32 modem_type, phys_addr_t start, size_t size)
{
	return (modem_type == PCIE_MODEM) ?
		pcie_modem_ram_vmap(start, size, 0) :
		soc_modem_ram_vmap(start, size, 0);
}
EXPORT_SYMBOL_GPL(modem_ram_vmap_cache);
/* Undo a modem_ram_vmap_*() mapping, routed by transport type. */
void modem_ram_unmap(u32 modem_type, const void *mem)
{
	if (modem_type == PCIE_MODEM)
		pcie_modem_ram_unmap(mem);
	else
		soc_modem_ram_unmap(mem);
}
EXPORT_SYMBOL_GPL(modem_ram_unmap);
#ifdef CONFIG_DEBUG_FS
/* debugfs "smem" dump: per-pool totals plus every live allocation record. */
static int smem_debug_show(struct seq_file *m, void *private)
{
	struct smem_phead *phead = &sipc_smem_phead;
	struct smem_pool *pool;
	struct smem_record *rec;
	unsigned long flags;
	u32 idx = 1;

	spin_lock_irqsave(&phead->lock, flags);
	list_for_each_entry(pool, &phead->smem_phead, smem_plist) {
		u32 avail = gen_pool_avail(pool->gen);

		sipc_debug_putline(m, '*', 80);
		seq_printf(m, "%d, dst:%d, name: %s, smem pool info:\n",
			   idx++, pool->dst,
			   (smsg_ipcs[pool->dst])->name);
		seq_printf(m, "phys_addr=0x%x, total=0x%x, used=0x%x, free=0x%x\n",
			   pool->addr, pool->size, pool->used.counter, avail);
		seq_puts(m, "smem record list:\n");

		list_for_each_entry(rec, &pool->smem_head, smem_list) {
			seq_printf(m, "task %s: pid=%u, addr=0x%x, size=0x%x\n",
				   rec->task->comm,
				   rec->task->pid,
				   rec->addr,
				   rec->size);
		}
	}
	spin_unlock_irqrestore(&phead->lock, flags);

	return 0;
}
/* seq_file single-shot open for the "smem" debugfs entry */
static int smem_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, smem_debug_show, inode->i_private);
}

static const struct file_operations smem_debug_fops = {
	.open = smem_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* Create the read-only "smem" file under the sipc debugfs root @root. */
int smem_init_debugfs(void *root)
{
	if (!root)
		return -ENXIO;
	debugfs_create_file("smem", 0444,
			    (struct dentry *)root, NULL,
			    &smem_debug_fops);
	return 0;
}
EXPORT_SYMBOL_GPL(smem_init_debugfs);
#endif /* endof CONFIG_DEBUG_FS */

MODULE_AUTHOR("Chen Gaopeng");
MODULE_DESCRIPTION("SIPC/SMEM driver");
MODULE_LICENSE("GPL v2");

File diff suppressed because it is too large Load Diff

View File

@ -1,480 +0,0 @@
/*
* Copyright (C) 2018 Spreadtrum Communications Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/cdev.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/poll.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "../include/sipc.h"
#include "sipc_priv.h"
#include "spipe.h"
/* fixed char-device base numbers for the statically created nodes */
#define SPIPE_NR_BASE_NUM	MKDEV(254, 0)
#define SDIAG_NR_BASE_NUM	MKDEV(253, 0)
#define STTY_NR_BASE_NUM	MKDEV(252, 0)
#define SNV_NR_BASE_NUM		MKDEV(251, 0)

/* per-chrdev state for one spipe platform device */
struct spipe_device {
	struct spipe_init_data *init;	/* static channel configuration */
	int major;
	int minor;
	struct cdev cdev;
};

/* per-open state: identifies the sbuf ring backing this file */
struct spipe_sbuf {
	u8 dst;
	u8 channel;
	u32 bufid;	/* ring index = file minor - device base minor */
};

static struct class *spipe_class;
/*
 * spipe_open - attach the opener to the sbuf ring selected by the minor.
 * Fails with -ENODEV while the underlying sbuf channel is not yet ready.
 */
static int spipe_open(struct inode *inode, struct file *filp)
{
	/* iminor(inode) is the idiomatic form of the old
	 * iminor(filp->f_path.dentry->d_inode) — same inode. */
	int minor = iminor(inode);
	struct spipe_device *spipe;
	struct spipe_sbuf *sbuf;

	spipe = container_of(inode->i_cdev, struct spipe_device, cdev);
	if (sbuf_status(spipe->init->dst, spipe->init->channel) != 0) {
		/* pr_err instead of a level-less printk */
		pr_err("spipe %d-%d not ready to open!\n",
		       spipe->init->dst, spipe->init->channel);
		filp->private_data = NULL;
		return -ENODEV;
	}

	sbuf = kmalloc(sizeof(struct spipe_sbuf), GFP_KERNEL);
	if (!sbuf)
		return -ENOMEM;
	filp->private_data = sbuf;

	sbuf->dst = spipe->init->dst;
	sbuf->channel = spipe->init->channel;
	sbuf->bufid = minor - spipe->minor;

	return 0;
}
static int spipe_release(struct inode *inode, struct file *filp)
{
	/* drop the per-open sbuf descriptor allocated in spipe_open() */
	kfree(filp->private_data);
	return 0;
}
/* Read from the open ring; O_NONBLOCK maps to a zero sbuf timeout. */
static ssize_t spipe_read(struct file *filp,
			  char __user *buf, size_t count, loff_t *ppos)
{
	struct spipe_sbuf *sbuf = filp->private_data;
	int timeout = (filp->f_flags & O_NONBLOCK) ? 0 : -1;

	return sbuf_read(sbuf->dst, sbuf->channel, sbuf->bufid,
			 (void *)buf, count, timeout);
}
/* Write to the open ring; O_NONBLOCK maps to a zero sbuf timeout. */
static ssize_t spipe_write(struct file *filp,
			   const char __user *buf, size_t count, loff_t *ppos)
{
	struct spipe_sbuf *sbuf = filp->private_data;
	int timeout = (filp->f_flags & O_NONBLOCK) ? 0 : -1;

	return sbuf_write(sbuf->dst, sbuf->channel, sbuf->bufid,
			  (void *)buf, count, timeout);
}
/* Delegate poll readiness to the sbuf layer for this ring. */
static unsigned int spipe_poll(struct file *filp, poll_table *wait)
{
	struct spipe_sbuf *sd = filp->private_data;

	return sbuf_poll_wait(sd->dst, sd->channel, sd->bufid, filp, wait);
}
/* No device-specific ioctls: every command is accepted and ignored. */
static long spipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	return 0;
}
/* character-device operations shared by every spipe node */
static const struct file_operations spipe_fops = {
	.open = spipe_open,
	.release = spipe_release,
	.read = spipe_read,
	.write = spipe_write,
	.poll = spipe_poll,
	.unlocked_ioctl = spipe_ioctl,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};
#ifdef SPRD_PCIE_USE_DTS
/*
 * spipe_parse_dt - build spipe_init_data from a device-tree node.
 * On success *init points at a devm-allocated pdata and *devid holds the
 * fixed chrdev base number matching the node's "sprd,name" (left
 * untouched for unknown names).  Returns 0 or the -errno of the first
 * missing/invalid property.
 */
static int spipe_parse_dt(struct spipe_init_data **init,
			  struct device_node *np, struct device *dev, dev_t *devid)
{
	struct spipe_init_data *pdata = NULL;
	int ret;
	u32 data;

	pdata = devm_kzalloc(dev, sizeof(struct spipe_init_data), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	ret = of_property_read_string(np,
				      "sprd,name",
				      (const char **)&pdata->name);
	if (ret)
		goto error;

	/* well-known names map to fixed device-number bases */
	if (!strcmp(pdata->name, "spipe_nr"))
		*devid = SPIPE_NR_BASE_NUM;
	else if (!strcmp(pdata->name, "sdiag_nr"))
		*devid = SDIAG_NR_BASE_NUM;
	else if (!strcmp(pdata->name, "stty_nr"))
		*devid = STTY_NR_BASE_NUM;
	else if (!strcmp(pdata->name, "snv_nr"))
		*devid = SNV_NR_BASE_NUM;

	ret = of_property_read_u32(np, "sprd,dst", (u32 *)&data);
	if (ret)
		goto error;
	pdata->dst = (u8)data;

	ret = of_property_read_u32(np, "sprd,channel", (u32 *)&data);
	if (ret)
		goto error;
	pdata->channel = (u8)data;

	ret = of_property_read_u32(np,
				   "sprd,ringnr",
				   (u32 *)&pdata->ringnr);
	if (ret)
		goto error;

	ret = of_property_read_u32(np,
				   "sprd,size-rxbuf",
				   (u32 *)&pdata->rxbuf_size);
	if (ret)
		goto error;

	ret = of_property_read_u32(np,
				   "sprd,size-txbuf",
				   (u32 *)&pdata->txbuf_size);
	if (ret)
		goto error;

	*init = pdata;
	return ret;

error:
	devm_kfree(dev, pdata);
	*init = NULL;
	return ret;
}
#else
/*
 * Non-DT variant: the pdata already arrived as platform data in *init,
 * so only the fixed chrdev base number has to be selected from the name.
 */
static int spipe_parse_dt(struct spipe_init_data **init,
			  struct device_node *np, struct device *dev, dev_t *devid)
{
	struct spipe_init_data *pdata = *init;

	if (strcmp(pdata->name, "spipe_nr") == 0)
		*devid = SPIPE_NR_BASE_NUM;
	else if (strcmp(pdata->name, "sdiag_nr") == 0)
		*devid = SDIAG_NR_BASE_NUM;
	else if (strcmp(pdata->name, "stty_nr") == 0)
		*devid = STTY_NR_BASE_NUM;
	else if (strcmp(pdata->name, "snv_nr") == 0)
		*devid = SNV_NR_BASE_NUM;

	return 0;
}
#endif
/* Counterpart of spipe_parse_dt(): only clears the caller's pointer;
 * no memory is freed here. */
static inline void spipe_destroy_pdata(struct spipe_init_data **init,
				       struct device *dev)
{
	*init = NULL;
}
/*
 * spipe_probe - create the sbuf channel, char device and device nodes for
 * one spipe platform device.  The vestigial "if (1)" wrapper around the
 * whole body (leftover from a DT-only branch) has been removed; behavior
 * is unchanged.
 */
static int spipe_probe(struct platform_device *pdev)
{
	struct spipe_init_data *init = pdev->dev.platform_data;
	struct spipe_device *spipe;
	dev_t devid;
	int i, rval;
	struct device_node *np;

	printk("%s!\n", __func__);

	np = pdev->dev.of_node;
	rval = spipe_parse_dt(&init, np, &pdev->dev, &devid);
	if (rval) {
		pr_err("Failed to parse spipe device tree, ret=%d\n", rval);
		return rval;
	}

	printk("spipe: after parse device tree, name=%s, dst=%u, channel=%u, ringnr=%u, rxbuf_size=0x%x, txbuf_size=0x%x\n",
	       init->name,
	       init->dst,
	       init->channel,
	       init->ringnr,
	       init->rxbuf_size,
	       init->txbuf_size);

	rval = sbuf_create(init->dst, init->channel, init->ringnr,
			   init->txbuf_size, init->rxbuf_size);
	if (rval != 0) {
		printk("Failed to create sbuf: %d\n", rval);
		spipe_destroy_pdata(&init, &pdev->dev);
		return rval;
	}

	spipe = devm_kzalloc(&pdev->dev,
			     sizeof(struct spipe_device),
			     GFP_KERNEL);
	if (spipe == NULL) {
		sbuf_destroy(init->dst, init->channel);
		spipe_destroy_pdata(&init, &pdev->dev);
		printk("Failed to allocate spipe_device\n");
		return -ENOMEM;
	}

	/* NOTE(review): this overwrites the devid chosen by
	 * spipe_parse_dt(); dynamic allocation has always won here. */
	rval = alloc_chrdev_region(&devid, 0, init->ringnr, init->name);
	//rval = register_chrdev_region(devid, init->ringnr, init->name);
	if (rval != 0) {
		sbuf_destroy(init->dst, init->channel);
		devm_kfree(&pdev->dev, spipe);
		spipe_destroy_pdata(&init, &pdev->dev);
		printk("Failed to alloc spipe chrdev\n");
		return rval;
	}

	cdev_init(&(spipe->cdev), &spipe_fops);
	rval = cdev_add(&(spipe->cdev), devid, init->ringnr);
	if (rval != 0) {
		sbuf_destroy(init->dst, init->channel);
		devm_kfree(&pdev->dev, spipe);
		unregister_chrdev_region(devid, init->ringnr);
		spipe_destroy_pdata(&init, &pdev->dev);
		printk("Failed to add spipe cdev\n");
		return rval;
	}

	spipe->major = MAJOR(devid);
	spipe->minor = MINOR(devid);

	/* one /dev node per ring when the channel has several rings */
	if (init->ringnr > 1) {
		for (i = 0; i < init->ringnr; i++) {
			device_create(spipe_class, NULL,
				      MKDEV(spipe->major, spipe->minor + i),
				      NULL, "%s%d", init->name, i);
		}
	} else {
		device_create(spipe_class, NULL,
			      MKDEV(spipe->major, spipe->minor),
			      NULL, "%s", init->name);
	}

	spipe->init = init;
	platform_set_drvdata(pdev, spipe);

	return 0;
}
/* Tear down everything spipe_probe() created, in reverse order. */
static int spipe_remove(struct platform_device *pdev)
{
	struct spipe_device *spipe = platform_get_drvdata(pdev);
	int i;

	if (!spipe)
		return 0;

	for (i = 0; i < spipe->init->ringnr; i++)
		device_destroy(spipe_class,
			       MKDEV(spipe->major, spipe->minor + i));

	cdev_del(&(spipe->cdev));
	unregister_chrdev_region(MKDEV(spipe->major, spipe->minor),
				 spipe->init->ringnr);
	sbuf_destroy(spipe->init->dst, spipe->init->channel);
	spipe_destroy_pdata(&spipe->init, &pdev->dev);
	devm_kfree(&pdev->dev, spipe);
	platform_set_drvdata(pdev, NULL);

	return 0;
}
#ifdef SPRD_PCIE_USE_DTS
/* device-tree match table; only referenced when probing from DT */
static const struct of_device_id spipe_match_table[] = {
	{.compatible = "sprd,spipe", },
	{ },
};
#endif

static struct platform_driver spipe_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "spipe",
#ifdef SPRD_PCIE_USE_DTS
		.of_match_table = spipe_match_table,
#endif
	},
	.probe = spipe_probe,
	.remove = spipe_remove,
};
/* statically registered spipe platform devices (non-DT configuration) */
static struct platform_device *spipe_pdev[MAX_SPIPE_CHN_NUM];

/* fixed channel table: general pipe, diag, tty and nv channels */
static struct spipe_init_data spipe_data[MAX_SPIPE_CHN_NUM] = {
	{
		.sipc_name = SPIPE_DRVIER_NMAE,
		.name = "spipe_nr",
		.dst = 1,
		.channel = 4,
		.ringnr = 15,
		.txbuf_size = 0x1000,
		.rxbuf_size = 0x1000
	},
	{
		.sipc_name = SPIPE_DRVIER_NMAE,
		.name = "sdiag_nr",
		.dst = 1,
		.channel = 21,
		.ringnr = 1,
		.txbuf_size = 0x40000,
		.rxbuf_size = 0x8000
	},
	{
		.sipc_name = SPIPE_DRVIER_NMAE,
		.name = "stty_nr",
		.dst = 1,
		.channel = 6,
		.ringnr = 32,
		.txbuf_size = 0x0800,
		.rxbuf_size = 0x0800
	},
	{
		.sipc_name = SPIPE_DRVIER_NMAE,
		.name = "snv_nr",
		.dst = 1,
		.channel = 40,
		.ringnr = 1,
		.txbuf_size = 0x40400,
		.rxbuf_size = 0x1000
	}
};
/*
 * spipe_platform_device_reigster - allocate, populate and add the static
 * spipe platform devices.  (Name misspelling kept: callers use it.)
 * Returns 0 on success or a negative errno after releasing every device
 * allocated so far.
 */
static int spipe_platform_device_reigster(void)
{
	int retval = -ENOMEM;
	int i;

	for (i = 0; i < MAX_SPIPE_CHN_NUM; i++) {
		spipe_pdev[i] = platform_device_alloc(SPIPE_DRVIER_NMAE, i);
		if (!spipe_pdev[i]) {
			i--;
			while (i >= 0)
				platform_device_put(spipe_pdev[i--]);
			return retval;
		}
	}

	for (i = 0; i < MAX_SPIPE_CHN_NUM; i++) {
		retval = platform_device_add_data(spipe_pdev[i], &spipe_data[i],
						  sizeof(struct spipe_init_data));
		if (retval)
			goto err_add_pdata;
	}

	for (i = 0; i < MAX_SPIPE_CHN_NUM; i++) {
		retval = platform_device_add(spipe_pdev[i]);
		if (retval < 0) {
			/*
			 * Undo the devices added so far.  The original
			 * loop never decremented i here and spun forever
			 * on the first add failure.
			 */
			i--;
			while (i >= 0)
				platform_device_del(spipe_pdev[i--]);
			goto err_add_pdata;
		}
	}

	return retval;

err_add_pdata:
	for (i = 0; i < MAX_SPIPE_CHN_NUM; i++)
		platform_device_put(spipe_pdev[i]);
	return retval;
}
/* Unregister every static spipe platform device (del + put in one call). */
static void spipe_platform_device_unreigster(void)
{
	int i;

	for (i = 0; i < MAX_SPIPE_CHN_NUM; i++)
		platform_device_unregister(spipe_pdev[i]);
}
/*
 * Module init: create the device class, register the static devices
 * (non-DT builds), then register the platform driver.
 */
int spipe_init(void)
{
	int ret;

	spipe_class = class_create(THIS_MODULE, "spipe");
	if (IS_ERR(spipe_class))
		return PTR_ERR(spipe_class);

#ifndef SPRD_PCIE_USE_DTS
	ret = spipe_platform_device_reigster();
	if (ret)
		return ret;
#endif

	ret = platform_driver_register(&spipe_driver);
	if (ret) {
#ifndef SPRD_PCIE_USE_DTS
		spipe_platform_device_unreigster();
#endif
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spipe_init);
/* Module teardown: driver first, then static devices, then the class.
 * The order mirrors spipe_init() in reverse. */
void spipe_exit(void)
{
	platform_driver_unregister(&spipe_driver);
#ifndef SPRD_PCIE_USE_DTS
	spipe_platform_device_unreigster();
#endif
	class_destroy(spipe_class);
}
/* Notify the sbuf layer that every static spipe channel has gone down. */
void spipe_device_down(void)
{
	int i;	/* unused "retval" local removed */

	for (i = 0; i < MAX_SPIPE_CHN_NUM; i++)
		sbuf_down(spipe_data[i].dst, spipe_data[i].channel);
}
EXPORT_SYMBOL_GPL(spipe_exit);
EXPORT_SYMBOL_GPL(spipe_device_down);

View File

@ -1,29 +0,0 @@
/*
* Copyright (C) 2019 Spreadtrum Communications Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __SPIPE_H
#define __SPIPE_H

/* number of statically configured spipe channels */
#define MAX_SPIPE_CHN_NUM	4
/* platform device/driver name (historical misspelling kept: it is ABI) */
#define SPIPE_DRVIER_NMAE	"spipe"

/* static configuration for one spipe platform device */
struct spipe_init_data {
	char *name;		/* chrdev / device-node base name */
	char *sipc_name;	/* owning sipc link name */
	u8 dst;			/* sipc destination id */
	u8 channel;		/* sipc channel id */
	u32 ringnr;		/* number of sbuf rings (device minors) */
	u32 txbuf_size;		/* per-ring tx buffer size, bytes */
	u32 rxbuf_size;		/* per-ring rx buffer size, bytes */
};
#endif

View File

@ -1,538 +0,0 @@
/*
* Copyright (C) 2018 Spreadtrum Communications Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/cdev.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/of_device.h>
#include "../include/sipc.h"
#include "spool.h"
/* fixed char-device base number for the modem log node */
#define SLOG_NR_BASE_NUM	MKDEV(156, 0)

struct spool_device;

/* per-chrdev state for one spool (sblock) channel */
struct spool_device {
	struct spool_init_data *init;	/* static channel configuration */
	int major;
	int minor;
	struct cdev cdev;
	struct platform_device *plt_dev;
	struct device *sys_dev;		/* Device object in sysfs */
};

/* per-open state; tracks a partially consumed rx block */
struct spool_sblock {
	u8 dst;
	u8 channel;
	bool is_hold;		/* true while hold still has unread data */
	struct sblock hold;	/* block carried over between read() calls */
};

static struct class *spool_class;
/* Attach the opener to the sblock channel behind this device node. */
static int spool_open(struct inode *inode, struct file *filp)
{
	struct spool_device *spool;
	struct spool_sblock *sblock;
	int ret;

	spool = container_of(inode->i_cdev, struct spool_device, cdev);
	ret = sblock_query(spool->init->dst, spool->init->channel);
	if (ret)
		return ret;

	sblock = kmalloc(sizeof(*sblock), GFP_KERNEL);
	if (!sblock)
		return -ENOMEM;

	sblock->dst = spool->init->dst;
	sblock->channel = spool->init->channel;
	sblock->is_hold = 0;
	filp->private_data = sblock;

	return 0;
}
/* Return any block still held from a partial read, then free the state. */
static int spool_release(struct inode *inode, struct file *filp)
{
	struct spool_sblock *sblock = filp->private_data;

	if (sblock->is_hold &&
	    sblock_release(sblock->dst, sblock->channel, &sblock->hold))
		pr_debug("failed to release block!\n");

	kfree(sblock);
	return 0;
}
/*
 * spool_read - read one sblock, possibly across several read() calls.
 *
 * A received block may be larger than the user buffer.  In that case the
 * remainder is kept in sblock->hold (is_hold set) with *ppos as the resume
 * offset, and the block is only released back to the pool once fully
 * consumed — or on a copy error, which drops the hold.
 */
static ssize_t spool_read(struct file *filp,
			  char __user *buf, size_t count, loff_t *ppos)
{
	struct spool_sblock *sblock = filp->private_data;
	int timeout = -1;	/* block by default; 0 when O_NONBLOCK */
	int ret = 0;
	int rdsize = 0;
	struct sblock blk = {0};

	if (filp->f_flags & O_NONBLOCK)
		timeout = 0;

	if (sblock->is_hold) {
		/* continue draining the previously held block */
		if (count < sblock->hold.length - *ppos) {
			rdsize = count;
		} else {
			/* this call consumes the rest of the held block */
			rdsize = sblock->hold.length - *ppos;
			sblock->is_hold = 0;
		}
		blk = sblock->hold;
	} else{
		*ppos = 0;
		ret = sblock_receive(sblock->dst,
				     sblock->channel, &blk, timeout);
		if (ret < 0) {
			pr_debug("%s: failed to receive block!\n", __func__);
			return ret;
		}
		if (blk.length <= count)
			rdsize = blk.length;
		else {
			/* block larger than the buffer: hold the remainder */
			rdsize = count;
			sblock->is_hold = 1;
			sblock->hold = blk;
		}
	}

	if (unalign_copy_to_user(buf, blk.addr + *ppos, rdsize)) {
		pr_err("%s: failed to copy to user!\n", __func__);
		/* drop the hold so the block is released below */
		sblock->is_hold = 0;
		*ppos = 0;
		ret = -EFAULT;
	} else {
		ret = rdsize;
		*ppos += rdsize;
	}

	/* fully consumed (or errored): hand the block back to the pool */
	if (sblock->is_hold == 0) {
		if (sblock_release(sblock->dst, sblock->channel, &blk))
			pr_err("%s: failed to release block!\n", __func__);
	}

	return ret;
}
/*
 * spool_write - copy user data into one or more tx sblocks and send them.
 * Returns the number of bytes queued, which may be short of @count if a
 * copy error stops the loop early.
 */
static ssize_t spool_write(struct file *filp,
			   const char __user *buf, size_t count, loff_t *ppos)
{
	struct spool_sblock *sblock = filp->private_data;
	int timeout = -1;	/* block waiting for a free tx block; 0 if O_NONBLOCK */
	int ret = 0;
	int wrsize = 0;
	int pos = 0;
	struct sblock blk = {0};
	size_t len = count;

	if (filp->f_flags & O_NONBLOCK)
		timeout = 0;

	do {
		ret = sblock_get(sblock->dst, sblock->channel, &blk, timeout);
		if (ret < 0) {
			pr_info("%s: failed to get block!\n", __func__);
			return ret;
		}

		/* fill at most one block per iteration */
		wrsize = (blk.length > len ? len : blk.length);
		if (unalign_copy_from_user(blk.addr, buf + pos, wrsize)) {
			pr_info("%s: failed to copy from user!\n", __func__);
			ret = -EFAULT;
		} else {
			blk.length = wrsize;
			len -= wrsize;
			pos += wrsize;
		}
		/*
		 * NOTE(review): the block is sent even when the copy above
		 * failed, still carrying its original (full) length —
		 * confirm the remote side tolerates such a block.
		 */
		if (sblock_send(sblock->dst, sblock->channel, &blk))
			pr_debug("%s: failed to send block!", __func__);
	} while (len > 0 && ret == 0);

	return count - len;
}
/* Delegate poll readiness to the sblock layer for this channel. */
static unsigned int spool_poll(struct file *filp, poll_table *wait)
{
	struct spool_sblock *sb = filp->private_data;

	return sblock_poll_wait(sb->dst, sb->channel, filp, wait);
}
/* No device-specific ioctls: every command is accepted and ignored. */
static long spool_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	return 0;
}
/* character-device operations shared by every spool node */
static const struct file_operations spool_fops = {
	.open = spool_open,
	.release = spool_release,
	.read = spool_read,
	.write = spool_write,
	.poll = spool_poll,
	.unlocked_ioctl = spool_ioctl,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};
#ifdef SPRD_PCIE_USE_DTS
/*
 * spool_parse_dt - build spool_init_data from a device-tree node.
 * Mandatory: sprd,name, sprd,dst, sprd,channel and the four block
 * geometry properties; sprd,preconfigured and sprd,nodev are optional.
 * On success *init points at a devm-allocated pdata.
 */
static int spool_parse_dt(struct spool_init_data **init, struct device *dev,
			  struct device_node *np, dev_t *dev_no)
{
	struct spool_init_data *pdata = NULL;
	int ret;
	u32 data;

	pdata = devm_kzalloc(dev, sizeof(struct spool_init_data), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	ret = of_property_read_string(np, "sprd,name",
				      (const char **)&pdata->name);
	if (ret)
		goto error;

	/* the log channel gets a fixed chrdev base number */
	if (!strcmp(pdata->name, "slog_nr"))
		*dev_no = SLOG_NR_BASE_NUM;

	ret = of_property_read_u32(np, "sprd,dst", (u32 *)&data);
	if (ret)
		goto error;
	pdata->dst = (u8)data;

	ret = of_property_read_u32(np, "sprd,channel", (u32 *)&data);
	if (ret)
		goto error;
	pdata->channel = (u8)data;

	/* optional: channel was pre-configured elsewhere */
	ret = of_property_read_u32(np, "sprd,preconfigured", (u32 *)&data);
	if (!ret)
		pdata->pre_cfg = (int)data;

	ret = of_property_read_u32(np, "sprd,tx-blksize",
				   (u32 *)&pdata->txblocksize);
	if (ret)
		goto error;
	ret = of_property_read_u32(np, "sprd,tx-blknum",
				   (u32 *)&pdata->txblocknum);
	if (ret)
		goto error;
	ret = of_property_read_u32(np, "sprd,rx-blksize",
				   (u32 *)&pdata->rxblocksize);
	if (ret)
		goto error;
	ret = of_property_read_u32(np, "sprd,rx-blknum",
				   (u32 *)&pdata->rxblocknum);
	if (ret)
		goto error;

	/* optional: suppress the chrdev node, keep only the sysfs device */
	if (!of_property_read_u32(np, "sprd,nodev", (u32 *)&data))
		pdata->nodev = (u8)data;

	*init = pdata;
	return ret;

error:
	devm_kfree(dev, pdata);
	*init = NULL;
	return ret;
}
#else
/*
 * Non-DT variant: hard-coded configuration for the modem log channel.
 * The name is the constant "slog_nr", so its fixed device number is
 * assigned unconditionally (same outcome as the old strcmp check).
 */
static int spool_parse_dt(struct spool_init_data **init, struct device *dev,
			  struct device_node *np, dev_t *dev_no)
{
	struct spool_init_data *pdata;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	pdata->name = "slog_nr";
	pdata->dst = 1;
	pdata->channel = 5;
	pdata->txblocksize = 0;
	pdata->txblocknum = 0;
	pdata->rxblocksize = 0x10000;
	pdata->rxblocknum = 32;

	*dev_no = SLOG_NR_BASE_NUM;
	*init = pdata;
	return 0;
}
#endif
/*
 * base_addr_show - sysfs "base_addr" attribute for a spool device.
 * Emits one line: dst channel cp_base_addr pre_cfg txblocknum txblocksize
 * rxblocknum rxblocksize.
 */
static ssize_t base_addr_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct spool_device *spool = (struct spool_device *)
		dev_get_drvdata(dev);
	struct spool_init_data *init = spool->init;
	uint32_t addr;
	int ret;

	ret = sblock_get_smem_cp_addr(init->dst, init->channel,
				      &addr);
	if (ret < 0)
		return ret;

	return snprintf(buf, PAGE_SIZE, "%u %u 0x%08X %d %u %u %u %u\n",
			(unsigned int)init->dst,
			(unsigned int)init->channel,
			addr,
			init->pre_cfg,
			(unsigned int)init->txblocknum,
			(unsigned int)init->txblocksize,
			(unsigned int)init->rxblocknum,
			(unsigned int)init->rxblocksize);
}

/* read-only attribute, owner/group readable (0440) */
static DEVICE_ATTR(base_addr, 0440,
		   base_addr_show, NULL);
/*
 * create_spool - set up the sblock channel, chrdev and sysfs node for one
 * spool device.  @dev_no is the fixed device number to register.  On
 * success *out receives the new spool_device.  Uses goto-based unwind on
 * failure.
 */
static int create_spool(struct platform_device *pdev,
			struct spool_init_data *init,
			struct spool_device **out, dev_t dev_no)
{
	int rval;
	struct spool_device *spool;
	//dev_t dev_no;
	char sp_name[16];

	snprintf(sp_name, sizeof(sp_name), "spool-%u-%u",
		 (unsigned int)init->dst,
		 (unsigned int)init->channel);
	//rval = alloc_chrdev_region(&dev_no, 0, 1, sp_name);
	rval = register_chrdev_region(dev_no, 1, sp_name);
	if (rval)
		return rval;

	/* pre-configured channels use the pcfg creation path */
	if (init->pre_cfg)
		rval = sblock_pcfg_create(init->dst,
					  init->channel,
					  init->txblocknum,
					  init->txblocksize,
					  init->rxblocknum,
					  init->rxblocksize);
	else
		rval = sblock_create(init->dst,
				     init->channel,
				     init->txblocknum,
				     init->txblocksize,
				     init->rxblocknum,
				     init->rxblocksize);
	if (rval) {
		pr_info("Failed to create sblock: %d\n", rval);
		goto free_devno;
	}

	spool = devm_kzalloc(&pdev->dev,
			     sizeof(struct spool_device),
			     GFP_KERNEL);
	if (!spool) {
		pr_info("Failed to allocate spool_device\n");
		rval = -ENOMEM;
		goto free_sblock;
	}

	spool->init = init;
	spool->major = MAJOR(dev_no);
	spool->minor = MINOR(dev_no);
	spool->plt_dev = pdev;

	/* nodev devices expose only the sysfs attribute, no chrdev node */
	if (!init->nodev) {
		cdev_init(&spool->cdev, &spool_fops);
		rval = cdev_add(&spool->cdev, dev_no, 1);
		if (rval) {
			pr_info("Failed to add spool cdev\n");
			goto free_spool;
		}
	}

	/* NOTE(review): device_create()/device_create_file() results are
	 * not checked — a sysfs failure is silently ignored here. */
	spool->sys_dev = device_create(spool_class, NULL,
				       dev_no,
				       spool, "%s", init->name);
	device_create_file(&pdev->dev, &dev_attr_base_addr);
	platform_set_drvdata(pdev, spool);

	*out = spool;
	return 0;

free_spool:
	devm_kfree(&pdev->dev, spool);
free_sblock:
	sblock_destroy(init->dst, init->channel);
free_devno:
	unregister_chrdev_region(dev_no, 1);
	return rval;
}
static int destroy_spool(struct spool_device *spool)
{
dev_t dev_no = MKDEV(spool->major, spool->minor);
struct spool_init_data *init = spool->init;
if (spool->sys_dev) {
device_destroy(spool_class, dev_no);
spool->sys_dev = NULL;
}
if (!init->nodev)
cdev_del(&spool->cdev);
sblock_destroy(init->dst, init->channel);
unregister_chrdev_region(dev_no, 1);
devm_kfree(&spool->plt_dev->dev, init);
devm_kfree(&spool->plt_dev->dev, spool);
return 0;
}
/*
 * spool_probe() - platform-driver probe.
 *
 * Parses the channel configuration (from the device tree, or from
 * built-in defaults in non-DT builds), logs it, and creates the spool
 * device.
 *
 * Returns 0 on success or a negative errno.
 */
static int spool_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	int rval;
	struct spool_init_data *init;
	struct spool_device *spool;
	dev_t dev_no;

#ifdef SPRD_PCIE_USE_DTS
	if (!np)
		return -ENODEV;
#endif
	rval = spool_parse_dt(&init, &pdev->dev, np, &dev_no);
	if (rval) {
		pr_err("Failed to parse spool device tree, ret=%d\n",
		       rval);
		return rval;
	}

	pr_info("spool: name=%s, dst=%u, channel=%u, pre_cfg=%u\n",
		init->name,
		init->dst,
		init->channel,
		init->pre_cfg);
	pr_info("spool: tx_num=%u, tx_size=%u, rx_num=%u, rx_size=%u\n",
		init->txblocknum,
		init->txblocksize,
		init->rxblocknum,
		init->rxblocksize);

	rval = create_spool(pdev, init, &spool, dev_no);
	if (rval) {
		pr_err("Failed to create spool device %u:%u, ret=%d\n",
		       (unsigned int)init->dst,
		       (unsigned int)init->channel, rval);
		devm_kfree(&pdev->dev, init);
		/*
		 * Fix: the original returned 0 here, so the driver stayed
		 * bound even though setup failed.  Propagate the error so
		 * the core unwinds the probe.
		 */
		return rval;
	}
	return 0;
}
/*
 * spool_remove() - platform-driver remove callback.
 *
 * Tears down the spool device previously stored as @pdev's drvdata and
 * clears the drvdata pointer.  Always returns 0.
 */
static int spool_remove(struct platform_device *pdev)
{
	struct spool_device *spool = platform_get_drvdata(pdev);

	destroy_spool(spool);
	platform_set_drvdata(pdev, NULL);
	return 0;
}
#ifdef SPRD_PCIE_USE_DTS
/* Device-tree match table: binds this driver to "sprd,spool" nodes. */
static const struct of_device_id spool_match_table[] = {
{ .compatible = "sprd,spool", },
{ },
};
#endif
/* Platform driver; the of_match_table is only present in DT builds. */
static struct platform_driver spool_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "spool",
#ifdef SPRD_PCIE_USE_DTS
.of_match_table = spool_match_table,
#endif
},
.probe = spool_probe,
.remove = spool_remove,
};
#ifndef SPRD_PCIE_USE_DTS
/*
 * Without DT, spool_init() registers this static platform device so the
 * driver's probe still runs; the empty release silences the device-core
 * warning about missing release callbacks.
 */
static void spool_platform_device_release(struct device *dev) {}
static struct platform_device spool_device = {
.name = "spool",
.id = -1,
.dev = {
.release = spool_platform_device_release,
}
};
#endif
/*
 * spool_init() - module-level setup for the spool subsystem.
 *
 * Creates the "spool" device class, registers the static platform
 * device (non-DT builds only), and registers the platform driver.
 * Every resource acquired before a failure is released again.
 *
 * Returns 0 on success or a negative errno.
 */
int spool_init(void)
{
	int ret;

	spool_class = class_create(THIS_MODULE, "spool");
	if (IS_ERR(spool_class))
		return PTR_ERR(spool_class);
#ifndef SPRD_PCIE_USE_DTS
	ret = platform_device_register(&spool_device);
	if (ret) {
		/* Fix: the original leaked spool_class on this path. */
		class_destroy(spool_class);
		return ret;
	}
#endif
	ret = platform_driver_register(&spool_driver);
	if (ret) {
#ifndef SPRD_PCIE_USE_DTS
		platform_device_unregister(&spool_device);
#endif
		/* Fix: the original leaked spool_class on this path too. */
		class_destroy(spool_class);
		return ret;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(spool_init);
/*
 * spool_exit() - module-level teardown, reverse of spool_init().
 *
 * Order matters: the driver is unregistered first, then the static
 * platform device (non-DT builds only), and the class is destroyed
 * last, after every device created in it is gone.
 */
void spool_exit(void)
{
	platform_driver_unregister(&spool_driver);
#ifndef SPRD_PCIE_USE_DTS
	platform_device_unregister(&spool_device);
#endif
	class_destroy(spool_class);
}
/*
 * spool_device_down() - mark the spool channel as down.
 *
 * Delegates to sblock_down() for dst 1, channel 5 — the same dst/channel
 * pair the non-DT spool_parse_dt() configures for "slog_nr".
 * NOTE(review): hard-coded; a DT configuration with a different
 * dst/channel would not be covered here — confirm with callers.
 */
void spool_device_down(void)
{
	sblock_down(1, 5);
}
/* Export for spool_exit() defined above. */
EXPORT_SYMBOL_GPL(spool_exit);
EXPORT_SYMBOL_GPL(spool_device_down);

View File

@ -1,29 +0,0 @@
/*
* Copyright (C) 2019 Spreadtrum Communications Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __SPOOL_H
#define __SPOOL_H
/*
 * Per-channel configuration for a spool (sblock pool) device, either
 * parsed from the device tree or filled with built-in defaults.
 */
struct spool_init_data {
char *name;	/* device node name, e.g. "slog_nr" */
u8 dst;		/* sblock destination id */
u8 channel;	/* sblock channel id */
u8 nodev;	/* non-zero: skip creating the char-device (cdev) node */
/* Preconfigured channel: selects sblock_pcfg_create() over sblock_create() */
int pre_cfg;
u32 txblocknum;	/* TX block count */
u32 txblocksize;	/* TX block size in bytes */
u32 rxblocknum;	/* RX block count */
u32 rxblocksize;	/* RX block size in bytes */
};
#endif